author     zdc <zdc@users.noreply.github.com>  2022-03-26 15:41:59 +0200
committer  GitHub <noreply@github.com>         2022-03-26 15:41:59 +0200
commit     aa60d48c2711cdcd9f88a4e5c77379adb0408231 (patch)
tree       349631a02467dae0158f6f663cc8aa8537974a97
parent     5c4b3943343a85fbe517e5ec1fc670b3a8566b4b (diff)
parent     31448cccedd8f841fb3ac7d0f2e3cdefe08a53ba (diff)
Merge pull request #51 from zdc/T2117-sagitta-22.1
T2117: Cloud-init updated to 22.1
-rw-r--r--.git-blame-ignore-revs4
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md26
-rw-r--r--.github/workflows/stale.yml4
-rw-r--r--.gitignore5
-rw-r--r--.pylintrc3
-rw-r--r--.travis.yml163
-rw-r--r--CONTRIBUTING.rst (renamed from HACKING.rst)263
-rw-r--r--ChangeLog547
-rw-r--r--Makefile50
-rw-r--r--README.md8
-rw-r--r--bash_completion/cloud-init5
-rw-r--r--cloud-tests-requirements.txt28
-rw-r--r--cloudinit/analyze/__main__.py269
-rw-r--r--cloudinit/analyze/dump.py71
-rw-r--r--cloudinit/analyze/show.py192
-rw-r--r--cloudinit/analyze/tests/test_boot.py170
-rw-r--r--cloudinit/apport.py151
-rw-r--r--cloudinit/atomic_helper.py25
-rw-r--r--cloudinit/cloud.py14
-rw-r--r--cloudinit/cmd/clean.py65
-rwxr-xr-xcloudinit/cmd/cloud_id.py83
-rw-r--r--cloudinit/cmd/devel/__init__.py3
-rw-r--r--cloudinit/cmd/devel/hotplug_hook.py291
-rw-r--r--cloudinit/cmd/devel/logs.py134
-rwxr-xr-xcloudinit/cmd/devel/make_mime.py113
-rwxr-xr-xcloudinit/cmd/devel/net_convert.py150
-rw-r--r--cloudinit/cmd/devel/parser.py45
-rwxr-xr-xcloudinit/cmd/devel/render.py54
-rw-r--r--cloudinit/cmd/devel/tests/test_logs.py167
-rw-r--r--cloudinit/cmd/devel/tests/test_render.py144
-rw-r--r--cloudinit/cmd/main.py630
-rw-r--r--cloudinit/cmd/query.py286
-rw-r--r--cloudinit/cmd/status.py141
-rw-r--r--cloudinit/cmd/tests/test_clean.py178
-rw-r--r--cloudinit/cmd/tests/test_cloud_id.py127
-rw-r--r--cloudinit/cmd/tests/test_main.py162
-rw-r--r--cloudinit/cmd/tests/test_query.py341
-rw-r--r--cloudinit/cmd/tests/test_status.py391
-rw-r--r--cloudinit/config/__init__.py20
-rw-r--r--cloudinit/config/cc_apk_configure.py185
-rw-r--r--cloudinit/config/cc_apt_configure.py765
-rw-r--r--cloudinit/config/cc_apt_pipelining.py74
-rw-r--r--cloudinit/config/cc_bootcmd.py69
-rwxr-xr-xcloudinit/config/cc_byobu.py61
-rw-r--r--cloudinit/config/cc_ca_certs.py231
-rw-r--r--cloudinit/config/cc_chef.py549
-rw-r--r--cloudinit/config/cc_debug.py72
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py64
-rw-r--r--cloudinit/config/cc_disk_setup.py502
-rw-r--r--cloudinit/config/cc_emit_upstart.py26
-rw-r--r--cloudinit/config/cc_fan.py68
-rw-r--r--cloudinit/config/cc_final_message.py26
-rw-r--r--cloudinit/config/cc_foo.py1
-rw-r--r--cloudinit/config/cc_growpart.py162
-rw-r--r--cloudinit/config/cc_grub_dpkg.py54
-rw-r--r--cloudinit/config/cc_install_hotplug.py151
-rw-r--r--cloudinit/config/cc_keyboard.py129
-rw-r--r--cloudinit/config/cc_keys_to_console.py59
-rw-r--r--cloudinit/config/cc_landscape.py24
-rw-r--r--cloudinit/config/cc_locale.py64
-rw-r--r--cloudinit/config/cc_lxd.py186
-rw-r--r--cloudinit/config/cc_mcollective.py50
-rw-r--r--cloudinit/config/cc_migrator.py27
-rw-r--r--cloudinit/config/cc_mounts.py185
-rw-r--r--cloudinit/config/cc_ntp.py572
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py30
-rw-r--r--cloudinit/config/cc_phone_home.py111
-rw-r--r--cloudinit/config/cc_power_state_change.py58
-rw-r--r--cloudinit/config/cc_puppet.py274
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py53
-rw-r--r--cloudinit/config/cc_reset_rmc.py43
-rw-r--r--cloudinit/config/cc_resizefs.py198
-rw-r--r--cloudinit/config/cc_resizefs_vyos.py333
-rw-r--r--cloudinit/config/cc_resolv_conf.py61
-rw-r--r--cloudinit/config/cc_rh_subscription.py248
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py33
-rw-r--r--cloudinit/config/cc_rsyslog.py101
-rw-r--r--cloudinit/config/cc_runcmd.py95
-rw-r--r--cloudinit/config/cc_salt_minion.py71
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py16
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py14
-rw-r--r--cloudinit/config/cc_scripts_per_once.py14
-rw-r--r--cloudinit/config/cc_scripts_user.py12
-rw-r--r--cloudinit/config/cc_scripts_vendor.py22
-rw-r--r--cloudinit/config/cc_seed_random.py53
-rw-r--r--cloudinit/config/cc_set_hostname.py40
-rwxr-xr-xcloudinit/config/cc_set_passwords.py85
-rw-r--r--cloudinit/config/cc_snap.py181
-rw-r--r--cloudinit/config/cc_spacewalk.py67
-rwxr-xr-xcloudinit/config/cc_ssh.py133
-rwxr-xr-xcloudinit/config/cc_ssh_authkey_fingerprints.py75
-rwxr-xr-xcloudinit/config/cc_ssh_import_id.py23
-rw-r--r--cloudinit/config/cc_timezone.py2
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py166
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py145
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py72
-rw-r--r--cloudinit/config/cc_update_hostname.py27
-rw-r--r--cloudinit/config/cc_users_groups.py39
-rw-r--r--cloudinit/config/cc_vyos.py5
-rw-r--r--cloudinit/config/cc_vyos_userdata.py5
-rw-r--r--cloudinit/config/cc_write_files.py273
-rw-r--r--cloudinit/config/cc_write_files_deferred.py56
-rw-r--r--cloudinit/config/cc_yum_add_repo.py69
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py167
-rw-r--r--cloudinit/config/cloud-init-schema.json560
-rw-r--r--cloudinit/config/schema.py658
-rw-r--r--cloudinit/config/tests/test_apt_pipelining.py28
-rw-r--r--cloudinit/config/tests/test_disable_ec2_metadata.py48
-rw-r--r--cloudinit/config/tests/test_mounts.py61
-rw-r--r--cloudinit/config/tests/test_resolv_conf.py86
-rw-r--r--cloudinit/config/tests/test_set_passwords.py155
-rw-r--r--cloudinit/config/tests/test_ubuntu_advantage.py333
-rw-r--r--cloudinit/config/tests/test_users_groups.py172
-rw-r--r--cloudinit/cs_utils.py20
-rw-r--r--cloudinit/dhclient_hook.py21
-rwxr-xr-xcloudinit/distros/__init__.py493
-rw-r--r--cloudinit/distros/almalinux.py (renamed from tests/cloud_tests/testcases/bugs/__init__.py)8
-rw-r--r--cloudinit/distros/alpine.py67
-rw-r--r--cloudinit/distros/amazon.py1
-rw-r--r--cloudinit/distros/arch.py177
-rw-r--r--cloudinit/distros/bsd.py69
-rw-r--r--cloudinit/distros/bsd_utils.py18
-rw-r--r--cloudinit/distros/centos.py1
-rw-r--r--cloudinit/distros/cloudlinux.py (renamed from tests/cloud_tests/testcases/examples/__init__.py)8
-rw-r--r--cloudinit/distros/debian.py274
-rw-r--r--cloudinit/distros/dragonflybsd.py12
-rw-r--r--cloudinit/distros/eurolinux.py (renamed from tests/cloud_tests/testcases/main/__init__.py)8
-rw-r--r--cloudinit/distros/fedora.py1
-rw-r--r--cloudinit/distros/freebsd.py105
-rw-r--r--cloudinit/distros/gentoo.py183
-rw-r--r--cloudinit/distros/miraclelinux.py (renamed from tests/cloud_tests/testcases/modules/__init__.py)8
-rw-r--r--cloudinit/distros/net_util.py68
-rw-r--r--cloudinit/distros/netbsd.py85
-rw-r--r--cloudinit/distros/networking.py16
-rw-r--r--cloudinit/distros/openEuler.py10
-rw-r--r--cloudinit/distros/openbsd.py20
-rw-r--r--cloudinit/distros/opensuse.py137
-rw-r--r--cloudinit/distros/parsers/__init__.py3
-rw-r--r--cloudinit/distros/parsers/hostname.py24
-rw-r--r--cloudinit/distros/parsers/hosts.py24
-rw-r--r--cloudinit/distros/parsers/networkmanager_conf.py6
-rw-r--r--cloudinit/distros/parsers/resolv_conf.py73
-rw-r--r--cloudinit/distros/parsers/sys_conf.py38
-rw-r--r--cloudinit/distros/photon.py150
-rw-r--r--cloudinit/distros/rhel.py103
-rw-r--r--cloudinit/distros/rhel_util.py4
-rw-r--r--cloudinit/distros/rocky.py10
-rw-r--r--cloudinit/distros/sles.py1
-rw-r--r--cloudinit/distros/tests/test_init.py156
-rw-r--r--cloudinit/distros/ubuntu.py33
-rwxr-xr-xcloudinit/distros/ug_util.py290
-rw-r--r--cloudinit/distros/virtuozzo.py10
-rw-r--r--cloudinit/dmi.py68
-rw-r--r--cloudinit/ec2_utils.py165
-rw-r--r--cloudinit/event.py72
-rw-r--r--cloudinit/filters/launch_index.py12
-rw-r--r--cloudinit/gpg.py70
-rw-r--r--cloudinit/handlers/__init__.py159
-rw-r--r--cloudinit/handlers/boot_hook.py21
-rw-r--r--cloudinit/handlers/cloud_config.py38
-rw-r--r--cloudinit/handlers/jinja_template.py120
-rw-r--r--cloudinit/handlers/shell_script.py15
-rw-r--r--cloudinit/handlers/shell_script_by_frequency.py62
-rw-r--r--cloudinit/handlers/upstart_job.py22
-rw-r--r--cloudinit/helpers.py127
-rw-r--r--cloudinit/importer.py25
-rw-r--r--cloudinit/log.py25
-rw-r--r--cloudinit/mergers/__init__.py43
-rw-r--r--cloudinit/mergers/m_dict.py34
-rw-r--r--cloudinit/mergers/m_list.py37
-rw-r--r--cloudinit/mergers/m_str.py5
-rw-r--r--cloudinit/net/__init__.py689
-rw-r--r--cloudinit/net/activators.py290
-rw-r--r--cloudinit/net/bsd.py125
-rwxr-xr-xcloudinit/net/cmdline.py101
-rw-r--r--cloudinit/net/dhcp.py208
-rw-r--r--cloudinit/net/eni.py452
-rw-r--r--cloudinit/net/freebsd.py48
-rw-r--r--cloudinit/net/netbsd.py27
-rw-r--r--cloudinit/net/netplan.py317
-rw-r--r--cloudinit/net/network_state.py880
-rw-r--r--cloudinit/net/networkd.py280
-rw-r--r--cloudinit/net/openbsd.py44
-rw-r--r--cloudinit/net/renderer.py37
-rw-r--r--cloudinit/net/renderers.py51
-rw-r--r--cloudinit/net/sysconfig.py903
-rw-r--r--cloudinit/net/tests/test_dhcp.py634
-rw-r--r--cloudinit/net/tests/test_init.py1270
-rw-r--r--cloudinit/net/tests/test_network_state.py58
-rw-r--r--cloudinit/net/udev.py23
-rw-r--r--cloudinit/netinfo.py492
-rw-r--r--cloudinit/patcher.py16
-rw-r--r--cloudinit/registry.py4
-rw-r--r--cloudinit/reporting/__init__.py9
-rw-r--r--cloudinit/reporting/events.py105
-rwxr-xr-xcloudinit/reporting/handlers.py128
-rw-r--r--cloudinit/safeyaml.py25
-rw-r--r--cloudinit/serial.py25
-rw-r--r--cloudinit/settings.py77
-rw-r--r--cloudinit/signal_handler.py12
-rw-r--r--cloudinit/simpletable.py26
-rw-r--r--cloudinit/sources/DataSourceAliYun.py18
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py113
-rwxr-xr-xcloudinit/sources/DataSourceAzure.py2358
-rw-r--r--cloudinit/sources/DataSourceBigstep.py9
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py39
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py137
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py115
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py67
-rw-r--r--cloudinit/sources/DataSourceEc2.py461
-rw-r--r--cloudinit/sources/DataSourceExoscale.py171
-rw-r--r--cloudinit/sources/DataSourceGCE.py251
-rw-r--r--cloudinit/sources/DataSourceHetzner.py99
-rw-r--r--cloudinit/sources/DataSourceIBMCloud.py128
-rw-r--r--cloudinit/sources/DataSourceLXD.py392
-rw-r--r--cloudinit/sources/DataSourceMAAS.py180
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py156
-rw-r--r--cloudinit/sources/DataSourceNone.py15
-rw-r--r--cloudinit/sources/DataSourceOVF.py445
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py194
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py132
-rw-r--r--cloudinit/sources/DataSourceOracle.py138
-rw-r--r--cloudinit/sources/DataSourceRbxCloud.py195
-rw-r--r--cloudinit/sources/DataSourceScaleway.py139
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py553
-rw-r--r--cloudinit/sources/DataSourceUpCloud.py162
-rw-r--r--cloudinit/sources/DataSourceVMware.py869
-rw-r--r--cloudinit/sources/DataSourceVultr.py157
-rw-r--r--cloudinit/sources/__init__.py471
-rwxr-xr-xcloudinit/sources/helpers/azure.py743
-rw-r--r--cloudinit/sources/helpers/digitalocean.py195
-rw-r--r--cloudinit/sources/helpers/hetzner.py15
-rw-r--r--cloudinit/sources/helpers/netlink.py187
-rw-r--r--cloudinit/sources/helpers/openstack.py439
-rw-r--r--cloudinit/sources/helpers/tests/test_openstack.py44
-rw-r--r--cloudinit/sources/helpers/upcloud.py229
-rw-r--r--cloudinit/sources/helpers/vmware/imc/boot_proto.py5
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py67
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_custom_script.py45
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py90
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_passwd.py38
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_source.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_error.py2
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_event.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_state.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py46
-rw-r--r--cloudinit/sources/helpers/vmware/imc/ipv4_mode.py11
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic.py33
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic_base.py29
-rw-r--r--cloudinit/sources/helpers/vultr.py230
-rw-r--r--cloudinit/sources/tests/test_init.py759
-rw-r--r--cloudinit/ssh_util.py288
-rw-r--r--cloudinit/stages.py766
-rw-r--r--cloudinit/subp.py165
-rw-r--r--cloudinit/temp_utils.py20
-rw-r--r--cloudinit/templater.py111
-rw-r--r--cloudinit/tests/test_gpg.py55
-rw-r--r--cloudinit/tests/test_netinfo.py181
-rw-r--r--cloudinit/tests/test_stages.py406
-rw-r--r--cloudinit/tests/test_subp.py286
-rw-r--r--cloudinit/tests/test_temp_utils.py117
-rw-r--r--cloudinit/tests/test_util.py854
-rw-r--r--cloudinit/type_utils.py4
-rw-r--r--cloudinit/url_helper.py296
-rw-r--r--cloudinit/user_data.py121
-rw-r--r--cloudinit/util.py1006
-rw-r--r--cloudinit/version.py11
-rw-r--r--cloudinit/warnings.py21
-rw-r--r--config/cloud.cfg.tmpl88
-rw-r--r--conftest.py23
-rw-r--r--doc-requirements.txt4
-rw-r--r--doc/examples/cloud-config-apt.txt36
-rw-r--r--doc/examples/cloud-config-ca-certs.txt6
-rw-r--r--doc/examples/cloud-config-chef.txt75
-rw-r--r--doc/examples/cloud-config-datasources.txt7
-rw-r--r--doc/examples/cloud-config-disk-setup.txt6
-rw-r--r--doc/examples/cloud-config-install-packages.txt2
-rw-r--r--doc/examples/cloud-config-landscape.txt2
-rw-r--r--doc/examples/cloud-config-mount-points.txt2
-rw-r--r--doc/examples/cloud-config-power-state.txt2
-rw-r--r--doc/examples/cloud-config-puppet.txt60
-rw-r--r--doc/examples/cloud-config-ssh-keys.txt10
-rw-r--r--doc/examples/cloud-config-user-groups.txt15
-rw-r--r--doc/examples/cloud-config-write-files.txt2
-rw-r--r--doc/examples/cloud-config.txt10
-rw-r--r--doc/examples/kernel-cmdline.txt2
-rw-r--r--doc/examples/part-handler.txt1
-rw-r--r--doc/examples/seed/README2
-rw-r--r--doc/examples/seed/meta-data2
-rw-r--r--doc/man/cloud-id.125
-rw-r--r--doc/man/cloud-init.12
-rw-r--r--doc/rtd/conf.py45
-rw-r--r--doc/rtd/index.rst7
-rw-r--r--doc/rtd/topics/availability.rst14
-rw-r--r--doc/rtd/topics/boot.rst16
-rw-r--r--doc/rtd/topics/bugs.rst4
-rw-r--r--doc/rtd/topics/cli.rst10
-rw-r--r--doc/rtd/topics/cloud_tests.rst764
-rw-r--r--doc/rtd/topics/code_review.rst8
-rw-r--r--doc/rtd/topics/contributing.rst2
-rw-r--r--doc/rtd/topics/datasources.rst15
-rw-r--r--doc/rtd/topics/datasources/aliyun.rst17
-rw-r--r--doc/rtd/topics/datasources/altcloud.rst2
-rw-r--r--doc/rtd/topics/datasources/azure.rst84
-rw-r--r--doc/rtd/topics/datasources/cloudsigma.rst2
-rw-r--r--doc/rtd/topics/datasources/cloudstack.rst4
-rw-r--r--doc/rtd/topics/datasources/configdrive.rst2
-rw-r--r--doc/rtd/topics/datasources/digitalocean.rst6
-rw-r--r--doc/rtd/topics/datasources/e24cloud.rst4
-rw-r--r--doc/rtd/topics/datasources/ec2.rst2
-rw-r--r--doc/rtd/topics/datasources/fallback.rst2
-rw-r--r--doc/rtd/topics/datasources/gce.rst24
-rw-r--r--doc/rtd/topics/datasources/lxd.rst65
-rw-r--r--doc/rtd/topics/datasources/nocloud.rst10
-rw-r--r--doc/rtd/topics/datasources/opennebula.rst10
-rw-r--r--doc/rtd/topics/datasources/openstack.rst16
-rw-r--r--doc/rtd/topics/datasources/oracle.rst2
-rw-r--r--doc/rtd/topics/datasources/ovf.rst19
-rw-r--r--doc/rtd/topics/datasources/rbxcloud.rst2
-rw-r--r--doc/rtd/topics/datasources/smartos.rst4
-rw-r--r--doc/rtd/topics/datasources/upcloud.rst24
-rw-r--r--doc/rtd/topics/datasources/vmware.rst358
-rw-r--r--doc/rtd/topics/datasources/vultr.rst35
-rw-r--r--doc/rtd/topics/datasources/zstack.rst2
-rw-r--r--doc/rtd/topics/debugging.rst16
-rw-r--r--doc/rtd/topics/dir_layout.rst2
-rw-r--r--doc/rtd/topics/events.rst95
-rw-r--r--doc/rtd/topics/examples.rst6
-rw-r--r--doc/rtd/topics/faq.rst10
-rw-r--r--doc/rtd/topics/format.rst42
-rw-r--r--doc/rtd/topics/hacking.rst2
-rw-r--r--doc/rtd/topics/instancedata.rst61
-rw-r--r--doc/rtd/topics/integration_tests.rst199
-rw-r--r--doc/rtd/topics/logging.rst6
-rw-r--r--doc/rtd/topics/merging.rst8
-rw-r--r--doc/rtd/topics/modules.rst4
-rw-r--r--doc/rtd/topics/network-config-format-eni.rst2
-rw-r--r--doc/rtd/topics/network-config-format-v1.rst30
-rw-r--r--doc/rtd/topics/network-config-format-v2.rst42
-rw-r--r--doc/rtd/topics/network-config.rst30
-rw-r--r--doc/rtd/topics/security.rst2
-rw-r--r--doc/rtd/topics/testing.rst160
-rw-r--r--doc/rtd/topics/vendordata.rst6
-rw-r--r--doc/sources/kernel-cmdline.txt2
-rw-r--r--doc/sources/ovf/README2
-rw-r--r--doc/sources/ovf/example/ovf-env.xml8
-rw-r--r--doc/sources/ovf/example/ubuntu-server.ovf6
-rwxr-xr-xdoc/sources/ovf/make-iso2
-rw-r--r--doc/userdata.txt2
-rw-r--r--integration-requirements.txt2
-rwxr-xr-xpackages/bddeb2
-rw-r--r--packages/pkg-deps.json14
-rw-r--r--packages/redhat/cloud-init.spec.in1
-rw-r--r--packages/suse/cloud-init.spec.in1
-rw-r--r--pyproject.toml102
-rw-r--r--requirements.txt9
-rwxr-xr-xsetup.py273
-rw-r--r--[-rwxr-xr-x]systemd/cloud-init-generator.tmpl3
-rw-r--r--systemd/cloud-init-hotplugd.service22
-rw-r--r--systemd/cloud-init-hotplugd.socket13
-rw-r--r--systemd/cloud-init.service.tmpl5
-rw-r--r--systemd/disable-sshd-keygen-if-cloud-init-active.conf7
-rwxr-xr-xsysvinit/freebsd/cloudinit2
-rw-r--r--templates/chef_client.rb.tmpl2
-rw-r--r--templates/chrony.conf.photon.tmpl48
-rw-r--r--templates/hosts.alpine.tmpl13
-rw-r--r--templates/hosts.debian.tmpl5
-rw-r--r--templates/hosts.gentoo.tmpl23
-rw-r--r--templates/hosts.photon.tmpl22
-rw-r--r--templates/ntp.conf.photon.tmpl61
-rw-r--r--templates/resolv.conf.tmpl2
-rw-r--r--templates/sources.list.debian.tmpl4
-rw-r--r--templates/sources.list.ubuntu.tmpl7
-rw-r--r--templates/systemd.resolved.conf.tmpl15
-rw-r--r--test-requirements.txt1
-rw-r--r--tests/cloud_tests/__init__.py39
-rw-r--r--tests/cloud_tests/__main__.py71
-rw-r--r--tests/cloud_tests/args.py304
-rw-r--r--tests/cloud_tests/bddeb.py119
-rw-r--r--tests/cloud_tests/collect.py219
-rw-r--r--tests/cloud_tests/config.py165
-rw-r--r--tests/cloud_tests/manage.py74
-rw-r--r--tests/cloud_tests/platforms.yaml77
-rw-r--r--tests/cloud_tests/platforms/__init__.py43
-rw-r--r--tests/cloud_tests/platforms/azurecloud/image.py116
-rw-r--r--tests/cloud_tests/platforms/azurecloud/instance.py247
-rw-r--r--tests/cloud_tests/platforms/azurecloud/platform.py240
-rw-r--r--tests/cloud_tests/platforms/azurecloud/regions.json42
-rw-r--r--tests/cloud_tests/platforms/azurecloud/snapshot.py58
-rw-r--r--tests/cloud_tests/platforms/ec2/image.py100
-rw-r--r--tests/cloud_tests/platforms/ec2/instance.py132
-rw-r--r--tests/cloud_tests/platforms/ec2/platform.py263
-rw-r--r--tests/cloud_tests/platforms/ec2/snapshot.py66
-rw-r--r--tests/cloud_tests/platforms/images.py56
-rw-r--r--tests/cloud_tests/platforms/instances.py165
-rw-r--r--tests/cloud_tests/platforms/lxd/image.py211
-rw-r--r--tests/cloud_tests/platforms/lxd/instance.py278
-rw-r--r--tests/cloud_tests/platforms/lxd/platform.py104
-rw-r--r--tests/cloud_tests/platforms/lxd/snapshot.py53
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/__init__.py0
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/image.py79
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/instance.py197
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/platform.py94
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/snapshot.py59
-rw-r--r--tests/cloud_tests/platforms/platforms.py109
-rw-r--r--tests/cloud_tests/platforms/snapshots.py44
-rw-r--r--tests/cloud_tests/releases.yaml364
-rw-r--r--tests/cloud_tests/run_funcs.py75
-rw-r--r--tests/cloud_tests/setup_image.py237
-rw-r--r--tests/cloud_tests/stage.py116
-rw-r--r--tests/cloud_tests/testcases.yaml50
-rw-r--r--tests/cloud_tests/testcases/__init__.py73
-rw-r--r--tests/cloud_tests/testcases/base.py385
-rw-r--r--tests/cloud_tests/testcases/bugs/README.md13
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1511485.py15
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1511485.yaml11
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1611074.yaml8
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1628337.py23
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1628337.yaml23
-rw-r--r--tests/cloud_tests/testcases/examples/README.md12
-rw-r--r--tests/cloud_tests/testcases/examples/TODO.md15
-rw-r--r--tests/cloud_tests/testcases/examples/add_apt_repositories.py20
-rw-r--r--tests/cloud_tests/testcases/examples/add_apt_repositories.yaml23
-rw-r--r--tests/cloud_tests/testcases/examples/alter_completion_message.py40
-rw-r--r--tests/cloud_tests/testcases/examples/alter_completion_message.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py27
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml41
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py31
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml63
-rw-r--r--tests/cloud_tests/testcases/examples/including_user_groups.py49
-rw-r--r--tests/cloud_tests/testcases/examples/including_user_groups.yaml56
-rw-r--r--tests/cloud_tests/testcases/examples/install_arbitrary_packages.py20
-rw-r--r--tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml20
-rw-r--r--tests/cloud_tests/testcases/examples/install_run_chef_recipes.py17
-rw-r--r--tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml104
-rw-r--r--tests/cloud_tests/testcases/examples/run_apt_upgrade.py19
-rw-r--r--tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml11
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands.py15
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands_first_boot.py15
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/setup_run_puppet.yaml55
-rw-r--r--tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py30
-rw-r--r--tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml45
-rw-r--r--tests/cloud_tests/testcases/main/README.md11
-rw-r--r--tests/cloud_tests/testcases/main/command_output_simple.py21
-rw-r--r--tests/cloud_tests/testcases/main/command_output_simple.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/README.md12
-rw-r--r--tests/cloud_tests/testcases/modules/TODO.md95
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_conf.py20
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_conf.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml20
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_primary.py24
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_primary.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_proxy.py22
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_security.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_security.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_key.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml50
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_list.py31
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml29
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_disable.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_os.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/bootcmd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/bootcmd.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/byobu.py24
-rw-r--r--tests/cloud_tests/testcases/modules/byobu.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ca_certs.py33
-rw-r--r--tests/cloud_tests/testcases/modules/ca_certs.yaml56
-rw-r--r--tests/cloud_tests/testcases/modules/debug_disable.py16
-rw-r--r--tests/cloud_tests/testcases/modules/debug_disable.yaml9
-rw-r--r--tests/cloud_tests/testcases/modules/debug_enable.py15
-rw-r--r--tests/cloud_tests/testcases/modules/debug_enable.yaml9
-rw-r--r--tests/cloud_tests/testcases/modules/final_message.py40
-rw-r--r--tests/cloud_tests/testcases/modules/final_message.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/keys_to_console.py22
-rw-r--r--tests/cloud_tests/testcases/modules/keys_to_console.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/landscape.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/locale.py30
-rw-r--r--tests/cloud_tests/testcases/modules/locale.yaml22
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_bridge.py36
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_bridge.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_dir.py30
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_dir.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/ntp.py24
-rw-r--r--tests/cloud_tests/testcases/modules/ntp.yaml22
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_chrony.py26
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_chrony.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_pools.py34
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_pools.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_servers.py34
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_servers.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_timesyncd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/package_update_upgrade_install.py36
-rw-r--r--tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml30
-rw-r--r--tests/cloud_tests/testcases/modules/runcmd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/runcmd.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_command.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_data.py15
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_data.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname.py17
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname_fqdn.py31
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml23
-rw-r--r--tests/cloud_tests/testcases/modules/set_password.py22
-rw-r--r--tests/cloud_tests/testcases/modules/set_password.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_expire.py23
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_expire.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list.py12
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list.yaml41
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list_string.py12
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list_string.yaml41
-rw-r--r--tests/cloud_tests/testcases/modules/snap.py16
-rw-r--r--tests/cloud_tests/testcases/modules/snap.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py16
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py18
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_import_id.py17
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_import_id.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_generate.py52
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml38
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_provided.py58
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml99
-rw-r--r--tests/cloud_tests/testcases/modules/timezone.py15
-rw-r--r--tests/cloud_tests/testcases/modules/timezone.yaml16
-rw-r--r--tests/cloud_tests/testcases/modules/user_groups.py49
-rw-r--r--tests/cloud_tests/testcases/modules/user_groups.yaml55
-rw-r--r--tests/cloud_tests/testcases/modules/write_files.py33
-rw-r--r--tests/cloud_tests/testcases/modules/write_files.yaml53
-rw-r--r--tests/cloud_tests/util.py532
-rw-r--r--tests/cloud_tests/verify.py149
-rw-r--r--tests/configs/sample1.yaml49
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-json91
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-json-down57
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-output3
-rw-r--r--tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl504
-rw-r--r--tests/integration_tests/__init__.py14
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test138
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test1.pub1
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test238
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test2.pub1
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test338
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test3.pub1
-rw-r--r--tests/integration_tests/assets/test_version_change.pklbin0 -> 21 bytes
-rw-r--r--tests/integration_tests/assets/trusty_with_mime.pkl572
-rw-r--r--tests/integration_tests/bugs/test_gh570.py39
-rw-r--r--tests/integration_tests/bugs/test_gh626.py43
-rw-r--r--tests/integration_tests/bugs/test_gh632.py33
-rw-r--r--tests/integration_tests/bugs/test_gh668.py46
-rw-r--r--tests/integration_tests/bugs/test_gh671.py53
-rw-r--r--tests/integration_tests/bugs/test_gh868.py27
-rw-r--r--tests/integration_tests/bugs/test_lp1813396.py31
-rw-r--r--tests/integration_tests/bugs/test_lp1835584.py101
-rw-r--r--tests/integration_tests/bugs/test_lp1886531.py4
-rw-r--r--tests/integration_tests/bugs/test_lp1897099.py14
-rw-r--r--tests/integration_tests/bugs/test_lp1898997.py77
-rw-r--r--tests/integration_tests/bugs/test_lp1900837.py5
-rw-r--r--tests/integration_tests/bugs/test_lp1901011.py67
-rw-r--r--tests/integration_tests/bugs/test_lp1910835.py64
-rw-r--r--tests/integration_tests/bugs/test_lp1912844.py105
-rw-r--r--tests/integration_tests/clouds.py331
-rw-r--r--tests/integration_tests/conftest.py273
-rw-r--r--tests/integration_tests/datasources/test_lxd_discovery.py90
-rw-r--r--tests/integration_tests/datasources/test_network_dependency.py33
-rw-r--r--tests/integration_tests/instances.py207
-rw-r--r--tests/integration_tests/integration_settings.py61
-rw-r--r--tests/integration_tests/modules/test_apt.py354
-rw-r--r--tests/integration_tests/modules/test_apt_configure_sources_list.py51
-rw-r--r--tests/integration_tests/modules/test_ca_certs.py90
-rw-r--r--tests/integration_tests/modules/test_cli.py81
-rw-r--r--tests/integration_tests/modules/test_combined.py342
-rw-r--r--tests/integration_tests/modules/test_command_output.py21
-rw-r--r--tests/integration_tests/modules/test_disk_setup.py212
-rw-r--r--tests/integration_tests/modules/test_growpart.py68
-rw-r--r--tests/integration_tests/modules/test_hotplug.py112
-rw-r--r--tests/integration_tests/modules/test_jinja_templating.py33
-rw-r--r--tests/integration_tests/modules/test_keyboard.py17
-rw-r--r--tests/integration_tests/modules/test_keys_to_console.py113
-rw-r--r--tests/integration_tests/modules/test_lxd_bridge.py46
-rw-r--r--tests/integration_tests/modules/test_ntp_servers.py98
-rw-r--r--tests/integration_tests/modules/test_package_update_upgrade_install.py19
-rw-r--r--tests/integration_tests/modules/test_persistence.py32
-rw-r--r--tests/integration_tests/modules/test_power_state_change.py97
-rw-r--r--tests/integration_tests/modules/test_puppet.py39
-rw-r--r--tests/integration_tests/modules/test_runcmd.py25
-rw-r--r--tests/integration_tests/modules/test_seed_random_data.py28
-rw-r--r--tests/integration_tests/modules/test_set_hostname.py27
-rw-r--r--tests/integration_tests/modules/test_set_password.py57
-rw-r--r--tests/integration_tests/modules/test_snap.py29
-rw-r--r--tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py18
-rw-r--r--tests/integration_tests/modules/test_ssh_generate.py16
-rw-r--r--tests/integration_tests/modules/test_ssh_import_id.py29
-rw-r--r--tests/integration_tests/modules/test_ssh_keys_provided.py122
-rw-r--r--tests/integration_tests/modules/test_ssh_keysfile.py224
-rw-r--r--tests/integration_tests/modules/test_timezone.py25
-rw-r--r--tests/integration_tests/modules/test_user_events.py110
-rw-r--r--tests/integration_tests/modules/test_users_groups.py50
-rw-r--r--tests/integration_tests/modules/test_version_change.py76
-rw-r--r--tests/integration_tests/modules/test_write_files.py47
-rw-r--r--tests/integration_tests/network/test_net_config_load.py27
-rw-r--r--tests/integration_tests/test_logging.py22
-rw-r--r--tests/integration_tests/test_shell_script_by_frequency.py48
-rw-r--r--tests/integration_tests/test_upgrade.py188
-rw-r--r--tests/integration_tests/util.py142
-rw-r--r--tests/unittests/__init__.py1
-rw-r--r--tests/unittests/analyze/test_boot.py174
-rw-r--r--tests/unittests/analyze/test_dump.py (renamed from cloudinit/analyze/tests/test_dump.py)215
-rw-r--r--tests/unittests/cloudinit/__init__py (renamed from cloudinit/cmd/devel/tests/__init__.py)0
-rw-r--r--tests/unittests/cmd/__init__.py (renamed from cloudinit/cmd/tests/__init__.py)0
-rw-r--r--tests/unittests/cmd/devel/__init__.py (renamed from cloudinit/distros/tests/__init__.py)0
-rw-r--r--tests/unittests/cmd/devel/test_hotplug_hook.py236
-rw-r--r--tests/unittests/cmd/devel/test_logs.py213
-rw-r--r--tests/unittests/cmd/devel/test_render.py154
-rw-r--r--tests/unittests/cmd/test_clean.py211
-rw-r--r--tests/unittests/cmd/test_cloud_id.py187
-rw-r--r--tests/unittests/cmd/test_main.py241
-rw-r--r--tests/unittests/cmd/test_query.py537
-rw-r--r--tests/unittests/cmd/test_status.py548
-rw-r--r--tests/unittests/config/__init__.py (renamed from cloudinit/net/tests/__init__.py)0
-rw-r--r--tests/unittests/config/test_apt_conf_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_conf_v1.py)68
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py)152
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v3.py (renamed from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py)190
-rw-r--r--tests/unittests/config/test_apt_key.py124
-rw-r--r--tests/unittests/config/test_apt_source_v1.py852
-rw-r--r--tests/unittests/config/test_apt_source_v3.py1442
-rw-r--r--tests/unittests/config/test_cc_apk_configure.py410
-rw-r--r--tests/unittests/config/test_cc_apt_configure.py202
-rw-r--r--tests/unittests/config/test_cc_apt_pipelining.py65
-rw-r--r--tests/unittests/config/test_cc_bootcmd.py165
-rw-r--r--tests/unittests/config/test_cc_byobu.py51
-rw-r--r--tests/unittests/config/test_cc_ca_certs.py507
-rw-r--r--tests/unittests/config/test_cc_chef.py464
-rw-r--r--tests/unittests/config/test_cc_debug.py112
-rw-r--r--tests/unittests/config/test_cc_disable_ec2_metadata.py81
-rw-r--r--tests/unittests/config/test_cc_disk_setup.py333
-rw-r--r--tests/unittests/config/test_cc_final_message.py (renamed from cloudinit/config/tests/test_final_message.py)0
-rw-r--r--tests/unittests/config/test_cc_growpart.py (renamed from tests/unittests/test_handler/test_handler_growpart.py)223
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py (renamed from cloudinit/config/tests/test_grub_dpkg.py)121
-rw-r--r--tests/unittests/config/test_cc_install_hotplug.py129
-rw-r--r--tests/unittests/config/test_cc_keys_to_console.py40
-rw-r--r--tests/unittests/config/test_cc_landscape.py170
-rw-r--r--tests/unittests/config/test_cc_locale.py123
-rw-r--r--tests/unittests/config/test_cc_lxd.py272
-rw-r--r--tests/unittests/config/test_cc_mcollective.py (renamed from tests/unittests/test_handler/test_handler_mcollective.py)123
-rw-r--r--tests/unittests/config/test_cc_mounts.py522
-rw-r--r--tests/unittests/config/test_cc_ntp.py (renamed from tests/unittests/test_handler/test_handler_ntp.py)762
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py (renamed from tests/unittests/test_handler/test_handler_power_state.py)78
-rw-r--r--tests/unittests/config/test_cc_puppet.py450
-rw-r--r--tests/unittests/config/test_cc_refresh_rmc_and_interface.py157
-rw-r--r--tests/unittests/config/test_cc_resizefs.py490
-rw-r--r--tests/unittests/config/test_cc_resizefs_vyos.py490
-rw-r--r--tests/unittests/config/test_cc_resolv_conf.py197
-rw-r--r--tests/unittests/config/test_cc_rh_subscription.py320
-rw-r--r--tests/unittests/config/test_cc_rsyslog.py (renamed from tests/unittests/test_handler/test_handler_rsyslog.py)114
-rw-r--r--tests/unittests/config/test_cc_runcmd.py137
-rw-r--r--tests/unittests/config/test_cc_seed_random.py221
-rw-r--r--tests/unittests/config/test_cc_set_hostname.py208
-rw-r--r--tests/unittests/config/test_cc_set_passwords.py177
-rw-r--r--tests/unittests/config/test_cc_snap.py (renamed from cloudinit/config/tests/test_snap.py)492
-rw-r--r--tests/unittests/config/test_cc_spacewalk.py (renamed from tests/unittests/test_handler/test_handler_spacewalk.py)36
-rw-r--r--tests/unittests/config/test_cc_ssh.py (renamed from cloudinit/config/tests/test_ssh.py)358
-rw-r--r--tests/unittests/config/test_cc_timezone.py (renamed from tests/unittests/test_handler/test_handler_timezone.py)49
-rw-r--r--tests/unittests/config/test_cc_ubuntu_advantage.py391
-rw-r--r--tests/unittests/config/test_cc_ubuntu_drivers.py (renamed from cloudinit/config/tests/test_ubuntu_drivers.py)213
-rw-r--r--tests/unittests/config/test_cc_update_etc_hosts.py68
-rw-r--r--tests/unittests/config/test_cc_users_groups.py268
-rw-r--r--tests/unittests/config/test_cc_write_files.py (renamed from tests/unittests/test_handler/test_handler_write_files.py)154
-rw-r--r--tests/unittests/config/test_cc_write_files_deferred.py85
-rw-r--r--tests/unittests/config/test_cc_yum_add_repo.py120
-rw-r--r--tests/unittests/config/test_cc_zypper_add_repo.py (renamed from tests/unittests/test_handler/test_handler_zypper_add_repo.py)170
-rw-r--r--tests/unittests/config/test_schema.py917
-rw-r--r--tests/unittests/distros/__init__.py (renamed from tests/unittests/test_distros/__init__.py)10
-rw-r--r--tests/unittests/distros/test_arch.py55
-rw-r--r--tests/unittests/distros/test_bsd_utils.py66
-rw-r--r--tests/unittests/distros/test_create_users.py282
-rw-r--r--tests/unittests/distros/test_debian.py211
-rw-r--r--tests/unittests/distros/test_dragonflybsd.py25
-rw-r--r--tests/unittests/distros/test_freebsd.py (renamed from tests/unittests/test_distros/test_freebsd.py)28
-rw-r--r--tests/unittests/distros/test_generic.py383
-rw-r--r--tests/unittests/distros/test_gentoo.py (renamed from tests/unittests/test_distros/test_gentoo.py)13
-rw-r--r--tests/unittests/distros/test_hostname.py (renamed from tests/unittests/test_distros/test_hostname.py)16
-rw-r--r--tests/unittests/distros/test_hosts.py47
-rw-r--r--tests/unittests/distros/test_init.py248
-rw-r--r--tests/unittests/distros/test_manage_service.py41
-rw-r--r--tests/unittests/distros/test_netbsd.py (renamed from tests/unittests/test_distros/test_netbsd.py)11
-rw-r--r--tests/unittests/distros/test_netconfig.py (renamed from tests/unittests/test_distros/test_netconfig.py)682
-rw-r--r--tests/unittests/distros/test_networking.py (renamed from cloudinit/distros/tests/test_networking.py)34
-rw-r--r--tests/unittests/distros/test_opensuse.py (renamed from tests/unittests/test_distros/test_opensuse.py)5
-rw-r--r--tests/unittests/distros/test_photon.py68
-rw-r--r--tests/unittests/distros/test_resolv.py (renamed from tests/unittests/test_distros/test_resolv.py)55
-rw-r--r--tests/unittests/distros/test_sles.py (renamed from tests/unittests/test_distros/test_sles.py)5
-rw-r--r--tests/unittests/distros/test_sysconfig.py (renamed from tests/unittests/test_distros/test_sysconfig.py)64
-rw-r--r--tests/unittests/distros/test_user_data_normalize.py365
-rw-r--r--tests/unittests/filters/__init__.py (renamed from cloudinit/sources/tests/__init__.py)0
-rw-r--r--tests/unittests/filters/test_launch_index.py (renamed from tests/unittests/test_filters/test_launch_index.py)23
-rw-r--r--tests/unittests/helpers.py (renamed from cloudinit/tests/helpers.py)212
-rw-r--r--tests/unittests/net/__init__.py (renamed from cloudinit/tests/__init__.py)0
-rw-r--r--tests/unittests/net/test_dhcp.py797
-rw-r--r--tests/unittests/net/test_init.py1734
-rw-r--r--tests/unittests/net/test_network_state.py222
-rw-r--r--tests/unittests/net/test_networkd.py64
-rw-r--r--tests/unittests/runs/__init__.py (renamed from tests/cloud_tests/platforms/azurecloud/__init__.py)0
-rw-r--r--tests/unittests/runs/test_merge_run.py61
-rw-r--r--tests/unittests/runs/test_simple_run.py (renamed from tests/unittests/test_runs/test_simple_run.py)134
-rw-r--r--tests/unittests/sources/__init__.py (renamed from tests/cloud_tests/platforms/ec2/__init__.py)0
-rw-r--r--tests/unittests/sources/helpers/test_netlink.py (renamed from cloudinit/sources/helpers/tests/test_netlink.py)357
-rw-r--r--tests/unittests/sources/helpers/test_openstack.py62
-rw-r--r--tests/unittests/sources/test_aliyun.py287
-rw-r--r--tests/unittests/sources/test_altcloud.py (renamed from tests/unittests/test_datasource/test_altcloud.py)311
-rw-r--r--tests/unittests/sources/test_azure.py4306
-rw-r--r--tests/unittests/sources/test_azure_helper.py (renamed from tests/unittests/test_datasource/test_azure_helper.py)1156
-rw-r--r--tests/unittests/sources/test_cloudsigma.py (renamed from tests/unittests/test_datasource/test_cloudsigma.py)74
-rw-r--r--tests/unittests/sources/test_cloudstack.py (renamed from tests/unittests/test_datasource/test_cloudstack.py)121
-rw-r--r--tests/unittests/sources/test_common.py123
-rw-r--r--tests/unittests/sources/test_configdrive.py1068
-rw-r--r--tests/unittests/sources/test_digitalocean.py389
-rw-r--r--tests/unittests/sources/test_ec2.py (renamed from tests/unittests/test_datasource/test_ec2.py)853
-rw-r--r--tests/unittests/sources/test_exoscale.py241
-rw-r--r--tests/unittests/sources/test_gce.py416
-rw-r--r--tests/unittests/sources/test_hetzner.py (renamed from tests/unittests/test_datasource/test_hetzner.py)124
-rw-r--r--tests/unittests/sources/test_ibmcloud.py (renamed from tests/unittests/test_datasource/test_ibmcloud.py)299
-rw-r--r--tests/unittests/sources/test_init.py994
-rw-r--r--tests/unittests/sources/test_lxd.py394
-rw-r--r--tests/unittests/sources/test_maas.py (renamed from tests/unittests/test_datasource/test_maas.py)149
-rw-r--r--tests/unittests/sources/test_nocloud.py (renamed from tests/unittests/test_datasource/test_nocloud.py)320
-rw-r--r--tests/unittests/sources/test_opennebula.py (renamed from tests/unittests/test_datasource/test_opennebula.py)890
-rw-r--r--tests/unittests/sources/test_openstack.py788
-rw-r--r--tests/unittests/sources/test_oracle.py (renamed from cloudinit/sources/tests/test_oracle.py)426
-rw-r--r--tests/unittests/sources/test_ovf.py1237
-rw-r--r--tests/unittests/sources/test_rbx.py241
-rw-r--r--tests/unittests/sources/test_scaleway.py526
-rw-r--r--tests/unittests/sources/test_smartos.py (renamed from tests/unittests/test_datasource/test_smartos.py)960
-rw-r--r--tests/unittests/sources/test_upcloud.py331
-rw-r--r--tests/unittests/sources/test_vmware.py389
-rw-r--r--tests/unittests/sources/test_vultr.py339
-rw-r--r--tests/unittests/sources/vmware/__init__.py (renamed from tests/cloud_tests/platforms/lxd/__init__.py)0
-rw-r--r--tests/unittests/sources/vmware/test_custom_script.py (renamed from tests/unittests/test_vmware/test_custom_script.py)63
-rw-r--r--tests/unittests/sources/vmware/test_guestcust_util.py109
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py635
-rw-r--r--tests/unittests/test__init__.py193
-rw-r--r--tests/unittests/test_atomic_helper.py6
-rw-r--r--tests/unittests/test_builtin_handlers.py472
-rw-r--r--tests/unittests/test_cli.py304
-rw-r--r--tests/unittests/test_conftest.py (renamed from cloudinit/tests/test_conftest.py)12
-rw-r--r--tests/unittests/test_cs_util.py39
-rw-r--r--tests/unittests/test_data.py537
-rw-r--r--tests/unittests/test_datasource/__init__.py0
-rw-r--r--tests/unittests/test_datasource/test_aliyun.py218
-rw-r--r--tests/unittests/test_datasource/test_azure.py2999
-rw-r--r--tests/unittests/test_datasource/test_common.py110
-rw-r--r--tests/unittests/test_datasource/test_configdrive.py837
-rw-r--r--tests/unittests/test_datasource/test_digitalocean.py372
-rw-r--r--tests/unittests/test_datasource/test_exoscale.py211
-rw-r--r--tests/unittests/test_datasource/test_gce.py363
-rw-r--r--tests/unittests/test_datasource/test_openstack.py694
-rw-r--r--tests/unittests/test_datasource/test_ovf.py544
-rw-r--r--tests/unittests/test_datasource/test_rbx.py238
-rw-r--r--tests/unittests/test_datasource/test_scaleway.py473
-rw-r--r--tests/unittests/test_dhclient_hook.py (renamed from cloudinit/tests/test_dhclient_hook.py)89
-rw-r--r--tests/unittests/test_distros/test_arch.py45
-rw-r--r--tests/unittests/test_distros/test_bsd_utils.py67
-rw-r--r--tests/unittests/test_distros/test_create_users.py271
-rw-r--r--tests/unittests/test_distros/test_debian.py100
-rw-r--r--tests/unittests/test_distros/test_generic.py302
-rw-r--r--tests/unittests/test_distros/test_hosts.py45
-rw-r--r--tests/unittests/test_distros/test_user_data_normalize.py374
-rw-r--r--tests/unittests/test_dmi.py (renamed from cloudinit/tests/test_dmi.py)90
-rw-r--r--tests/unittests/test_ds_identify.py1634
-rw-r--r--tests/unittests/test_ec2_util.py376
-rw-r--r--tests/unittests/test_event.py26
-rw-r--r--tests/unittests/test_features.py (renamed from cloudinit/tests/test_features.py)38
-rw-r--r--tests/unittests/test_filters/__init__.py0
-rw-r--r--tests/unittests/test_gpg.py139
-rw-r--r--tests/unittests/test_handler/__init__.py0
-rw-r--r--tests/unittests/test_handler/test_handler_apk_configure.py299
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v1.py626
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v3.py1134
-rw-r--r--tests/unittests/test_handler/test_handler_bootcmd.py161
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py298
-rw-r--r--tests/unittests/test_handler/test_handler_chef.py280
-rw-r--r--tests/unittests/test_handler/test_handler_debug.py74
-rw-r--r--tests/unittests/test_handler/test_handler_disk_setup.py243
-rw-r--r--tests/unittests/test_handler/test_handler_etc_hosts.py70
-rw-r--r--tests/unittests/test_handler/test_handler_landscape.py130
-rw-r--r--tests/unittests/test_handler/test_handler_locale.py108
-rw-r--r--tests/unittests/test_handler/test_handler_lxd.py231
-rw-r--r--tests/unittests/test_handler/test_handler_mounts.py397
-rw-r--r--tests/unittests/test_handler/test_handler_puppet.py179
-rw-r--r--tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py109
-rw-r--r--tests/unittests/test_handler/test_handler_resizefs.py398
-rw-r--r--tests/unittests/test_handler/test_handler_resizefs_vyos.py398
-rw-r--r--tests/unittests/test_handler/test_handler_runcmd.py121
-rw-r--r--tests/unittests/test_handler/test_handler_seed_random.py221
-rw-r--r--tests/unittests/test_handler/test_handler_set_hostname.py126
-rw-r--r--tests/unittests/test_handler/test_handler_yum_add_repo.py111
-rw-r--r--tests/unittests/test_handler/test_schema.py554
-rw-r--r--tests/unittests/test_helpers.py40
-rw-r--r--tests/unittests/test_log.py14
-rw-r--r--tests/unittests/test_merging.py123
-rw-r--r--tests/unittests/test_net.py5107
-rw-r--r--tests/unittests/test_net_activators.py262
-rw-r--r--tests/unittests/test_net_freebsd.py80
-rw-r--r--tests/unittests/test_netinfo.py353
-rw-r--r--tests/unittests/test_pathprefix2dict.py28
-rw-r--r--tests/unittests/test_persistence.py (renamed from cloudinit/tests/test_persistence.py)0
-rw-r--r--tests/unittests/test_registry.py21
-rw-r--r--tests/unittests/test_render_cloudcfg.py91
-rw-r--r--tests/unittests/test_reporting.py373
-rw-r--r--tests/unittests/test_reporting_hyperv.py193
-rw-r--r--tests/unittests/test_rh_subscription.py234
-rw-r--r--tests/unittests/test_runs/__init__.py0
-rw-r--r--tests/unittests/test_runs/test_merge_run.py60
-rw-r--r--tests/unittests/test_simpletable.py (renamed from cloudinit/tests/test_simpletable.py)49
-rw-r--r--tests/unittests/test_sshutil.py1199
-rw-r--r--tests/unittests/test_stages.py568
-rw-r--r--tests/unittests/test_subp.py353
-rw-r--r--tests/unittests/test_temp_utils.py135
-rw-r--r--tests/unittests/test_templating.py103
-rw-r--r--tests/unittests/test_upgrade.py (renamed from cloudinit/tests/test_upgrade.py)9
-rw-r--r--tests/unittests/test_url_helper.py (renamed from cloudinit/tests/test_url_helper.py)134
-rw-r--r--tests/unittests/test_util.py2034
-rw-r--r--tests/unittests/test_version.py (renamed from cloudinit/tests/test_version.py)11
-rw-r--r--tests/unittests/test_vmware/__init__.py0
-rw-r--r--tests/unittests/test_vmware/test_guestcust_util.py98
-rw-r--r--tests/unittests/test_vmware_config_file.py529
-rw-r--r--tests/unittests/util.py145
-rw-r--r--tools/.github-cla-signers59
-rw-r--r--tools/.lp-to-git-user2
-rwxr-xr-xtools/build-on-netbsd25
-rwxr-xr-xtools/ds-identify153
-rwxr-xr-xtools/hook-hotplug22
-rwxr-xr-xtools/mock-meta.py301
-rwxr-xr-xtools/read-dependencies16
-rwxr-xr-xtools/render-cloudcfg78
-rwxr-xr-xtools/run-container9
-rwxr-xr-xtools/run-flake8 (renamed from tools/run-pyflakes)4
-rwxr-xr-xtools/run-pep821
-rwxr-xr-xtools/validate-yaml.py4
-rwxr-xr-xtools/write-ssh-key-fingerprints58
-rw-r--r--tox.ini256
852 files changed, 79569 insertions, 55447 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000..58796591
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,4 @@
+# Automatically apply to git blame with `git config blame.ignorerevsfile .git-blame-ignore-revs`
+
+# Apply black and isort formatting
+bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf
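
Usage sketch (not part of the diff): assuming the file above is committed at the repository root, a developer would enable it once per clone with

    # Tell git blame to skip the commits listed in .git-blame-ignore-revs
    git config blame.ignoreRevsFile .git-blame-ignore-revs

after which `git blame` attributes lines to the commits that last changed their content, ignoring the black/isort reformatting commit listed in the file.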
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 2b59d10a..017e82e4 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,17 +1,19 @@
## Proposed Commit Message
<!-- Include a proposed commit message because all PRs are squash merged -->
-> summary: no more than 70 characters
->
-> A description of what the change being made is and why it is being
-> made, if the summary line is insufficient. The blank line above is
-> required. This should be wrapped at 72 characters, but otherwise has
-> no particular length requirements.
->
-> If you need to write multiple paragraphs, feel free.
->
-> LP: #NNNNNNN (replace with the appropriate bug reference or remove
-> this line entirely if there is no associated bug)
+```
+summary: no more than 70 characters
+
+A description of what the change being made is and why it is being
+made, if the summary line is insufficient. The blank line above is
+required. This should be wrapped at 72 characters, but otherwise has
+no particular length requirements.
+
+If you need to write multiple paragraphs, feel free.
+
+LP: #NNNNNNN (replace with the appropriate bug reference or remove
+this line entirely if there is no associated bug)
+```
## Additional Context
<!-- If relevant -->
@@ -25,6 +27,6 @@ setup, and teardown. Scripts used may be attached directly to this PR. -->
## Checklist:
<!-- Go over all the following points, and put an `x` in all the boxes
that apply. -->
- - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html)
+ - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
- [ ] I have updated or added any unit tests accordingly
- [ ] I have updated or added any documentation accordingly
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 20c5735d..c763dafd 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -18,7 +18,7 @@ jobs:
stale-pr-message: |
Hello! Thank you for this proposed change to cloud-init. This pull request is now marked as stale as it has not seen any activity in 14 days. If no activity occurs within the next 7 days, this pull request will automatically close.
- If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging mitechie, and he will ensure that someone takes a look soon.
+ If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging TheRealFalcon, and he will ensure that someone takes a look soon.
- (If the pull request is closed, please do feel free to reopen it if you wish to continue working on it.)
+ (If the pull request is closed and you would like to continue working on it, please do tag TheRealFalcon to reopen it.)
stale-pr-label: 'stale-pr'
diff --git a/.gitignore b/.gitignore
index 5a68bff9..6eae45c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ dist
__pycache__
.tox
.coverage
+.python-version
doc/rtd_html
parts
prime
@@ -16,6 +17,10 @@ stage
.pc/
.cache/
.mypy_cache/
+.pytest_cache/
+.vscode/
+htmlcov/
+tags
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/.pylintrc b/.pylintrc
index 94a81d0e..3edb0092 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -24,8 +24,9 @@ jobs=4
# W0631(undefined-loop-variable)
# W0703(broad-except)
# W1401(anomalous-backslash-in-string)
+# W1514(unspecified-encoding)
-disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
+disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514
[REPORTS]
diff --git a/.travis.yml b/.travis.yml
index 2fad49f3..f655fa50 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,96 +33,17 @@ install:
script:
- tox
+env:
+ TOXENV=py3
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
+
matrix:
fast_finish: true
- allow_failures:
- - name: "Integration Tests (WIP)"
include:
- python: 3.6
- env:
- TOXENV=py3
- PYTEST_ADDOPTS=-v # List all tests run by pytest
- - if: NOT branch =~ /^ubuntu\//
- cache:
- - directories:
- - lxd_images
- - chroots
- before_cache:
- - |
- # Find the most recent image file
- latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
- # This might be <hash>.rootfs or <hash>, normalise
- latest_file="$(basename $latest_file .rootfs)"
- # Find all files with that prefix and copy them to our cache dir
- sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
- install:
- - git fetch --unshallow
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper
- - pip install .
- - pip install tox
- # bionic has lxd from deb installed, remove it first to ensure
- # pylxd talks only to the lxd from snap
- - sudo apt remove --purge lxd lxd-client
- - sudo rm -Rf /var/lib/lxd
- - sudo snap install lxd
- - sudo lxd init --auto
- - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
- # Move any cached lxd images into lxd's image dir
- - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- - sudo usermod -a -G lxd $USER
- - sudo sbuild-adduser $USER
- - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
- script:
- # Ubuntu LTS: Build
- - ./packages/bddeb -S -d --release xenial
- - |
- needs_caching=false
- if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
- # If we have a cached chroot, move it into place
- sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
- sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
- # Write its configuration
- cat > sbuild-xenial-amd64 << EOM
- [xenial-amd64]
- description=xenial-amd64
- groups=sbuild,root,admin
- root-groups=sbuild,root,admin
- # Uncomment these lines to allow members of these groups to access
- # the -source chroots directly (useful for automated updates, etc).
- #source-root-users=sbuild,root,admin
- #source-root-groups=sbuild,root,admin
- type=directory
- profile=sbuild
- union-type=overlay
- directory=/var/lib/schroot/chroots/xenial-amd64
- EOM
- sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
- sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
- # And ensure it's up-to-date.
- before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)"
- sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
- after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)
- if [ "$before_pkgs" != "$after_pkgs" ]; then
- needs_caching=true
- fi
- else
- # Otherwise, create the chroot
- sudo -E su $USER -c 'mk-sbuild xenial'
- needs_caching=true
- fi
- # If there are changes to the schroot (or it's entirely new),
- # tar up the schroot (to preserve ownership/permissions) and
- # move it into the cached dir; no need to compress it because
- # Travis will do that anyway
- if [ "$needs_caching" = "true" ]; then
- sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 .
- fi
- # Use sudo to get a new shell where we're in the sbuild group
- - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
- # Ubuntu LTS: Integration
- - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
- - name: "Integration Tests (WIP)"
+ - name: "Integration Tests"
if: NOT branch =~ /^ubuntu\//
+ env: {}
cache:
- directories:
- lxd_images
@@ -152,19 +73,20 @@ matrix:
- sudo usermod -a -G lxd $USER
- sudo sbuild-adduser $USER
- cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
+ - echo "[lxd]" > /home/$USER/.config/pycloudlib.toml
script:
# Ubuntu LTS: Build
- - ./packages/bddeb -S -d --release xenial
+ - ./packages/bddeb -S -d --release bionic
- |
needs_caching=false
- if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
+ if [ -e "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" ]; then
# If we have a cached chroot, move it into place
- sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
- sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
+ sudo mkdir -p /var/lib/schroot/chroots/bionic-amd64
+ sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64
# Write its configuration
- cat > sbuild-xenial-amd64 << EOM
- [xenial-amd64]
- description=xenial-amd64
+ cat > sbuild-bionic-amd64 << EOM
+ [bionic-amd64]
+ description=bionic-amd64
groups=sbuild,root,admin
root-groups=sbuild,root,admin
# Uncomment these lines to allow members of these groups to access
@@ -174,20 +96,20 @@ matrix:
type=directory
profile=sbuild
union-type=overlay
- directory=/var/lib/schroot/chroots/xenial-amd64
+ directory=/var/lib/schroot/chroots/bionic-amd64
EOM
- sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
- sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
+ sudo mv sbuild-bionic-amd64 /etc/schroot/chroot.d/
+ sudo chown root /etc/schroot/chroot.d/sbuild-bionic-amd64
# And ensure it's up-to-date.
- before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)"
- sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
- after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)
+ before_pkgs="$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)"
+ sudo schroot -c source:bionic-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
+ after_pkgs=$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)
if [ "$before_pkgs" != "$after_pkgs" ]; then
needs_caching=true
fi
else
# Otherwise, create the chroot
- sudo -E su $USER -c 'mk-sbuild xenial'
+ sudo -E su $USER -c 'mk-sbuild bionic'
needs_caching=true
fi
# If there are changes to the schroot (or it's entirely new),
@@ -195,28 +117,41 @@ matrix:
# move it into the cached dir; no need to compress it because
# Travis will do that anyway
if [ "$needs_caching" = "true" ]; then
- sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 .
+ sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64 .
fi
# Use sudo to get a new shell where we're in the sbuild group
- - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
- - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' &
+ # Don't run integration tests when build fails
- |
- SECONDS=0
- while [ -e /proc/$! ]; do
- if [ "$SECONDS" -gt "570" ]; then
- echo -n '.'
- SECONDS=0
- fi
- sleep 10
- done
- - python: 3.5
+ sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=bionic cloud-init_*.dsc' &&
+ ssh-keygen -P "" -q -f ~/.ssh/id_rsa &&
+ sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci'
+ - python: 3.6
env:
- TOXENV=xenial
- PYTEST_ADDOPTS=-v # List all tests run by pytest
- dist: xenial
+ TOXENV=lowest-supported
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
+ dist: bionic
- python: 3.6
env: TOXENV=flake8
- python: 3.6
+ env: TOXENV=mypy
+ - python: 3.6
env: TOXENV=pylint
- python: 3.6
+ env: TOXENV=black
+ - python: 3.6
+ env: TOXENV=isort
+ - python: 3.7
env: TOXENV=doc
+ install:
+ - git fetch --unshallow
+ - sudo apt-get install lintian
+ - pip install tox
+ script:
+ - make check_spelling
+ - tox
+ # Test all supported Python versions (but at the end, so we schedule
+ # longer-running jobs first)
+ - python: "3.10.1"
+ - python: 3.9
+ - python: 3.8
+ - python: 3.7
diff --git a/HACKING.rst b/CONTRIBUTING.rst
index 8a12e3e3..73122d79 100644
--- a/HACKING.rst
+++ b/CONTRIBUTING.rst
@@ -1,6 +1,5 @@
-*********************
-Hacking on cloud-init
-*********************
+Contributing to cloud-init
+**************************
This document describes how to contribute changes to cloud-init.
It assumes you have a `GitHub`_ account, and refers to your GitHub user
@@ -9,6 +8,27 @@ as ``GH_USER`` throughout.
Submitting your first pull request
==================================
+Summary
+-------
+
+Before any pull request can be accepted, you must do the following:
+
+* Sign the Canonical `contributor license agreement`_
+* Add yourself (alphabetically) to the in-repository list that we use
+ to track CLA signatures:
+ `tools/.github-cla-signers`_
+* Add or update any `unit tests`_ accordingly
+* Add or update any `integration tests`_ (if applicable)
+* Format code (using black and isort) with `tox -e do_format`
+* Ensure unit tests and linting pass using `tox`_
+* Submit a PR against the `main` branch of the `cloud-init` repository
+
+.. _unit tests: https://cloudinit.readthedocs.io/en/latest/topics/testing.html
+.. _integration tests: https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
+
+The detailed instructions
+-------------------------
+
Follow these steps to submit your first pull request to cloud-init:
* To contribute to cloud-init, you must sign the Canonical `contributor
@@ -23,18 +43,18 @@ Follow these steps to submit your first pull request to cloud-init:
* ensure that you fill in the GitHub username field.
* when prompted for 'Project contact' or 'Canonical Project
- Manager', enter 'Rick Harding'.
+ Manager', enter 'James Falcon'.
* If your company has signed the CLA for you, please contact us to
help in verifying which Launchpad/GitHub accounts are associated
with the company.
- * For any questions or help with the process, please email `Rick
- Harding <mailto:rick.harding@canonical.com>`_ with the subject,
+ * For any questions or help with the process, please email `James
+ Falcon <mailto:james.falcon@canonical.com>`_ with the subject,
"Cloud-Init CLA"
- * You also may contact user ``rick_h`` in the ``#cloud-init``
- channel on the Freenode IRC network.
+ * You also may contact user ``falcojr`` in the ``#cloud-init``
+ channel on the Libera IRC network.
* Configure git with your email and name for commit messages.
@@ -55,7 +75,7 @@ Follow these steps to submit your first pull request to cloud-init:
git clone git://github.com/canonical/cloud-init
cd cloud-init
git remote add GH_USER git@github.com:GH_USER/cloud-init.git
- git push GH_USER master
+ git push GH_USER main
* Read through the cloud-init `Code Review Process`_, so you understand
how your changes will end up in cloud-init's codebase.
@@ -78,7 +98,6 @@ Follow these steps to submit your first pull request to cloud-init:
.. _repository: https://github.com/canonical/cloud-init
.. _contributor license agreement: https://ubuntu.com/legal/contributors
.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
-.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers
.. _PR #344: https://github.com/canonical/cloud-init/pull/344
.. _PR #345: https://github.com/canonical/cloud-init/pull/345
@@ -98,6 +117,11 @@ The cloud-init team will review the two merge proposals and verify that
the CLA has been signed for the Launchpad user and record the
associated GitHub account.
+.. note::
+ If you are a first time contributor, you will not need to touch
+ Launchpad to contribute to cloud-init: all new CLA signatures are
+ handled as part of the GitHub pull request process described above.
+
Do these things for each feature or bug
=======================================
@@ -110,6 +134,10 @@ Do these things for each feature or bug
git commit
+* Apply black and isort formatting rules with `tox`_::
+
+ tox -e format
+
* Run unit tests and lint/formatting checks with `tox`_::
tox
@@ -118,7 +146,7 @@ Do these things for each feature or bug
git push -u GH_USER my-topic-branch
-* Use your browser to create a merge request:
+* Use your browser to create a pull request:
- Open the branch on GitHub
@@ -137,28 +165,30 @@ Do these things for each feature or bug
as footers with syntax as shown here.
The commit message should be one summary line of less than
- 74 characters followed by a blank line, and then one or more
- paragraphs describing the change and why it was needed.
+ 70 characters followed by a blank line, and then one or more
+ paragraphs wrapped at 72 characters describing the change and why
+ it was needed.
This is the message that will be used on the commit when it
- is sqaushed and merged into trunk.
+     is squashed and merged into main. If there is a related launchpad
+ bug, specify it at the bottom of the commit message.
- LP: #1
+ LP: #NNNNNNN (replace with the appropriate bug reference or remove
+ this line entirely if there is no associated bug)
Note that the project continues to use LP: #NNNNN format for closing
launchpad bugs rather than GitHub Issues.
- Click 'Create Pull Request`
-Then, someone in the `Ubuntu Server`_ team will review your changes and
+Then, a cloud-init committer will review your changes and
follow up in the pull request. Look at the `Code Review Process`_ doc
to understand the following steps.
-Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
+Feel free to ping and/or join ``#cloud-init`` on Libera irc if you
have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
-.. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server
.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html
Design
@@ -167,190 +197,48 @@ Design
This section captures design decisions that are helpful to know when
hacking on cloud-init.
+Python Support
+--------------
+Cloud-init upstream currently supports Python 3.6 and above.
+
+Cloud-init upstream will stay compatible with a particular python version
+for 6 years after release. After 6 years, we will stop testing upstream
+changes against the unsupported version of python and may introduce
+breaking changes. This policy may change as needed.
+
+The following table lists the cloud-init versions in which the
+minimum python version changed:
+
+================== ==================
+Cloud-init version Python version
+================== ==================
+22.1 3.6+
+20.3 3.5+
+19.4 2.7+
+================== ==================
+
Cloud Config Modules
--------------------
* Any new modules should use underscores in any new config options and not
hyphens (e.g. `new_option` and *not* `new-option`).
-.. _unit_testing:
-
-Testing
-------------
-
-cloud-init has both unit tests and integration tests. Unit tests can
-be found in-tree alongside the source code, as well as
-at ``tests/unittests``. Integration tests can be found at
-``tests/integration_tests``. Documentation specifically for integration
-tests can be found on the :ref:`integration_tests` page, but
-the guidelines specified below apply to both types of tests.
-
-cloud-init uses `pytest`_ to run its tests, and has tests written both
-as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
-The following guidelines should be followed:
-
-* For ease of organisation and greater accessibility for developers not
- familiar with pytest, all cloud-init unit tests must be contained
- within test classes
-
- * Put another way, module-level test functions should not be used
-
-* pytest test classes should use `pytest fixtures`_ to share
- functionality instead of inheritance
-
-* As all tests are contained within classes, it is acceptable to mix
- ``TestCase`` test classes and pytest test classes within the same
- test file
-
- * These can be easily distinguished by their definition: pytest
- classes will not use inheritance at all (e.g.
- `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
- subclass (indirectly) from ``TestCase`` (e.g.
- `TestPrependBaseCommands`_)
-
-* pytest tests should use bare ``assert`` statements, to take advantage
- of pytest's `assertion introspection`_
-
- * For ``==`` and other commutative assertions, the expected value
- should be placed before the value under test:
- ``assert expected_value == function_under_test()``
-
-* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use
- pytest features that are available in v2.8.7. This is an
- inexhaustive list of ways in which this may catch you out:
-
- * Support for using ``yield`` in ``pytest.fixture`` functions was
- only introduced in `pytest 3.0`_. Such functions must instead use
- the ``pytest.yield_fixture`` decorator.
-
- * Only the following built-in fixtures are available
- [#fixture-list]_:
-
- * ``cache``
- * ``capfd``
- * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial)
- * ``capsys``
- * ``monkeypatch``
- * ``pytestconfig``
- * ``record_xml_property``
- * ``recwarn``
- * ``tmpdir_factory``
- * ``tmpdir``
-
- * On xenial, the objects returned by the ``tmpdir`` fixture cannot be
- used where paths are required; they are rejected as invalid paths.
- You must instead use their ``.strpath`` attribute.
-
- * For example, instead of
- ``util.write_file(tmpdir.join("some_file"), ...)``, you should
- write ``util.write_file(tmpdir.join("some_file").strpath, ...)``.
-
- * The `pytest.param`_ function cannot be used. It was introduced in
- pytest 3.1, which means it is not available on xenial. The more
- limited mechanism it replaced was removed in pytest 4.0, so is not
- available in focal or later. The only available alternatives are
- to write mark-requiring test instances as completely separate
- tests, without utilising parameterisation, or to apply the mark to
- the entire parameterized test (and therefore every test instance).
-
-* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
- should start with ``m_`` to clearly distinguish them from non-mock
- variables
-
- * For example, ``m_readurl`` (which would be a mock for ``readurl``)
-
-* The ``assert_*`` methods that are available on ``Mock`` and
- ``MagicMock`` objects should be avoided, as typos in these method
- names may not raise ``AttributeError`` (and so can cause tests to
- silently pass). An important exception: if a ``Mock`` is
- `autospecced`_ then misspelled assertion methods *will* raise an
- ``AttributeError``, so these assertion methods may be used on
- autospecced ``Mock`` objects.
-
- For non-autospecced ``Mock`` s, these substitutions can be used
- (``m`` is assumed to be a ``Mock``):
-
- * ``m.assert_any_call(*args, **kwargs)`` => ``assert
- mock.call(*args, **kwargs) in m.call_args_list``
- * ``m.assert_called()`` => ``assert 0 != m.call_count``
- * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
- * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
- [mock.call(*args, **kwargs)] == m.call_args_list``
- * ``m.assert_called_with(*args, **kwargs)`` => ``assert
- mock.call(*args, **kwargs) == m.call_args_list[-1]``
- * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
- call_list: assert call in m.call_args_list``
-
- * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
- any_order=False)`` are not easily replicated in a single
- statement, so their use when appropriate is acceptable.
-
- * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
-
-* Test arguments should be ordered as follows:
-
- * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
- partially applies its generated ``Mock`` object as the first
- argument, so these arguments must go first.
- * ``pytest.mark.parametrize`` arguments, in the order specified to
- the ``parametrize`` decorator. These arguments are also provided
- by a decorator, so it's natural that they sit next to the
- ``mock.patch`` arguments.
- * Fixture arguments, alphabetically. These are not provided by a
- decorator, so they are last, and their order has no defined
- meaning, so we default to alphabetical.
-
-* It follows from this ordering of test arguments (so that we retain
- the property that arguments left-to-right correspond to decorators
- bottom-to-top) that test decorators should be ordered as follows:
-
- * ``pytest.mark.parametrize``
- * ``mock.patch``
-
-* When there are multiple patch calls in a test file for the module it
- is testing, it may be desirable to capture the shared string prefix
- for these patch calls in a module-level variable. If used, such
- variables should be named ``M_PATH`` or, for datasource tests,
- ``DS_PATH``.
-
-.. _pytest: https://docs.pytest.org/
-.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
-.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
-.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
-.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
-.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
-.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
-.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
+Tests
+-----
+
+Submissions to cloud-init must include testing. See :ref:`testing` for
+details on these requirements.
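A minimal sketch of the style those guidelines describe (class-based pytest
tests, bare ``assert`` statements with the expected value placed before the
value under test, variants expressed via ``pytest.mark.parametrize``); the
``render_hostname`` helper is hypothetical and exists only to keep the
example self-contained::

    import pytest


    def render_hostname(metadata):
        """Hypothetical helper, not part of cloud-init."""
        return metadata.get("hostname", "localhost").lower()


    class TestRenderHostname:
        @pytest.mark.parametrize(
            "metadata,expected",
            [
                ({"hostname": "Node-1"}, "node-1"),
                ({}, "localhost"),
            ],
        )
        def test_render(self, metadata, expected):
            # Bare assert, expected value first.
            assert expected == render_hostname(metadata)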
Type Annotations
----------------
The cloud-init codebase uses Python's annotation support for storing
-type annotations in the style specified by `PEP-484`_. Their use in
-the codebase is encouraged but with one important caveat: types from
-the ``typing`` module cannot be used.
-
-cloud-init still supports Python 3.4, which doesn't have the ``typing``
-module in the stdlib. This means that the use of any types from the
-``typing`` module in the codebase would require installation of an
-additional Python module on platforms using Python 3.4. As such
-platforms are generally in maintenance mode, the introduction of a new
-dependency may act as a break in compatibility in practical terms.
-
-Similarly, only function annotations are appropriate for use, as the
-variable annotations specified in `PEP-526`_ were introduced in Python
-3.6.
+type annotations in the style specified by `PEP-484`_ and `PEP-526`_.
+Their use in the codebase is encouraged.
.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
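As a brief illustration (not taken from the cloud-init codebase), PEP-484
covers annotated function signatures while PEP-526 covers standalone
variable annotations::

    from typing import Dict, Optional

    # PEP-526: a variable annotation at module (or class) scope.
    DEFAULT_TIMEOUT: int = 30


    # PEP-484: an annotated function signature.
    def lookup(table: Dict[str, str], key: str) -> Optional[str]:
        """Return the value for key, or None when the key is absent."""
        return table.get(key)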
-.. [#fixture-list] This list of fixtures (with markup) can be
- reproduced by running::
-
- py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/'
-
- in a xenial lxd container with python3-pytest-catchlog installed.
-
Feature Flags
-------------
@@ -662,6 +550,7 @@ References
* `PR #363`_, the discussion which prompted finally starting this
refactor (and where a lot of the above details were hashed out)
+.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers
.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
.. _Mina Galić's email the the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
diff --git a/ChangeLog b/ChangeLog
index 33b2bf74..676264cd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,548 @@
+22.1
+ - sources/azure: report ready in local phase (#1265) [Chris Patterson]
+ - sources/azure: validate IMDS network configuration metadata (#1257)
+ [Chris Patterson]
+ - docs: Add more details to runcmd docs (#1266)
+ - use PEP 589 syntax for TypeDict (#1253)
+ - mypy: introduce type checking (#1254) [Chris Patterson]
+ - Fix extra ipv6 issues, code reduction and simplification (#1243) [eb3095]
+ - tests: when generating crypted password, generate in target env (#1252)
+ - sources/azure: address mypy/pyright typing complaints (#1245)
+ [Chris Patterson]
+ - Docs for x-shellscript* userdata (#1260)
+ - test_apt_security: azure platform has specific security URL overrides
+ (#1263)
+ - tests: lsblk --json output changes mountpoint key to mountpoinst []
+ (#1261)
+ - mounts: fix mount opts string for ephemeral disk (#1250)
+ [Chris Patterson]
+ - Shell script handlers by freq (#1166) [Chris Lalos]
+ - minor improvements to documentation (#1259) [Mark Esler]
+ - cloud-id: publish /run/cloud-init/cloud-id-<cloud-type> files (#1244)
+ - add "eslerm" as contributor (#1258) [Mark Esler]
+ - sources/azure: refactor ssh key handling (#1248) [Chris Patterson]
+ - bump pycloudlib (#1256)
+ - sources/hetzner: Use EphemeralDHCPv4 instead of static configuration
+ (#1251) [Markus Schade]
+ - bump pycloudlib version (#1255)
+ - Fix IPv6 netmask format for sysconfig (#1215) [Harald] (LP: #1959148)
+ - sources/azure: drop debug print (#1249) [Chris Patterson]
+ - tests: do not check instance.pull_file().ok() (#1246)
+ - sources/azure: consolidate ephemeral DHCP configuration (#1229)
+ [Chris Patterson]
+ - cc_salt_minion freebsd fix for rc.conf (#1236)
+ - sources/azure: fix metadata check in _check_if_nic_is_primary() (#1232)
+ [Chris Patterson]
+ - Add _netdev option to mount Azure ephemeral disk (#1213) [Eduardo Otubo]
+ - testing: stop universally overwriting /etc/cloud/cloud.cfg.d (#1237)
+ - Integration test changes (#1240)
+ - Fix Gentoo Locales (#1205)
+ - Add "slingamn" as contributor (#1235) [Shivaram Lingamneni]
+ - integration: do not LXD bind mount /etc/cloud/cloud.cfg.d (#1234)
+ - Integration testing docs and refactor (#1231)
+ - vultr: Return metadata immediately when found (#1233) [eb3095]
+ - spell check docs with spellintian (#1223)
+ - docs: include upstream python version info (#1230)
+ - Schema a d (#1211)
+ - Move LXD to end ds-identify DSLIST (#1228) (LP: #1959118)
+ - fix parallel tox execution (#1214)
+ - sources/azure: refactor _report_ready_if_needed and _poll_imds (#1222)
+ [Chris Patterson]
+ - Do not support setting up archive.canonical.com as a source (#1219)
+ [Steve Langasek] (LP: #1959343)
+ - Vultr: Fix lo being used for DHCP, try next on cmd fail (#1208) [eb3095]
+ - sources/azure: refactor _should_reprovision[_after_nic_attach]() logic
+ (#1206) [Chris Patterson]
+ - update ssh logs to show ssh private key gens pub and simplify code
+ (#1221) [Steve Weber]
+ - Remove mitechie from stale PR github action (#1217)
+ - Include POST format in cc_phone_home docs (#1218) (LP: #1959149)
+ - Add json parsing of ip addr show (SC-723) (#1210)
+ - cc_rsyslog: fix typo in docstring (#1207) [Louis Sautier]
+ - Update .github-cla-signers (#1204) [Chris Lalos]
+ - sources/azure: drop unused case in _report_failure() (#1200)
+ [Chris Patterson]
+ - sources/azure: always initialize _ephemeral_dhcp_ctx on unpickle (#1199)
+ [Chris Patterson]
+ - Add support for gentoo templates and cloud.cfg (#1179) [vteratipally]
+ - sources/azure: unpack ret tuple in crawl_metadata() (#1194)
+ [Chris Patterson]
+ - tests: focal caplog has whitespace indentation for multi-line logs
+ (#1201)
+ - Seek interfaces, skip dummy interface, fix region codes (#1192) [eb3095]
+ - integration: test against the Ubuntu daily images (#1198)
+ [Paride Legovini]
+ - cmd: status and cloud-id avoid change in behavior for 'not run' (#1197)
+ - tox: pass PYCLOUDLIB_* env vars into integration tests when present
+ (#1196)
+ - sources/azure: set ovf_is_accessible when OVF is read successfully
+ (#1193) [Chris Patterson]
+ - Enable OVF environment transport via ISO in example (#1195) [Megian]
+ - sources/azure: consolidate DHCP variants to EphemeralDHCPv4WithReporting
+ (#1190) [Chris Patterson]
+ - Single JSON schema validation in early boot (#1175)
+ - Add DatasourceOVF network-config propery to Ubuntu OVF example (#1184)
+ [Megian]
+ - testing: support pycloudlib config file (#1189)
+ - Ensure system_cfg read before ds net config on Oracle (SC-720) (#1174)
+ (LP: #1956788)
+ - Test Optimization Proposal (SC-736) (#1188)
+ - cli: cloud-id report not-run or disabled state as cloud-id (#1162)
+ - Remove distutils usage (#1177) [Shreenidhi Shedi]
+ - add .python-version to gitignore (#1186)
+ - print error if datasource import fails (#1170)
+ [Emanuele Giuseppe Esposito]
+ - Add new config module to set keyboard layout (#1176)
+ [maxnet] (LP: #1951593)
+ - sources/azure: rename metadata_type -> MetadataType (#1181)
+ [Chris Patterson]
+ - Remove 3.5 and xenial support (SC-711) (#1167)
+ - tests: mock LXD datasource detection in ds-identify on LXD containers
+ (#1178)
+ - pylint: silence errors on compat code for old jsonschema (#1172)
+ [Paride Legovini]
+ - testing: Add 3.10 Test Coverage (#1173)
+ - Remove unittests from integration test job in travis (#1141)
+ - Don't throw exceptions for empty cloud config (#1130)
+ - bsd/resolv.d/ avoid duplicated entries (#1163) [Gonéri Le Bouder]
+ - sources/azure: do not persist failed_desired_api_version flag (#1159)
+ [Chris Patterson]
+ - Update cc_ubuntu_advantage calls to assume-yes (#1158)
+ [John Chittum] (LP: #1954842)
+ - openbsd: properly restart the network on 7.0 (#1150) [Gonéri Le Bouder]
+ - Add .git-blame-ignore-revs (#1161)
+ - Adopt Black and isort (SC-700) (#1157)
+ - Include dpkg frontend lock in APT_LOCK_FILES (#1153)
+ - tests/cmd/query: fix test run as root and add coverage for defaults
+ (#1156) [Chris Patterson] (LP: #1825027)
+ - Schema processing changes (SC-676) (#1144)
+ - Add dependency workaround for impish in bddeb (#1148)
+ - netbsd: install new dep packages (#1151) [Gonéri Le Bouder]
+ - find_devs_with_openbsd: ensure we return the last entry (#1149)
+ [Gonéri Le Bouder]
+ - sources/azure: remove unnecessary hostname bounce (#1143)
+ [Chris Patterson]
+ - find_devs/openbsd: accept ISO on disk (#1132)
+ [Gonéri Le Bouder] (GH:
+ https://github.com/ContainerCraft/kmi/issues/12)
+ - Improve error log message when mount failed (#1140) [Ksenija Stanojevic]
+ - add KsenijaS as a contributor (#1145) [Ksenija Stanojevic]
+ - travis - don't run integration tests if no deb (#1139)
+ - factor out function for getting top level directory of cloudinit (#1136)
+ - testing: Add deterministic test id (#1138)
+ - mock sleep() in azure test (#1137)
+ - Add miraclelinux support (#1128) [Haruki TSURUMOTO]
+ - docs: Make MACs lowercase in network config (#1135) (GH: #1876941)
+ - Add Strict Metaschema Validation (#1101)
+ - update dead link (#1133)
+ - cloudinit/net: handle two different routes for the same ip (#1124)
+ [Emanuele Giuseppe Esposito]
+ - docs: pin mistune dependency (#1134)
+ - Reorganize unit test locations under tests/unittests (#1126)
+ - Fix exception when no activator found (#1129) (GH: #1948681)
+ - jinja: provide and document jinja-safe key aliases in instance-data
+ (SC-622) (#1123)
+ - testing: Remove date from final_message test (SC-638) (#1127)
+ - Move GCE metadata fetch to init-local (SC-502) (#1122)
+ - Fix missing metadata routes for vultr (#1125) [eb3095]
+ - cc_ssh_authkey_fingerprints.py: prevent duplicate messages on console
+ (#1081) [dermotbradley]
+ - sources/azure: remove unused remnants related to agent command (#1119)
+ [Chris Patterson]
+ - github: update PR template's contributing URL (#1120) [Chris Patterson]
+ - docs: Rename HACKING.rst to CONTRIBUTING.rst (#1118)
+ - testing: monkeypatch system_info call in unit tests (SC-533) (#1117)
+ - Fix Vultr timeout and wait values (#1113) [eb3095]
+ - lxd: add preference for LXD cloud-init.* config keys over user keys
+ (#1108)
+ - VMware: source /etc/network/interfaces.d/* on Debian
+ [chengcheng-chcheng] (GH: #1950136)
+ - Add cjp256 as contributor (#1109) [Chris Patterson]
+ - integration_tests: Ensure log directory exists before symlinking to it
+ (#1110)
+ - testing: add growpart integration test (#1104)
+ - integration_test: Speed up CI run time (#1111)
+ - Some miscellaneous integration test fixes (SC-606) (#1103)
+ - tests: specialize lxd_discovery test for lxd_vm vendordata (#1106)
+ - Add convenience symlink to integration test output (#1105)
+ - Fix for set-name bug in networkd renderer (#1100)
+ [Andrew Kutz] (GH: #1949407)
+ - Wait for apt lock (#1034) (GH: #1944611)
+ - testing: stop chef test from running on openstack (#1102)
+ - alpine.py: add options to the apk upgrade command (#1089) [dermotbradley]
+
+21.4
+ - Azure: fallback nic needs to be reevaluated during reprovisioning
+ (#1094) [Anh Vo]
+ - azure: pps imds (#1093) [Anh Vo]
+ - testing: Remove calls to 'install_new_cloud_init' (#1092)
+ - Add LXD datasource (#1040)
+ - Fix unhandled apt_configure case. (#1065) [Brett Holman]
+ - Allow libexec for hotplug (#1088)
+ - Add necessary mocks to test_ovf unit tests (#1087)
+ - Remove (deprecated) apt-key (#1068) [Brett Holman] (LP: #1836336)
+ - distros: Remove a completed "TODO" comment (#1086)
+ - cc_ssh.py: Add configuration for controlling ssh-keygen output (#1083)
+ [dermotbradley]
+ - Add "install hotplug" module (SC-476) (#1069) (LP: #1946003)
+ - hosts.alpine.tmpl: rearrange the order of short and long hostnames
+ (#1084) [dermotbradley]
+ - Add max version to docutils
+ - cloudinit/dmi.py: Change warning to debug to prevent console display
+ (#1082) [dermotbradley]
+ - remove unnecessary EOF string in
+ disable-sshd-keygen-if-cloud-init-active.conf (#1075) [Emanuele
+ Giuseppe Esposito]
+ - Add module 'write-files-deferred' executed in stage 'final' (#916)
+ [Lucendio]
+ - Bump pycloudlib to fix CI (#1080)
+ - Remove pin in dependencies for jsonschema (#1078)
+ - Add "Google" as possible system-product-name (#1077) [vteratipally]
+ - Update Debian security suite for bullseye (#1076) [Johann Queuniet]
+ - Leave the details of service management to the distro (#1074)
+ [Andy Fiddaman]
+ - Fix typos in setup.py (#1059) [Christian Clauss]
+ - Update Azure _unpickle (SC-500) (#1067) (LP: #1946644)
+ - cc_ssh.py: fix private key group owner and permissions (#1070)
+ [Emanuele Giuseppe Esposito]
+ - VMware: read network-config from ISO (#1066) [Thomas Weißschuh]
+ - testing: mock sleep in gce unit tests (#1072)
+ - CloudStack: fix data-server DNS resolution (#1004)
+ [Olivier Lemasle] (LP: #1942232)
+ - Fix unit test broken by pyyaml upgrade (#1071)
+ - testing: add get_cloud function (SC-461) (#1038)
+ - Inhibit sshd-keygen@.service if cloud-init is active (#1028)
+ [Ryan Harper]
+ - VMWARE: search the deployPkg plugin in multiarch dir (#1061)
+ [xiaofengw-vmware] (LP: #1944946)
+ - Fix set-name/interface DNS bug (#1058) [Andrew Kutz] (LP: #1946493)
+ - Use specified tmp location for growpart (#1046) [jshen28]
+ - .gitignore: ignore tags file for ctags users (#1057) [Brett Holman]
+ - Allow comments in runcmd and report failed commands correctly (#1049)
+ [Brett Holman] (LP: #1853146)
+ - tox integration: pass the *_proxy, GOOGLE_*, GCP_* env vars (#1050)
+ [Paride Legovini]
+ - Allow disabling of network activation (SC-307) (#1048) (LP: #1938299)
+ - renderer: convert relative imports to absolute (#1052) [Paride Legovini]
+ - Support ETHx_IP6_GATEWAY, SET_HOSTNAME on OpenNebula (#1045)
+ [Vlastimil Holer]
+ - integration-requirements: bump the pycloudlib commit (#1047)
+ [Paride Legovini]
+ - Allow Vultr to set MTU and use as-is configs (#1037) [eb3095]
+ - pin jsonschema in requirements.txt (#1043)
+ - testing: remove cloud_tests (#1020)
+ - Add andgein as contributor (#1042) [Andrew Gein]
+ - Make wording for module frequency consistent (#1039) [Nicolas Bock]
+ - Use ascii code for growpart (#1036) [jshen28]
+ - Add jshen28 as contributor (#1035) [jshen28]
+ - Skip test_cache_purged_on_version_change on Azure (#1033)
+ - Remove invalid ssh_import_id from examples (#1031)
+ - Cleanup Vultr support (#987) [eb3095]
+ - docs: update cc_disk_setup for fs to raw disk (#1017)
+ - HACKING.rst: change contact info to James Falcon (#1030)
+ - tox: bump the pinned flake8 and pylint version (#1029)
+ [Paride Legovini] (LP: #1944414)
+ - Add retries to DataSourceGCE.py when connecting to GCE (#1005)
+ [vteratipally]
+ - Set Azure to apply networking config every BOOT (#1023)
+ - Add connectivity_url to Oracle's EphemeralDHCPv4 (#988) (LP: #1939603)
+ - docs: fix typo and include sudo for report bugs commands (#1022)
+ [Renan Rodrigo] (LP: #1940236)
+ - VMware: Fix typo introduced in #947 and add test (#1019) [PengpengSun]
+ - Update IPv6 entries in /etc/hosts (#1021) [Richard Hansen] (LP: #1943798)
+ - Integration test upgrades for the 21.3-1 SRU (#1001)
+ - Add Jille to tools/.github-cla-signers (#1016) [Jille Timmermans]
+ - Improve ug_util.py (#1013) [Shreenidhi Shedi]
+ - Support openEuler OS (#1012) [zhuzaifangxuele]
+ - ssh_utils.py: ignore when sshd_config options are not key/value pairs
+ (#1007) [Emanuele Giuseppe Esposito]
+ - Set Azure to only update metadata on BOOT_NEW_INSTANCE (#1006)
+ - cc_update_etc_hosts: Use the distribution-defined path for the hosts
+ file (#983) [Andy Fiddaman]
+ - Add CloudLinux OS support (#1003) [Alexandr Kravchenko]
+ - puppet config: add the start_agent option (#1002) [Andrew Bogott]
+ - Fix `make style-check` errors (#1000) [Shreenidhi Shedi]
+ - Make cloud-id copyright year (#991) [Andrii Podanenko]
+ - Add support to accept-ra in networkd renderer (#999) [Shreenidhi Shedi]
+ - Update ds-identify to pass shellcheck (#979) [Andrew Kutz]
+ - Azure: Retry dhcp on timeouts when polling reprovisiondata (#998)
+ [aswinrajamannar]
+ - testing: Fix ssh keys integration test (#992)
+
+21.3
+ - Azure: During primary nic detection, check interface status continuously
+ before rebinding again (#990) [aswinrajamannar]
+ - Fix home permissions modified by ssh module (SC-338) (#984)
+ (LP: #1940233)
+ - Add integration test for sensitive jinja substitution (#986)
+ - Ignore hotplug socket when collecting logs (#985) (LP: #1940235)
+ - testing: Add missing mocks to test_vmware.py (#982)
+ - add Zadara Edge Cloud Platform to the supported clouds list (#963)
+ [sarahwzadara]
+ - testing: skip upgrade tests on LXD VMs (#980)
+ - Only invoke hotplug socket when functionality is enabled (#952)
+ - Revert unnecesary lcase in ds-identify (#978) [Andrew Kutz]
+ - cc_resolv_conf: fix typos (#969) [Shreenidhi Shedi]
+ - Replace broken httpretty tests with mock (SC-324) (#973)
+ - Azure: Check if interface is up after sleep when trying to bring it up
+ (#972) [aswinrajamannar]
+ - Update dscheck_VMware's rpctool check (#970) [Shreenidhi Shedi]
+ - Azure: Logging the detected interfaces (#968) [Moustafa Moustafa]
+ - Change netifaces dependency to 0.10.4 (#965) [Andrew Kutz]
+ - Azure: Limit polling network metadata on connection errors (#961)
+ [aswinrajamannar]
+ - Update inconsistent indentation (#962) [Andrew Kutz]
+ - cc_puppet: support AIO installations and more (#960) [Gabriel Nagy]
+ - Add Puppet contributors to CLA signers (#964) [Noah Fontes]
+ - Datasource for VMware (#953) [Andrew Kutz]
+ - photon: refactor hostname handling and add networkd activator (#958)
+ [sshedi]
+ - Stop copying ssh system keys and check folder permissions (#956)
+ [Emanuele Giuseppe Esposito]
+ - testing: port remaining cloud tests to integration testing framework
+ (SC-191) (#955)
+ - generate contents for ovf-env.xml when provisioning via IMDS (#959)
+ [Anh Vo]
+ - Add support for EuroLinux 7 && EuroLinux 8 (#957) [Aleksander Baranowski]
+ - Implementing device_aliases as described in docs (#945)
+ [Mal Graty] (LP: #1867532)
+ - testing: fix test_ssh_import_id.py (#954)
+ - Add ability to manage fallback network config on PhotonOS (#941) [sshedi]
+ - Add VZLinux support (#951) [eb3095]
+ - VMware: add network-config support in ovf-env.xml (#947) [PengpengSun]
+ - Update pylint to v2.9.3 and fix the new issues it spots (#946)
+ [Paride Legovini]
+ - Azure: mount default provisioning iso before try device listing (#870)
+ [Anh Vo]
+ - Document known hotplug limitations (#950)
+ - Initial hotplug support (#936)
+ - Fix MIME policy failure on python version upgrade (#934)
+ - run-container: fixup the centos repos baseurls when using http_proxy
+ (#944) [Paride Legovini]
+ - tools: add support for building rpms on rocky linux (#940)
+ - ssh-util: allow cloudinit to merge all ssh keys into a custom user
+ file, defined in AuthorizedKeysFile (#937) [Emanuele Giuseppe Esposito]
+ (LP: #1911680)
+ - VMware: new "allow_raw_data" switch (#939) [xiaofengw-vmware]
+ - bump pycloudlib version (#935)
+ - add renanrodrigo as a contributor (#938) [Renan Rodrigo]
+ - testing: simplify test_upgrade.py (#932)
+ - freebsd/net_v1 format: read MTU from root (#930) [Gonéri Le Bouder]
+ - Add new network activators to bring up interfaces (#919)
+ - - Detect a Python version change and clear the cache (#857)
+ [Robert Schweikert]
+ - cloud_tests: fix the Impish release name (#931) [Paride Legovini]
+ - Removed distro specific network code from Photon (#929) [sshedi]
+ - Add support for VMware PhotonOS (#909) [sshedi]
+ - cloud_tests: add impish release definition (#927) [Paride Legovini]
+ - docs: fix stale links rename master branch to main (#926)
+ - Fix DNS in NetworkState (SC-133) (#923)
+ - tests: Add 'adhoc' mark for integration tests (#925)
+ - Fix the spelling of "DigitalOcean" (#924) [Mark Mercado]
+ - Small Doc Update for ReportEventStack and Test (#920) [Mike Russell]
+ - Replace deprecated collections.Iterable with abc replacement (#922)
+ (LP: #1932048)
+ - testing: OCI availability domain is now required (SC-59) (#910)
+ - add DragonFlyBSD support (#904) [Gonéri Le Bouder]
+ - Use instance-data-sensitive.json in jinja templates (SC-117) (#917)
+ (LP: #1931392)
+ - doc: Update NoCloud docs stating required files (#918) (LP: #1931577)
+ - build-on-netbsd: don't pin a specific py3 version (#913)
+ [Gonéri Le Bouder]
+ - - Create the log file with 640 permissions (#858) [Robert Schweikert]
+ - Allow braces to appear in dhclient output (#911) [eb3095]
+ - Docs: Replace all freenode references with libera (#912)
+ - openbsd/net: flush the route table on net restart (#908)
+ [Gonéri Le Bouder]
+ - Add Rocky Linux support to cloud-init (#906) [Louis Abel]
+ - Add "esposem" as contributor (#907) [Emanuele Giuseppe Esposito]
+ - Add integration test for #868 (#901)
+ - Added support for importing keys via primary/security mirror clauses
+ (#882) [Paul Goins] (LP: #1925395)
+ - [examples] config-user-groups expire in the future (#902)
+ [Geert Stappers]
+ - BSD: static network, set the mtu (#894) [Gonéri Le Bouder]
+ - Add integration test for lp-1920939 (#891)
+ - Fix unit tests breaking from new httpretty version (#903)
+ - Allow user control over update events (#834)
+ - Update test characters in substitution unit test (#893)
+ - cc_disk_setup.py: remove UDEVADM_CMD definition as not used (#886)
+ [dermotbradley]
+ - Add AlmaLinux OS support (#872) [Andrew Lukoshko]
+
+21.2
+ - Add \r\n check for SSH keys in Azure (#889)
+ - Revert "Add support to resize rootfs if using LVM (#721)" (#887)
+ (LP: #1922742)
+ - Add Vultaire as contributor (#881) [Paul Goins]
+ - Azure: adding support for consuming userdata from IMDS (#884) [Anh Vo]
+ - test_upgrade: modify test_upgrade_package to run for more sources (#883)
+ - Fix chef module run failure when chef_license is set (#868) [Ben Hughes]
+ - Azure: Retry net metadata during nic attach for non-timeout errs (#878)
+ [aswinrajamannar]
+ - Azure: Retrieve username and hostname from IMDS (#865) [Thomas Stringer]
+ - Azure: eject the provisioning iso before reporting ready (#861) [Anh Vo]
+ - Use `partprobe` to re-read partition table if available (#856)
+ [Nicolas Bock] (LP: #1920939)
+ - fix error on upgrade caused by new vendordata2 attributes (#869)
+ (LP: #1922739)
+ - add prefer_fqdn_over_hostname config option (#859)
+ [hamalq] (LP: #1921004)
+ - Emit dots on travis to avoid timeout (#867)
+ - doc: Replace remaining references to user-scripts as a config module
+ (#866) [Ryan Harper]
+ - azure: Removing ability to invoke walinuxagent (#799) [Anh Vo]
+ - Add Vultr support (#827) [David Dymko]
+ - Fix unpickle for source paths missing run_dir (#863)
+ [lucasmoura] (LP: #1899299)
+ - sysconfig: use BONDING_MODULE_OPTS on SUSE (#831) [Jens Sandmann]
+ - bringup_static_routes: fix gateway check (#850) [Petr Fedchenkov]
+ - add hamalq user (#860) [hamalq]
+ - Add support to resize rootfs if using LVM (#721)
+ [Eduardo Otubo] (LP: #1799953)
+ - Fix mis-detecting network configuration in initramfs cmdline (#844)
+ (LP: #1919188)
+ - tools/write-ssh-key-fingerprints: do not display empty header/footer
+ (#817) [dermotbradley]
+ - Azure helper: Ensure Azure http handler sleeps between retries (#842)
+ [Johnson Shi]
+ - Fix chef apt source example (#826) [timothegenzmer]
+ - .travis.yml: generate an SSH key before running tests (#848)
+ - write passwords only to serial console, lock down cloud-init-output.log
+ (#847) (LP: #1918303)
+ - Fix apt default integration test (#845)
+ - integration_tests: bump pycloudlib dependency (#846)
+ - Fix stack trace if vendordata_raw contained an array (#837) [eb3095]
+ - archlinux: Fix broken locale logic (#841)
+ [Kristian Klausen] (LP: #1402406)
+ - Integration test for #783 (#832)
+ - integration_tests: mount more paths IN_PLACE (#838)
+ - Fix requiring device-number on EC2 derivatives (#836) (LP: #1917875)
+ - Remove the vi comment from the part-handler example (#835)
+ - net: exclude OVS internal interfaces in get_interfaces (#829)
+ (LP: #1912844)
+ - tox.ini: pass OS_* environment variables to integration tests (#830)
+ - integration_tests: add OpenStack as a platform (#804)
+ - Add flexibility to IMDS api-version (#793) [Thomas Stringer]
+ - Fix the TestApt tests using apt-key on Xenial and Hirsute (#823)
+ [Paride Legovini] (LP: #1916629)
+ - doc: remove duplicate "it" from nocloud.rst (#825) [V.I. Wood]
+ - archlinux: Use hostnamectl to set the transient hostname (#797)
+ [Kristian Klausen]
+ - cc_keys_to_console.py: Add documentation for recently added config key
+ (#824) [dermotbradley]
+ - Update cc_set_hostname documentation (#818) [Toshi Aoyama]
+
+21.1
+ - Azure: Support for VMs without ephemeral resource disks. (#800)
+ [Johnson Shi] (LP: #1901011)
+ - cc_keys_to_console: add option to disable key emission (#811)
+ [Michael Hudson-Doyle] (LP: #1915460)
+ - integration_tests: introduce lxd_use_exec mark (#802)
+ - azure: case-insensitive UUID to avoid new IID during kernel upgrade
+ (#798) (LP: #1835584)
+ - stale.yml: don't ask submitters to reopen PRs (#816)
+ - integration_tests: fix use of SSH agent within tox (#815)
+ - integration_tests: add UPGRADE CloudInitSource (#812)
+ - integration_tests: use unique MAC addresses for tests (#813)
+ - Update .gitignore (#814)
+ - Port apt cloud_tests to integration tests (#808)
+ - integration_tests: fix test_gh626 on LXD VMs (#809)
+ - Fix attempting to decode binary data in test_seed_random_data test (#806)
+ - Remove wait argument from tests with session_cloud calls (#805)
+ - Datasource for UpCloud (#743) [Antti Myyrä]
+ - test_gh668: fix failure on LXD VMs (#801)
+ - openstack: read the dynamic metadata group vendor_data2.json (#777)
+ [Andrew Bogott] (LP: #1841104)
+ - includedir in suoders can be prefixed by "arroba" (#783)
+ [Jordi Massaguer Pla]
+ - [VMware] change default max wait time to 15s (#774) [xiaofengw-vmware]
+ - Revert integration test associated with reverted #586 (#784)
+ - Add jordimassaguerpla as contributor (#787) [Jordi Massaguer Pla]
+ - Add Rick Harding to CLA signers (#792) [Rick Harding]
+ - HACKING.rst: add clarifying note to LP CLA process section (#789)
+ - Stop linting cloud_tests (#791)
+ - cloud-tests: update cryptography requirement (#790) [Joshua Powers]
+ - Remove 'remove-raise-on-failure' calls from integration_tests (#788)
+ - Use more cloud defaults in integration tests (#757)
+ - Adding self to cla signers (#776) [Andrew Bogott]
+ - doc: avoid two warnings (#781) [Dan Kenigsberg]
+ - Use proper spelling for Red Hat (#778) [Dan Kenigsberg]
+ - Add antonyc to .github-cla-signers (#747) [Anton Chaporgin]
+ - integration_tests: log image serial if available (#772)
+ - [VMware] Support cloudinit raw data feature (#691) [xiaofengw-vmware]
+ - net: Fix static routes to host in eni renderer (#668) [Pavel Abalikhin]
+ - .travis.yml: don't run cloud_tests in CI (#756)
+ - test_upgrade: add some missing commas (#769)
+ - cc_seed_random: update documentation and fix integration test (#771)
+ (LP: #1911227)
+ - Fix test gh-632 test to only run on NoCloud (#770) (LP: #1911230)
+ - archlinux: fix package upgrade command handling (#768) [Bao Trinh]
+ - integration_tests: add integration test for LP: #1910835 (#761)
+ - Fix regression with handling of IMDS ssh keys (#760) [Thomas Stringer]
+ - integration_tests: log cloud-init version in SUT (#758)
+ - Add ajmyyra as contributor (#742) [Antti Myyrä]
+ - net_convert: add some missing help text (#755)
+ - Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL
+ (#753) [Eduardo Otubo]
+ - doc: document missing IPv6 subnet types (#744) [Antti Myyrä]
+ - Add example configuration for datasource `AliYun` (#751) [Xiaoyu Zhong]
+ - integration_tests: add SSH key selection settings (#754)
+ - fix a typo in man page cloud-init.1 (#752) [Amy Chen]
+ - network-config-format-v2.rst: add Netplan Passthrough section (#750)
+ - stale: re-enable post holidays (#749)
+ - integration_tests: port ca_certs tests from cloud_tests (#732)
+ - Azure: Add telemetry for poll IMDS (#741) [Johnson Shi]
+ - doc: move testing section from HACKING to its own doc (#739)
+ - No longer allow integration test failures on travis (#738)
+ - stale: fix error in definition (#740)
+ - integration_tests: set log-cli-level to INFO by default (#737)
+ - PULL_REQUEST_TEMPLATE.md: use backticks around commit message (#736)
+ - stale: disable check for holiday break (#735)
+ - integration_tests: log the path we collect logs into (#733)
+ - .travis.yml: add (most) supported Python versions to CI (#734)
+ - integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731)
+ - cc_ca_certs: add RHEL support (#633) [cawamata]
+ - Azure: only generate config for NICs with addresses (#709)
+ [Thomas Stringer]
+ - doc: fix CloudStack configuration example (#707) [Olivier Lemasle]
+ - integration_tests: restrict test_lxd_bridge appropriately (#730)
+ - Add integration tests for CLI functionality (#729)
+ - Integration test for gh-626 (#728)
+ - Some test_upgrade fixes (#726)
+ - Ensure overriding test vars with env vars works for booleans (#727)
+ - integration_tests: port lxd_bridge test from cloud_tests (#718)
+ - Integration test for gh-632. (#725)
+ - Integration test for gh-671 (#724)
+ - integration-requirements.txt: bump pycloudlib commit (#723)
+ - Drop unnecessary shebang from cmd/main.py (#722) [Eduardo Otubo]
+ - Integration test for LP: #1813396 and #669 (#719)
+ - integration_tests: include timestamp in log output (#720)
+ - integration_tests: add test for LP: #1898997 (#713)
+ - Add integration test for power_state_change module (#717)
+ - Update documentation for network-config-format-v2 (#701) [ggiesen]
+ - sandbox CA Cert tests to not require ca-certificates (#715)
+ [Eduardo Otubo]
+ - Add upgrade integration test (#693)
+ - Integration test for 570 (#712)
+ - Add ability to keep snapshotted images in integration tests (#711)
+ - Integration test for pull #586 (#706)
+ - integration_tests: introduce skipping of tests by OS (#702)
+ - integration_tests: introduce IntegrationInstance.restart (#708)
+ - Add lxd-vm to list of valid integration test platforms (#705)
+ - Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL
+ (#685) [Eduardo Otubo]
+ - Delete image snapshots created for integration tests (#682)
+ - Parametrize ssh_keys_provided integration test (#700) [lucasmoura]
+ - Drop use_sudo attribute on IntegrationInstance (#694) [lucasmoura]
+ - cc_apt_configure: add riscv64 as a ports arch (#687)
+ [Dimitri John Ledkov]
+ - cla: add xnox (#692) [Dimitri John Ledkov]
+ - Collect logs from integration test runs (#675)
+
+20.4.1
+ - Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)"
+
20.4
- tox: avoid tox testenv subsvars for xenial support (#684)
- Ensure proper root permissions in integration tests (#664) [James Falcon]
@@ -528,7 +1073,7 @@
- docs: add additional details to per-instance/once [Joshua Powers]
- Update doc-requirements.txt [Joshua Powers]
- doc-requirements: add missing dep [Joshua Powers]
- - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76)
+ - dhcp: Support Red Hat dhcp rfc3442 lease format for option 121 (#76)
[Eric Lafontaine] (LP: #1850642)
- network_state: handle empty v1 config (#45) (LP: #1852496)
- docs: Add document on how to report bugs [Joshua Powers]
diff --git a/Makefile b/Makefile
index 5fb0fcbf..4ead786f 100644
--- a/Makefile
+++ b/Makefile
@@ -18,13 +18,10 @@ all: check
check: check_version test yaml
-style-check: pep8 $(pyflakes)
+style-check: flake8
-pep8:
- @$(CWD)/tools/run-pep8
-
-pyflakes:
- @$(CWD)/tools/run-pyflakes
+flake8:
+ @$(CWD)/tools/run-flake8
unittest: clean_pyc
python3 -m pytest -v tests/unittests cloudinit
@@ -86,6 +83,43 @@ deb-src:
doc:
tox -e doc
-.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml
+# Spell check && filter false positives
+_CHECK_SPELLING := find doc -type f -exec spellintian {} + | \
+ grep -v -e 'doc/rtd/topics/cli.rst: modules modules' \
+ -e 'doc/examples/cloud-config-mcollective.txt: WARNING WARNING' \
+ -e 'doc/examples/cloud-config-power-state.txt: Bye Bye' \
+ -e 'doc/examples/cloud-config.txt: Bye Bye'
+
+
+# For CI we require a failing return code when spellintian finds spelling errors
+check_spelling:
+ @! $(_CHECK_SPELLING)
+
+# Manipulate the output of spellintian into a valid "sed" command which is run
+# to fix the error
+#
+# Example spellintian output:
+#
+# doc/examples/kernel-cmdline.txt: everthing -> everything
+#
+# The "fix_spelling" target manipulates the above output into the following command
+# and runs that command.
+#
+# sed -i "s/everthing/everything/g" doc/examples/kernel-cmdline.txt
+#
+# awk notes:
+#
+# -F ': | -> ' means use the strings ": " or " -> " as field delimiters
+# \046 is octal for double quote
+# $$2 will contain the second field, ($ must be escaped because this is in a Makefile)
+#
+# Limitation: duplicate words with newline between them are not automatically fixed
+fix_spelling:
+ @$(_CHECK_SPELLING) | \
+ sed 's/ (duplicate word)//g' | \
+ awk -F ': | -> ' '{printf "sed -i \047s/%s/%s/g\047 %s\n", $$2, $$3, $$1}' | \
+ sh
+
+.PHONY: test flake8 clean rpm srpm deb deb-src yaml
.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
-.PHONY: unittest style-check doc
+.PHONY: unittest style-check doc fix_spelling check_spelling
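For readers not fluent in awk, the transformation performed by the
fix_spelling target can be sketched in Python (an illustration only, not part
of the Makefile); it reads spellintian output on stdin and shells out to sed
the same way the recipe does:

    import re
    import subprocess
    import sys

    # Input lines look like:
    #   doc/examples/kernel-cmdline.txt: everthing -> everything
    for line in sys.stdin:
        line = line.replace(" (duplicate word)", "").rstrip("\n")
        match = re.match(r"(?P<path>[^:]+): (?P<wrong>.+) -> (?P<right>.+)$", line)
        if not match:
            continue
        # Equivalent of: sed -i 's/<wrong>/<right>/g' <path>
        subprocess.run(
            ["sed", "-i", f"s/{match['wrong']}/{match['right']}/g", match["path"]],
            check=True,
        )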
diff --git a/README.md b/README.md
index 435405da..f2a745f8 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# cloud-init
-[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
+[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=main)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
Cloud-init is the *industry standard* multi-distribution method for
cross-platform cloud instance initialization. It is supported across all
@@ -26,7 +26,7 @@ If you need support, start with the [user documentation](https://cloudinit.readt
If you need additional help consider reaching out with one of the following options:
-- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init)
+- Ask a question in the [``#cloud-init`` IRC channel on Libera](https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init)
- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/)
- Better yet, join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate
- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug)
@@ -39,11 +39,11 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
-Checkout the [hacking](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html)
+Checkout the [contributing](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
document that outlines the steps necessary to develop, test, and submit code.
## Daily builds
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index a9577e9d..b9f137b1 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -28,7 +28,7 @@ _cloudinit_complete()
COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
;;
devel)
- COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
+ COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word))
;;
dhclient-hook)
COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
@@ -64,6 +64,9 @@ _cloudinit_complete()
--frequency)
COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
;;
+ hotplug-hook)
+ COMPREPLY=($(compgen -W "--help" -- $cur_word))
+ ;;
net-convert)
COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
;;
diff --git a/cloud-tests-requirements.txt b/cloud-tests-requirements.txt
deleted file mode 100644
index b4cd18d5..00000000
--- a/cloud-tests-requirements.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# PyPI requirements for cloud-init cloud tests
-# https://cloudinit.readthedocs.io/en/latest/topics/cloud_tests.html
-#
-# Note: Changes to this requirements may require updates to
-# the packages/pkg-deps.json file as well.
-#
-
-# ec2 backend
-boto3==1.14.53
-
-# ssh communication
-paramiko==2.7.2
-cryptography==3.1
-
-# lxd backend
-pylxd==2.2.11
-
-# finds latest image information
-git+https://git.launchpad.net/simplestreams
-
-# azure backend
-azure-storage==0.36.0
-msrestazure==0.6.1
-azure-common==1.1.23
-azure-mgmt-compute==7.0.0
-azure-mgmt-network==5.0.0
-azure-mgmt-resource==4.0.0
-azure-mgmt-storage==6.0.0
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 99e5c203..36a5be78 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -5,62 +5,111 @@
import argparse
import re
import sys
+from datetime import datetime
from cloudinit.util import json_dumps
-from datetime import datetime
-from . import dump
-from . import show
+
+from . import dump, show
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-analyze',
- description='Devel tool: Analyze cloud-init logs and data')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-analyze",
+ description="Devel tool: Analyze cloud-init logs and data",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
parser_blame = subparsers.add_parser(
- 'blame', help='Print list of executed stages ordered by time to init')
+ "blame", help="Print list of executed stages ordered by time to init"
+ )
parser_blame.add_argument(
- '-i', '--infile', action='store', dest='infile',
- default='/var/log/cloud-init.log',
- help='specify where to read input.')
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
parser_blame.add_argument(
- '-o', '--outfile', action='store', dest='outfile', default='-',
- help='specify where to write output. ')
- parser_blame.set_defaults(action=('blame', analyze_blame))
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_blame.set_defaults(action=("blame", analyze_blame))
parser_show = subparsers.add_parser(
- 'show', help='Print list of in-order events during execution')
- parser_show.add_argument('-f', '--format', action='store',
- dest='print_format', default='%I%D @%Es +%ds',
- help='specify formatting of output.')
- parser_show.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input.')
- parser_show.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_show.set_defaults(action=('show', analyze_show))
+ "show", help="Print list of in-order events during execution"
+ )
+ parser_show.add_argument(
+ "-f",
+ "--format",
+ action="store",
+ dest="print_format",
+ default="%I%D @%Es +%ds",
+ help="specify formatting of output.",
+ )
+ parser_show.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
+ parser_show.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_show.set_defaults(action=("show", analyze_show))
parser_dump = subparsers.add_parser(
- 'dump', help='Dump cloud-init events in JSON format')
- parser_dump.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_dump.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output. ')
- parser_dump.set_defaults(action=('dump', analyze_dump))
+ "dump", help="Dump cloud-init events in JSON format"
+ )
+ parser_dump.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_dump.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_dump.set_defaults(action=("dump", analyze_dump))
parser_boot = subparsers.add_parser(
- 'boot', help='Print list of boot times for kernel and cloud-init')
- parser_boot.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_boot.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_boot.set_defaults(action=('boot', analyze_boot))
+ "boot", help="Print list of boot times for kernel and cloud-init"
+ )
+ parser_boot.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_boot.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_boot.set_defaults(action=("boot", analyze_boot))
return parser
@@ -78,61 +127,68 @@ def analyze_boot(name, args):
"""
infh, outfh = configure_io(args)
kernel_info = show.dist_check_timestamp()
- status_code, kernel_start, kernel_end, ci_sysd_start = \
- kernel_info
+ status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info
kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start)
kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end)
ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start)
try:
- last_init_local = \
- [e for e in _get_events(infh) if e['name'] == 'init-local' and
- 'starting search' in e['description']][-1]
- ci_start = datetime.utcfromtimestamp(last_init_local['timestamp'])
+ last_init_local = [
+ e
+ for e in _get_events(infh)
+ if e["name"] == "init-local"
+ and "starting search" in e["description"]
+ ][-1]
+ ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"])
except IndexError:
- ci_start = 'Could not find init-local log-line in cloud-init.log'
+ ci_start = "Could not find init-local log-line in cloud-init.log"
status_code = show.FAIL_CODE
- FAILURE_MSG = 'Your Linux distro or container does not support this ' \
- 'functionality.\n' \
- 'You must be running a Kernel Telemetry supported ' \
- 'distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest' \
- '/topics/analyze.html for more ' \
- 'information on supported distros.\n'
-
- SUCCESS_MSG = '-- Most Recent Boot Record --\n' \
- ' Kernel Started at: {k_s_t}\n' \
- ' Kernel ended boot at: {k_e_t}\n' \
- ' Kernel time to boot (seconds): {k_r}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Time between Kernel end boot and Cloud-init ' \
- 'activation (seconds): {bt_r}\n' \
- ' Cloud-init start: {ci_start}\n'
-
- CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \
- ' Container started at: {k_s_t}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Cloud-init start: {ci_start}\n' \
-
+ FAILURE_MSG = (
+ "Your Linux distro or container does not support this "
+ "functionality.\n"
+ "You must be running a Kernel Telemetry supported "
+ "distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest"
+ "/topics/analyze.html for more "
+ "information on supported distros.\n"
+ )
+
+ SUCCESS_MSG = (
+ "-- Most Recent Boot Record --\n"
+ " Kernel Started at: {k_s_t}\n"
+ " Kernel ended boot at: {k_e_t}\n"
+ " Kernel time to boot (seconds): {k_r}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Time between Kernel end boot and Cloud-init "
+ "activation (seconds): {bt_r}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
+
+ CONTAINER_MSG = (
+ "-- Most Recent Container Boot Record --\n"
+ " Container started at: {k_s_t}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
status_map = {
show.FAIL_CODE: FAILURE_MSG,
show.CONTAINER_CODE: CONTAINER_MSG,
- show.SUCCESS_CODE: SUCCESS_MSG
+ show.SUCCESS_CODE: SUCCESS_MSG,
}
kernel_runtime = kernel_end - kernel_start
between_process_runtime = ci_sysd_start - kernel_end
kwargs = {
- 'k_s_t': kernel_start_timestamp,
- 'k_e_t': kernel_end_timestamp,
- 'k_r': kernel_runtime,
- 'bt_r': between_process_runtime,
- 'k_e': kernel_end,
- 'k_s': kernel_start,
- 'ci_sysd': ci_sysd_start,
- 'ci_sysd_t': ci_sysd_start_timestamp,
- 'ci_start': ci_start
+ "k_s_t": kernel_start_timestamp,
+ "k_e_t": kernel_end_timestamp,
+ "k_r": kernel_runtime,
+ "bt_r": between_process_runtime,
+ "k_e": kernel_end,
+ "k_s": kernel_start,
+ "ci_sysd": ci_sysd_start,
+ "ci_sysd_t": ci_sysd_start_timestamp,
+ "ci_start": ci_start,
}
outfh.write(status_map[status_code].format(**kwargs))
@@ -152,15 +208,16 @@ def analyze_blame(name, args):
and sorting by record data ('delta')
"""
(infh, outfh) = configure_io(args)
- blame_format = ' %ds (%n)'
- r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- blame_format)):
+ blame_format = " %ds (%n)"
+ r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE)
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), blame_format)
+ ):
srecs = sorted(filter(r.match, record), reverse=True)
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('\n'.join(srecs) + '\n')
- outfh.write('\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write("\n".join(srecs) + "\n")
+ outfh.write("\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_show(name, args):
@@ -184,21 +241,25 @@ def analyze_show(name, args):
Finished stage: (modules-final) 0.NNN seconds
"""
(infh, outfh) = configure_io(args)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- args.print_format)):
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('The total time elapsed since completing an event is'
- ' printed after the "@" character.\n')
- outfh.write('The time the event takes is printed after the "+" '
- 'character.\n\n')
- outfh.write('\n'.join(record) + '\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), args.print_format)
+ ):
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write(
+ "The total time elapsed since completing an event is"
+ ' printed after the "@" character.\n'
+ )
+ outfh.write(
+ 'The time the event takes is printed after the "+" character.\n\n'
+ )
+ outfh.write("\n".join(record) + "\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
- outfh.write(json_dumps(_get_events(infh)) + '\n')
+ outfh.write(json_dumps(_get_events(infh)) + "\n")
def _get_events(infile):
@@ -211,28 +272,28 @@ def _get_events(infile):
def configure_io(args):
"""Common parsing and setup of input/output files"""
- if args.infile == '-':
+ if args.infile == "-":
infh = sys.stdin
else:
try:
- infh = open(args.infile, 'r')
+ infh = open(args.infile, "r")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.infile)
+ sys.stderr.write("Cannot open file %s\n" % args.infile)
sys.exit(1)
- if args.outfile == '-':
+ if args.outfile == "-":
outfh = sys.stdout
else:
try:
- outfh = open(args.outfile, 'w')
+ outfh = open(args.outfile, "w")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.outfile)
+ sys.stderr.write("Cannot open file %s\n" % args.outfile)
sys.exit(1)
return (infh, outfh)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
(name, action_functor) = args.action
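
A minimal sketch of how the dispatch above is driven: each subparser stores a (name, handler) tuple via set_defaults(action=...), and the entry point unpacks and calls it. The log path below is simply the parser default.

    from cloudinit.analyze.__main__ import get_parser

    parser = get_parser()
    # "dump" emits the parsed log events as JSON; "-o -" writes to stdout.
    args = parser.parse_args(["dump", "-i", "/var/log/cloud-init.log", "-o", "-"])
    name, handler = args.action  # e.g. ("dump", analyze_dump)
    handler(name, args)          # the same dispatch performed under __main__
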
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 62ad51fe..8e6e3c6a 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import calendar
-from datetime import datetime
import sys
+from datetime import datetime
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
stage_to_description = {
- 'finished': 'finished running cloud-init',
- 'init-local': 'starting search for local datasources',
- 'init-network': 'searching for network datasources',
- 'init': 'searching for network datasources',
- 'modules-config': 'running config modules',
- 'modules-final': 'finalizing modules',
- 'modules': 'running modules for',
- 'single': 'running single module ',
+ "finished": "finished running cloud-init",
+ "init-local": "starting search for local datasources",
+ "init-network": "searching for network datasources",
+ "init": "searching for network datasources",
+ "modules-config": "running config modules",
+ "modules-final": "finalizing modules",
+ "modules": "running modules for",
+ "single": "running single module ",
}
# logger's asctime format
@@ -34,11 +33,11 @@ def parse_timestamp(timestampstr):
if timestampstr.split()[0] in months:
# Aug 29 22:55:26
FMT = DEFAULT_FMT
- if '.' in timestampstr:
+ if "." in timestampstr:
FMT = CLOUD_INIT_JOURNALCTL_FMT
- dt = datetime.strptime(timestampstr + " " +
- str(datetime.now().year),
- FMT)
+ dt = datetime.strptime(
+ timestampstr + " " + str(datetime.now().year), FMT
+ )
timestamp = dt.strftime("%s.%f")
elif "," in timestampstr:
# 2016-09-12 14:39:20,839
@@ -52,7 +51,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr])
timestamp = out.strip()
return float(timestamp)
@@ -79,8 +78,8 @@ def parse_ci_logline(line):
# Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
# init-local/check-cache: attempting to read from cache [check]
- amazon_linux_2_sep = ' cloud-init['
- separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
+ amazon_linux_2_sep = " cloud-init["
+ separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -99,7 +98,7 @@ def parse_ci_logline(line):
if "," in timehost:
timestampstr, extra = timehost.split(",")
timestampstr += ",%s" % extra.split()[0]
- if ' ' in extra:
+ if " " in extra:
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
@@ -111,11 +110,11 @@ def parse_ci_logline(line):
eventstr = eventstr.split(maxsplit=1)[1]
else:
timestampstr = timehost.split(hostname)[0].strip()
- if 'Cloud-init v.' in eventstr:
- event_type = 'start'
- if 'running' in eventstr:
- stage_and_timestamp = eventstr.split('running')[1].lstrip()
- event_name, _ = stage_and_timestamp.split(' at ')
+ if "Cloud-init v." in eventstr:
+ event_type = "start"
+ if "running" in eventstr:
+ stage_and_timestamp = eventstr.split("running")[1].lstrip()
+ event_name, _ = stage_and_timestamp.split(" at ")
event_name = event_name.replace("'", "").replace(":", "-")
if event_name == "init":
event_name = "init-network"
@@ -128,17 +127,17 @@ def parse_ci_logline(line):
event_description = eventstr.split(event_name)[1].strip()
event = {
- 'name': event_name.rstrip(":"),
- 'description': event_description,
- 'timestamp': parse_timestamp(timestampstr),
- 'origin': 'cloudinit',
- 'event_type': event_type.rstrip(":"),
+ "name": event_name.rstrip(":"),
+ "description": event_description,
+ "timestamp": parse_timestamp(timestampstr),
+ "origin": "cloudinit",
+ "event_type": event_type.rstrip(":"),
}
- if event['event_type'] == "finish":
+ if event["event_type"] == "finish":
result = event_description.split(":")[0]
- desc = event_description.split(result)[1].lstrip(':').strip()
- event['result'] = result
- event['description'] = desc.strip()
+ desc = event_description.split(result)[1].lstrip(":").strip()
+ event["result"] = result
+ event["description"] = desc.strip()
return event
@@ -146,10 +145,10 @@ def parse_ci_logline(line):
def dump_events(cisource=None, rawdata=None):
events = []
event = None
- CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']
+ CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."]
if not any([cisource, rawdata]):
- raise ValueError('Either cisource or rawdata parameters are required')
+ raise ValueError("Either cisource or rawdata parameters are required")
if rawdata:
data = rawdata.splitlines()
@@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None):
try:
event = parse_ci_logline(line)
except ValueError:
- sys.stderr.write('Skipping invalid entry\n')
+ sys.stderr.write("Skipping invalid entry\n")
if event:
events.append(event)
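
parse_ci_logline() turns one log line into an event dictionary; a short sketch, reusing the sample line from the analyze test fixture removed further below:

    from cloudinit.analyze.dump import parse_ci_logline

    line = (
        "2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. "
        "19.1-1-gbaa47854-0ubuntu1~18.04.1 running 'init-local' at "
        "Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds."
    )
    event = parse_ci_logline(line)
    # event carries "name", "description", "timestamp", "origin" and
    # "event_type"; here name is "init-local" and event_type is "start".
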
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 01a4d3e5..5fd9cdfd 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,11 +8,10 @@ import base64
import datetime
import json
import os
-import time
import sys
+import time
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.distros import uses_systemd
# Example events:
@@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd
# }
format_key = {
- '%d': 'delta',
- '%D': 'description',
- '%E': 'elapsed',
- '%e': 'event_type',
- '%I': 'indent',
- '%l': 'level',
- '%n': 'name',
- '%o': 'origin',
- '%r': 'result',
- '%t': 'timestamp',
- '%T': 'total_time',
+ "%d": "delta",
+ "%D": "description",
+ "%E": "elapsed",
+ "%e": "event_type",
+ "%I": "indent",
+ "%l": "level",
+ "%n": "name",
+ "%o": "origin",
+ "%r": "result",
+ "%t": "timestamp",
+ "%T": "total_time",
}
-formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
- for k, v in format_key.items()])
-SUCCESS_CODE = 'successful'
-FAIL_CODE = 'failure'
-CONTAINER_CODE = 'container'
+formatting_help = " ".join(
+ ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()]
+)
+SUCCESS_CODE = "successful"
+FAIL_CODE = "failure"
+CONTAINER_CODE = "container"
TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
@@ -60,7 +60,7 @@ def format_record(msg, event):
for i, j in format_key.items():
if i in msg:
# ensure consistent formatting of time values
- if j in ['delta', 'elapsed', 'timestamp']:
+ if j in ["delta", "elapsed", "timestamp"]:
msg = msg.replace(i, "{%s:08.5f}" % j)
else:
msg = msg.replace(i, "{%s}" % j)
@@ -68,13 +68,13 @@ def format_record(msg, event):
def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ['content'])
- files = content['files']
+ content = dict((k, v) for k, v in event.items() if k not in ["content"])
+ files = content["files"]
saved = []
for f in files:
- fname = f['path']
+ fname = f["path"]
fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f['content']).decode('ascii')
+ fcontent = base64.b64decode(f["content"]).decode("ascii")
util.write_file(fn_local, fcontent)
saved.append(fn_local)
@@ -83,13 +83,13 @@ def dump_event_files(event):
def event_name(event):
if event:
- return event.get('name')
+ return event.get("name")
return None
def event_type(event):
if event:
- return event.get('event_type')
+ return event.get("event_type")
return None
@@ -100,7 +100,7 @@ def event_parent(event):
def event_timestamp(event):
- return float(event.get('timestamp'))
+ return float(event.get("timestamp"))
def event_datetime(event):
@@ -117,41 +117,44 @@ def event_duration(start, finish):
def event_record(start_time, start, finish):
record = finish.copy()
- record.update({
- 'delta': event_duration(start, finish),
- 'elapsed': delta_seconds(start_time, event_datetime(start)),
- 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
- })
+ record.update(
+ {
+ "delta": event_duration(start, finish),
+ "elapsed": delta_seconds(start_time, event_datetime(start)),
+ "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->",
+ }
+ )
return record
def total_time_record(total_time):
- return 'Total Time: %3.5f seconds\n' % total_time
+ return "Total Time: %3.5f seconds\n" % total_time
class SystemctlReader(object):
- '''
+ """
Class for dealing with all systemctl subp calls in a consistent manner.
- '''
+ """
+
def __init__(self, property, parameter=None):
self.epoch = None
- self.args = ['/bin/systemctl', 'show']
+ self.args = ["/bin/systemctl", "show"]
if parameter:
self.args.append(parameter)
- self.args.extend(['-p', property])
+ self.args.extend(["-p", property])
# Don't want the init of our object to break. Instead of throwing
# an exception, set an error code that gets checked when data is
# requested from the object
self.failure = self.subp()
def subp(self):
- '''
+ """
Make a subp call based on set args and handle errors by setting
failure code
:return: whether the subp call failed or not
- '''
+ """
try:
value, err = subp.subp(self.args, capture=True)
if err:
@@ -162,41 +165,41 @@ class SystemctlReader(object):
return systemctl_fail
def parse_epoch_as_float(self):
- '''
+ """
If subp call succeeded, return the timestamp from subp as a float.
:return: timestamp as a float
- '''
+ """
# subp has 2 ways to fail: it either fails and throws an exception,
# or returns an error code. Raise an exception here in order to make
# sure both scenarios throw exceptions
if self.failure:
- raise RuntimeError('Subprocess call to systemctl has failed, '
- 'returning error code ({})'
- .format(self.failure))
+ raise RuntimeError(
+ "Subprocess call to systemctl has failed, "
+ "returning error code ({})".format(self.failure)
+ )
# Output from systemctl show has the format Property=Value.
# For example, UserspaceMonotonic=1929304
- timestamp = self.epoch.split('=')[1]
+ timestamp = self.epoch.split("=")[1]
# Timestamps reported by systemctl are in microseconds, converting
return float(timestamp) / 1000000
def dist_check_timestamp():
- '''
+ """
Determine which init system a particular linux distro is using.
Each init system (systemd, upstart, etc) has a different way of
providing timestamps.
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart
or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
- '''
+ """
if uses_systemd():
return gather_timestamps_using_systemd()
# Use dmesg to get timestamps if the distro does not have systemd
- if util.is_FreeBSD() or 'gentoo' in \
- util.system_info()['system'].lower():
+ if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower():
return gather_timestamps_using_dmesg()
# this distro doesn't fit anything that is supported by cloud-init. just
@@ -205,20 +208,20 @@ def dist_check_timestamp():
def gather_timestamps_using_dmesg():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization using dmesg as opposed to systemctl
:return: the two timestamps plus a dummy timestamp to keep consistency
with gather_timestamps_using_systemd
- '''
+ """
try:
- data, _ = subp.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(["dmesg"], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
- if i.decode('UTF-8').find('user') != -1:
- splitup = i.decode('UTF-8').split()
- stripped = splitup[1].strip(']')
+ if i.decode("UTF-8").find("user") != -1:
+ splitup = i.decode("UTF-8").split()
+ stripped = splitup[1].strip("]")
# kernel timestamp from dmesg is equal to 0,
# with the userspace timestamp relative to it.
@@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg():
# systemd wont start cloud-init in this case,
# so we cannot get that timestamp
- return SUCCESS_CODE, kernel_start, kernel_end, \
- kernel_end
+ return SUCCESS_CODE, kernel_start, kernel_end, kernel_end
except Exception:
pass
@@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg():
def gather_timestamps_using_systemd():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization. and cloud-init systemd unit activation
:return: the three timestamps
- '''
+ """
kernel_start = float(time.time()) - float(util.uptime())
try:
- delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
- .parse_epoch_as_float()
- delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
- 'cloud-init-local').parse_epoch_as_float()
+ delta_k_end = SystemctlReader(
+ "UserspaceTimestampMonotonic"
+ ).parse_epoch_as_float()
+ delta_ci_s = SystemctlReader(
+ "InactiveExitTimestampMonotonic", "cloud-init-local"
+ ).parse_epoch_as_float()
base_time = kernel_start
status = SUCCESS_CODE
# lxc based containers do not set their monotonic zero point to be when
@@ -262,12 +266,13 @@ def gather_timestamps_using_systemd():
# in containers when https://github.com/lxc/lxcfs/issues/292
# is fixed, util.uptime() should be used instead of stat on
try:
- file_stat = os.stat('/proc/1/cmdline')
+ file_stat = os.stat("/proc/1/cmdline")
kernel_start = file_stat.st_atime
except OSError as err:
- raise RuntimeError('Could not determine container boot '
- 'time from /proc/1/cmdline. ({})'
- .format(err)) from err
+ raise RuntimeError(
+ "Could not determine container boot "
+ "time from /proc/1/cmdline. ({})".format(err)
+ ) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
@@ -283,10 +288,14 @@ def gather_timestamps_using_systemd():
return status, kernel_start, kernel_end, cloudinit_sysd
-def generate_records(events, blame_sort=False,
- print_format="(%n) %d seconds in %I%D",
- dump_files=False, log_datafiles=False):
- '''
+def generate_records(
+ events,
+ blame_sort=False,
+ print_format="(%n) %d seconds in %I%D",
+ dump_files=False,
+ log_datafiles=False,
+):
+ """
Take in raw events and create parent-child dependencies between events
in order to order events in chronological order.
@@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False,
:param log_datafiles: whether or not to log events generated
:return: boot records ordered chronologically
- '''
+ """
- sorted_events = sorted(events, key=lambda x: x['timestamp'])
+ sorted_events = sorted(events, key=lambda x: x["timestamp"])
records = []
start_time = None
total_time = 0.0
@@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False,
except IndexError:
next_evt = None
- if event_type(event) == 'start':
- if event.get('name') in stages_seen:
+ if event_type(event) == "start":
+ if event.get("name") in stages_seen:
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
@@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False,
# see if we have a pair
if event_name(event) == event_name(next_evt):
- if event_type(next_evt) == 'finish':
- records.append(format_record(print_format,
- event_record(start_time,
- event,
- next_evt)))
+ if event_type(next_evt) == "finish":
+ records.append(
+ format_record(
+ print_format,
+ event_record(start_time, event, next_evt),
+ )
+ )
else:
# This is a parent event
- records.append("Starting stage: %s" % event.get('name'))
+ records.append("Starting stage: %s" % event.get("name"))
unprocessed.append(event)
- stages_seen.append(event.get('name'))
+ stages_seen.append(event.get("name"))
continue
else:
prev_evt = unprocessed.pop()
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
- records.append(format_record("Finished stage: "
- "(%n) %d seconds",
- record) + "\n")
- total_time += record.get('delta')
+ records.append(
+ format_record("Finished stage: (%n) %d seconds", record)
+ + "\n"
+ )
+ total_time += record.get("delta")
else:
# not a match, put it back
unprocessed.append(prev_evt)
@@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False,
def show_events(events, print_format):
- '''
+ """
A passthrough method that makes it easier to call generate_records()
:param events: JSONs from dump that represents events taken from logs
@@ -368,18 +380,18 @@ def show_events(events, print_format):
and time taken by the event in one line
:return: boot records ordered chronologically
- '''
+ """
return generate_records(events, print_format=print_format)
def load_events_infile(infile):
- '''
+ """
Takes in a log file, read it, and convert to json.
:param infile: The Log file to be read
:return: json version of logfile, raw file
- '''
+ """
data = infile.read()
try:
return json.loads(data), data
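
SystemctlReader wraps a single 'systemctl show -p <property>' call; a minimal sketch of the same use gather_timestamps_using_systemd() makes of it (systemd only; parse_epoch_as_float() raises RuntimeError if the call failed):

    from cloudinit.analyze.show import SystemctlReader

    # Reads UserspaceTimestampMonotonic and converts the reported
    # microsecond value to seconds.
    reader = SystemctlReader("UserspaceTimestampMonotonic")
    kernel_end_offset = reader.parse_epoch_as_float()
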
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
deleted file mode 100644
index f69423c3..00000000
--- a/cloudinit/analyze/tests/test_boot.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-from cloudinit.analyze.__main__ import (analyze_boot, get_parser)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \
- FAIL_CODE, CONTAINER_CODE
-
-err_code = (FAIL_CODE, -1, -1, -1)
-
-
-class TestDistroChecker(CiTestCase):
-
- @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
- ''),
- 'system': ''})
- @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
- @mock.patch('cloudinit.util.is_FreeBSD', return_value=False)
- def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd):
- self.assertEqual(err_code, dist_check_timestamp())
-
- @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
- '')})
- @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
- @mock.patch('cloudinit.util.is_FreeBSD', return_value=True)
- def test_freebsd_gentoo_cant_find(self, m_sys_info,
- m_get_linux_distro, m_is_FreeBSD):
- self.assertEqual(err_code, dist_check_timestamp())
-
- @mock.patch('cloudinit.subp.subp', return_value=(0, 1))
- def test_subp_fails(self, m_subp):
- self.assertEqual(err_code, dist_check_timestamp())
-
-
-class TestSystemCtlReader(CiTestCase):
-
- def test_systemctl_invalid_property(self):
- reader = SystemctlReader('dummyProperty')
- with self.assertRaises(RuntimeError):
- reader.parse_epoch_as_float()
-
- def test_systemctl_invalid_parameter(self):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(RuntimeError):
- reader.parse_epoch_as_float()
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- def test_systemctl_works_correctly_threshold(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- self.assertEqual(1.0, reader.parse_epoch_as_float())
- thresh = 1.0 - reader.parse_epoch_as_float()
- self.assertTrue(thresh < 1e-6)
- self.assertTrue(thresh > (-1 * 1e-6))
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=0', None))
- def test_systemctl_succeed_zero(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- self.assertEqual(0.0, reader.parse_epoch_as_float())
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=1', None))
- def test_systemctl_succeed_distinct(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- val1 = reader.parse_epoch_as_float()
- m_subp.return_value = ('U=2', None)
- reader2 = SystemctlReader('dummyProperty', 'dummyParameter')
- val2 = reader2.parse_epoch_as_float()
- self.assertNotEqual(val1, val2)
-
- @mock.patch('cloudinit.subp.subp', return_value=('100', None))
- def test_systemctl_epoch_not_splittable(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(IndexError):
- reader.parse_epoch_as_float()
-
- @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None))
- def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
- reader = SystemctlReader('dummyProperty', 'dummyParameter')
- with self.assertRaises(ValueError):
- reader.parse_epoch_as_float()
-
-
-class TestAnalyzeBoot(CiTestCase):
-
- def set_up_dummy_file_ci(self, path, log_path):
- infh = open(path, 'w+')
- infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. '
- '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' '
- 'at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds.')
- infh.close()
- outfh = open(log_path, 'w+')
- outfh.close()
-
- def set_up_dummy_file(self, path, log_path):
- infh = open(path, 'w+')
- infh.write('dummy data')
- infh.close()
- outfh = open(log_path, 'w+')
- outfh.close()
-
- def remove_dummy_file(self, path, log_path):
- if os.path.isfile(path):
- os.remove(path)
- if os.path.isfile(log_path):
- os.remove(log_path)
-
- @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
- return_value=err_code)
- def test_boot_invalid_distro(self, m_dist_check_timestamp):
-
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
- analyze_boot(name_default, args)
- # now args have been tested, go into outfile and make sure error
- # message is in the outfile
- outfh = open(args.outfile, 'r')
- data = outfh.read()
- err_string = 'Your Linux distro or container does not support this ' \
- 'functionality.\nYou must be running a Kernel ' \
- 'Telemetry supported distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest/topics' \
- '/analyze.html for more information on supported ' \
- 'distros.\n'
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(err_string, data)
-
- @mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- def test_container_no_ci_log_line(self, m_is_container, m_subp):
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
-
- finish_code = analyze_boot(name_default, args)
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(FAIL_CODE, finish_code)
-
- @mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
- @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
- 'name': 'init-local', 'description': 'starting search', 'timestamp':
- 100000}])
- @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
- return_value=(CONTAINER_CODE, 1, 1, 1))
- def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g):
- path = os.path.dirname(os.path.abspath(__file__))
- log_path = path + '/boot-test.log'
- path += '/dummy.log'
- self.set_up_dummy_file_ci(path, log_path)
-
- parser = get_parser()
- args = parser.parse_args(args=['boot', '-i', path, '-o',
- log_path])
- name_default = ''
- finish_code = analyze_boot(name_default, args)
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(CONTAINER_CODE, finish_code)
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 9bded16c..92068aa9 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -2,125 +2,143 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''Cloud-init apport interface'''
+"""Cloud-init apport interface"""
try:
from apport.hookutils import (
- attach_file, attach_root_command_outputs, root_command_output)
+ attach_file,
+ attach_root_command_outputs,
+ root_command_output,
+ )
+
has_apport = True
except ImportError:
has_apport = False
KNOWN_CLOUD_NAMES = [
- 'AliYun',
- 'AltCloud',
- 'Amazon - Ec2',
- 'Azure',
- 'Bigstep',
- 'Brightbox',
- 'CloudSigma',
- 'CloudStack',
- 'DigitalOcean',
- 'E24Cloud',
- 'GCE - Google Compute Engine',
- 'Exoscale',
- 'Hetzner Cloud',
- 'IBM - (aka SoftLayer or BlueMix)',
- 'LXD',
- 'MAAS',
- 'NoCloud',
- 'OpenNebula',
- 'OpenStack',
- 'Oracle',
- 'OVF',
- 'RbxCloud - (HyperOne, Rootbox, Rubikon)',
- 'OpenTelekomCloud',
- 'SAP Converged Cloud',
- 'Scaleway',
- 'SmartOS',
- 'VMware',
- 'ZStack',
- 'Other'
+ "AliYun",
+ "AltCloud",
+ "Amazon - Ec2",
+ "Azure",
+ "Bigstep",
+ "Brightbox",
+ "CloudSigma",
+ "CloudStack",
+ "DigitalOcean",
+ "E24Cloud",
+ "GCE - Google Compute Engine",
+ "Exoscale",
+ "Hetzner Cloud",
+ "IBM - (aka SoftLayer or BlueMix)",
+ "LXD",
+ "MAAS",
+ "NoCloud",
+ "OpenNebula",
+ "OpenStack",
+ "Oracle",
+ "OVF",
+ "RbxCloud - (HyperOne, Rootbox, Rubikon)",
+ "OpenTelekomCloud",
+ "SAP Converged Cloud",
+ "Scaleway",
+ "SmartOS",
+ "UpCloud",
+ "VMware",
+ "Vultr",
+ "ZStack",
+ "Other",
]
# Potentially clear text collected logs
-CLOUDINIT_LOG = '/var/log/cloud-init.log'
-CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOG = "/var/log/cloud-init.log"
+CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def attach_cloud_init_logs(report, ui=None):
- '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.'''
- attach_root_command_outputs(report, {
- 'cloud-init-log-warnings':
- 'egrep -i "warn|error" /var/log/cloud-init.log',
- 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'})
+ """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'."""
+ attach_root_command_outputs(
+ report,
+ {
+ "cloud-init-log-warnings": (
+ 'egrep -i "warn|error" /var/log/cloud-init.log'
+ ),
+ "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log",
+ },
+ )
root_command_output(
- ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz'])
- attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz')
+ ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"]
+ )
+ attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz")
def attach_hwinfo(report, ui=None):
- '''Optionally attach hardware info from lshw.'''
+ """Optionally attach hardware info from lshw."""
prompt = (
- 'Your device details (lshw) may be useful to developers when'
- ' addressing this bug, but gathering it requires admin privileges.'
- ' Would you like to include this info?')
+ "Your device details (lshw) may be useful to developers when"
+ " addressing this bug, but gathering it requires admin privileges."
+ " Would you like to include this info?"
+ )
if ui and ui.yesno(prompt):
- attach_root_command_outputs(report, {'lshw.txt': 'lshw'})
+ attach_root_command_outputs(report, {"lshw.txt": "lshw"})
def attach_cloud_info(report, ui=None):
- '''Prompt for cloud details if available.'''
+ """Prompt for cloud details if available."""
if ui:
- prompt = 'Is this machine running in a cloud environment?'
+ prompt = "Is this machine running in a cloud environment?"
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- prompt = ('Please select the cloud vendor or environment in which'
- ' this instance is running')
+ prompt = (
+ "Please select the cloud vendor or environment in which"
+ " this instance is running"
+ )
response = ui.choice(prompt, KNOWN_CLOUD_NAMES)
if response:
- report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]]
+ report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]]
else:
- report['CloudName'] = 'None'
+ report["CloudName"] = "None"
def attach_user_data(report, ui=None):
- '''Optionally provide user-data if desired.'''
+ """Optionally provide user-data if desired."""
if ui:
prompt = (
- 'Your user-data or cloud-config file can optionally be provided'
- ' from {0} and could be useful to developers when addressing this'
- ' bug. Do you wish to attach user-data to this bug?'.format(
- USER_DATA_FILE))
+ "Your user-data or cloud-config file can optionally be provided"
+ " from {0} and could be useful to developers when addressing this"
+ " bug. Do you wish to attach user-data to this bug?".format(
+ USER_DATA_FILE
+ )
+ )
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- attach_file(report, USER_DATA_FILE, 'user_data.txt')
+ attach_file(report, USER_DATA_FILE, "user_data.txt")
def add_bug_tags(report):
- '''Add any appropriate tags to the bug.'''
- if 'JournalErrors' in report.keys():
- errors = report['JournalErrors']
- if 'Breaking ordering cycle' in errors:
- report['Tags'] = 'systemd-ordering'
+ """Add any appropriate tags to the bug."""
+ if "JournalErrors" in report.keys():
+ errors = report["JournalErrors"]
+ if "Breaking ordering cycle" in errors:
+ report["Tags"] = "systemd-ordering"
def add_info(report, ui):
- '''This is an entry point to run cloud-init's apport functionality.
+ """This is an entry point to run cloud-init's apport functionality.
Distros which want apport support will have a cloud-init package-hook at
/usr/share/apport/package-hooks/cloud-init.py which defines an add_info
function and returns the result of cloudinit.apport.add_info(report, ui).
- '''
+ """
if not has_apport:
raise RuntimeError(
- 'No apport imports discovered. Apport functionality disabled')
+ "No apport imports discovered. Apport functionality disabled"
+ )
attach_cloud_init_logs(report, ui)
attach_hwinfo(report, ui)
attach_cloud_info(report, ui)
@@ -128,4 +146,5 @@ def add_info(report, ui):
add_bug_tags(report)
return True
+
# vi: ts=4 expandtab
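
The add_info() docstring above describes the per-distro package hook; such a hook is essentially a thin wrapper, sketched here with the path taken from that docstring:

    # /usr/share/apport/package-hooks/cloud-init.py (sketch)
    from cloudinit import apport

    def add_info(report, ui):
        return apport.add_info(report, ui)
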
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 485ff92f..ae117fad 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -10,8 +10,9 @@ _DEF_PERMS = 0o644
LOG = logging.getLogger(__name__)
-def write_file(filename, content, mode=_DEF_PERMS,
- omode="wb", preserve_mode=False):
+def write_file(
+ filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False
+):
# open filename in mode 'omode', write content, set permissions to 'mode'
if preserve_mode:
@@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS,
tf = None
try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
- delete=False, mode=omode)
+ tf = tempfile.NamedTemporaryFile(
+ dir=os.path.dirname(filename), delete=False, mode=omode
+ )
LOG.debug(
"Atomically writing to file %s (via temporary file %s) - %s: [%o]"
" %d bytes/chars",
- filename, tf.name, omode, mode, len(content))
+ filename,
+ tf.name,
+ omode,
+ mode,
+ len(content),
+ )
tf.write(content)
tf.close()
os.chmod(tf.name, mode)
@@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS,
def write_json(filename, data, mode=_DEF_PERMS):
# dump json representation of data to file filename.
return write_file(
- filename, json.dumps(data, indent=1, sort_keys=True) + "\n",
- omode="w", mode=mode)
+ filename,
+ json.dumps(data, indent=1, sort_keys=True) + "\n",
+ omode="w",
+ mode=mode,
+ )
+
# vi: ts=4 expandtab
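
A short sketch of the write_json() helper above; the target path is illustrative only:

    from cloudinit import atomic_helper

    # Serializes with indent=1 and sorted keys, writes through a temporary
    # file in the same directory, then chmods the result to 0o644.
    atomic_helper.write_json("/tmp/example-status.json", {"stage": "init-local"})
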
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 7ae98e1c..91e48103 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -35,7 +35,8 @@ class Cloud(object):
reporter = events.ReportEventStack(
name="unnamed-cloud-reporter",
description="unnamed-cloud-reporter",
- reporting_enabled=False)
+ reporting_enabled=False,
+ )
self.reporter = reporter
# If a 'user' manipulates logging or logging services
@@ -56,8 +57,11 @@ class Cloud(object):
def get_template_filename(self, name):
fn = self.paths.template_tpl % (name)
if not os.path.isfile(fn):
- LOG.warning("No template found in %s for template named %s",
- os.path.dirname(fn), name)
+ LOG.warning(
+ "No template found in %s for template named %s",
+ os.path.dirname(fn),
+ name,
+ )
return None
return fn
@@ -80,7 +84,8 @@ class Cloud(object):
def get_hostname(self, fqdn=False, metadata_only=False):
return self.datasource.get_hostname(
- fqdn=fqdn, metadata_only=metadata_only)
+ fqdn=fqdn, metadata_only=metadata_only
+ )
def device_name_to_device(self, name):
return self.datasource.device_name_to_device(name)
@@ -94,4 +99,5 @@ class Cloud(object):
def get_ipath(self, name=None):
return self.paths.get_ipath(name)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 928a8eea..0e1db118 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,12 +10,14 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link)
-
-
-def error(msg):
- sys.stderr.write("ERROR: " + msg + "\n")
+from cloudinit.subp import ProcessExecutionError, subp
+from cloudinit.util import (
+ del_dir,
+ del_file,
+ error,
+ get_config_logfiles,
+ is_link,
+)
def get_parser(parser=None):
@@ -29,18 +31,35 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='clean',
- description=('Remove logs and artifacts so cloud-init re-runs on '
- 'a clean system'))
+ prog="clean",
+ description=(
+ "Remove logs and artifacts so cloud-init re-runs on "
+ "a clean system"
+ ),
+ )
parser.add_argument(
- '-l', '--logs', action='store_true', default=False, dest='remove_logs',
- help='Remove cloud-init logs.')
+ "-l",
+ "--logs",
+ action="store_true",
+ default=False,
+ dest="remove_logs",
+ help="Remove cloud-init logs.",
+ )
parser.add_argument(
- '-r', '--reboot', action='store_true', default=False,
- help='Reboot system after logs are cleaned so cloud-init re-runs.')
+ "-r",
+ "--reboot",
+ action="store_true",
+ default=False,
+ help="Reboot system after logs are cleaned so cloud-init re-runs.",
+ )
parser.add_argument(
- '-s', '--seed', action='store_true', default=False, dest='remove_seed',
- help='Remove cloud-init seed directory /var/lib/cloud/seed.')
+ "-s",
+ "--seed",
+ action="store_true",
+ default=False,
+ dest="remove_seed",
+ help="Remove cloud-init seed directory /var/lib/cloud/seed.",
+ )
return parser
@@ -61,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
- seed_path = os.path.join(init.paths.cloud_dir, 'seed')
- for path in glob.glob('%s/*' % init.paths.cloud_dir):
+ seed_path = os.path.join(init.paths.cloud_dir, "seed")
+ for path in glob.glob("%s/*" % init.paths.cloud_dir):
if path == seed_path and not remove_seed:
continue
try:
@@ -71,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False):
else:
del_file(path)
except OSError as e:
- error('Could not remove {0}: {1}'.format(path, str(e)))
+ error("Could not remove {0}: {1}".format(path, str(e)))
return 1
return 0
@@ -80,13 +99,15 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if exit_code == 0 and args.reboot:
- cmd = ['shutdown', '-r', 'now']
+ cmd = ["shutdown", "-r", "now"]
try:
subp(cmd, capture=False)
except ProcessExecutionError as e:
error(
'Could not reboot this system using "{0}": {1}'.format(
- cmd, str(e)))
+ cmd, str(e)
+ )
+ )
exit_code = 1
return exit_code
@@ -94,10 +115,10 @@ def handle_clean_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- sys.exit(handle_clean_args('clean', parser.parse_args()))
+ sys.exit(handle_clean_args("clean", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
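
The clean subcommand is also callable as a library; a hedged sketch (removal normally requires root, and remove_logs/remove_seed correspond to the -l and -s flags above):

    from cloudinit.cmd.clean import remove_artifacts

    # Clears artifacts under the cloud dir but keeps /var/lib/cloud/seed
    # unless remove_seed=True; returns 0 on success, 1 if a path could
    # not be removed.
    exit_code = remove_artifacts(remove_logs=True, remove_seed=False)
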
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 97608921..b9c30fb4 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -6,12 +6,17 @@ import argparse
import json
import sys
+from cloudinit.cmd.status import UXAppStatus, get_status_details
from cloudinit.sources import (
- INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id)
+ INSTANCE_JSON_FILE,
+ METADATA_UNKNOWN,
+ canonical_cloud_id,
+)
+from cloudinit.util import error
-DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE
+DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE
-NAME = 'cloud-id'
+NAME = "cloud-id"
def get_parser(parser=None):
@@ -26,55 +31,75 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
- description='Report the canonical cloud-id for this instance')
+ description="Report the canonical cloud-id for this instance",
+ )
parser.add_argument(
- '-j', '--json', action='store_true', default=False,
- help='Report all standardized cloud-id information as json.')
+ "-j",
+ "--json",
+ action="store_true",
+ default=False,
+ help="Report all standardized cloud-id information as json.",
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help='Report extended cloud-id information as tab-delimited string.')
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help="Report extended cloud-id information as tab-delimited string.",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON,
- help=('Path to instance-data.json file. Default is %s' %
- DEFAULT_INSTANCE_JSON))
+ "-i",
+ "--instance-data",
+ type=str,
+ default=DEFAULT_INSTANCE_JSON,
+ help="Path to instance-data.json file. Default is %s"
+ % DEFAULT_INSTANCE_JSON,
+ )
return parser
-def error(msg):
- sys.stderr.write('ERROR: %s\n' % msg)
- return 1
-
-
def handle_args(name, args):
"""Handle calls to 'cloud-id' cli.
Print the canonical cloud-id on which the instance is running.
- @return: 0 on success, 1 otherwise.
+ @return: 0 on success, 1 on error, 2 on disabled, 3 on cloud-init not run.
"""
+ status, _status_details, _time = get_status_details()
+ if status == UXAppStatus.DISABLED:
+ sys.stdout.write("{0}\n".format(status.value))
+ return 2
+ elif status == UXAppStatus.NOT_RUN:
+ sys.stdout.write("{0}\n".format(status.value))
+ return 3
+
try:
instance_data = json.load(open(args.instance_data))
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
- ' using --instance-data' % args.instance_data)
+ " using --instance-data" % args.instance_data
+ )
except ValueError as e:
return error(
- "File '%s' is not valid json. %s" % (args.instance_data, e))
- v1 = instance_data.get('v1', {})
+ "File '%s' is not valid json. %s" % (args.instance_data, e)
+ )
+ v1 = instance_data.get("v1", {})
cloud_id = canonical_cloud_id(
- v1.get('cloud_name', METADATA_UNKNOWN),
- v1.get('region', METADATA_UNKNOWN),
- v1.get('platform', METADATA_UNKNOWN))
+ v1.get("cloud_name", METADATA_UNKNOWN),
+ v1.get("region", METADATA_UNKNOWN),
+ v1.get("platform", METADATA_UNKNOWN),
+ )
if args.json:
- v1['cloud_id'] = cloud_id
- response = json.dumps( # Pretty, sorted json
- v1, indent=1, sort_keys=True, separators=(',', ': '))
+ v1["cloud_id"] = cloud_id
+ response = json.dumps( # Pretty, sorted json
+ v1, indent=1, sort_keys=True, separators=(",", ": ")
+ )
elif args.long:
- response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN))
+ response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN))
else:
response = cloud_id
- sys.stdout.write('%s\n' % response)
+ sys.stdout.write("%s\n" % response)
return 0
@@ -84,7 +109,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
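
A sketch of the exit-code behaviour now documented in handle_args() above; the status check runs before the instance-data file is read:

    from cloudinit.cmd.cloud_id import get_parser, handle_args

    args = get_parser().parse_args(["--long"])
    # 0: cloud-id reported, 1: bad or missing instance data,
    # 2: cloud-init disabled, 3: cloud-init has not run yet.
    exit_code = handle_args("cloud-id", args)
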
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index 3ae28b69..ead5f7a9 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -11,7 +11,7 @@ from cloudinit.stages import Init
def addLogHandlerCLI(logger, log_level):
"""Add a commandline logging handler to emit messages to stderr."""
- formatter = logging.Formatter('%(levelname)s: %(message)s')
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
log.setupBasicLogging(log_level, formatter=formatter)
return logger
@@ -22,4 +22,5 @@ def read_cfg_paths():
init.read_cfg()
return init.paths
+
# vi: ts=4 expandtab
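
addLogHandlerCLI() gives devel subcommands a plain stderr logger; a minimal sketch:

    import logging

    from cloudinit.cmd.devel import addLogHandlerCLI

    LOG = logging.getLogger(__name__)
    addLogHandlerCLI(LOG, logging.DEBUG)  # "LEVEL: message" lines on stderr
    LOG.debug("probing instance data")
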
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
new file mode 100644
index 00000000..a9be0379
--- /dev/null
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -0,0 +1,291 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Handle reconfiguration on hotplug events"""
+import abc
+import argparse
+import os
+import sys
+import time
+
+from cloudinit import log, reporting, stages
+from cloudinit.event import EventScope, EventType
+from cloudinit.net import activators, read_sys_net_safe
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.reporting import events
+from cloudinit.sources import DataSource # noqa: F401
+from cloudinit.sources import DataSourceNotFoundException
+from cloudinit.stages import Init
+
+LOG = log.getLogger(__name__)
+NAME = "hotplug-hook"
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for hotplug-hook utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+
+ parser.description = __doc__
+ parser.add_argument(
+ "-s",
+ "--subsystem",
+ required=True,
+ help="subsystem to act on",
+ choices=["net"],
+ )
+
+ subparsers = parser.add_subparsers(
+ title="Hotplug Action", dest="hotplug_action"
+ )
+ subparsers.required = True
+
+ subparsers.add_parser(
+ "query", help="query if hotplug is enabled for given subsystem"
+ )
+
+ parser_handle = subparsers.add_parser(
+ "handle", help="handle the hotplug event"
+ )
+ parser_handle.add_argument(
+ "-d",
+ "--devpath",
+ required=True,
+ metavar="PATH",
+ help="sysfs path to hotplugged device",
+ )
+ parser_handle.add_argument(
+ "-u",
+ "--udevaction",
+ required=True,
+ help="action to take",
+ choices=["add", "remove"],
+ )
+
+ return parser
+
+
+class UeventHandler(abc.ABC):
+ def __init__(self, id, datasource, devpath, action, success_fn):
+ self.id = id
+ self.datasource = datasource # type: DataSource
+ self.devpath = devpath
+ self.action = action
+ self.success_fn = success_fn
+
+ @abc.abstractmethod
+ def apply(self):
+ raise NotImplementedError()
+
+ @property
+ @abc.abstractmethod
+ def config(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def device_detected(self) -> bool:
+ raise NotImplementedError()
+
+ def detect_hotplugged_device(self):
+ detect_presence = None
+ if self.action == "add":
+ detect_presence = True
+ elif self.action == "remove":
+ detect_presence = False
+ else:
+ raise ValueError("Unknown action: %s" % self.action)
+
+ if detect_presence != self.device_detected():
+ raise RuntimeError(
+ "Failed to detect %s in updated metadata" % self.id
+ )
+
+ def success(self):
+ return self.success_fn()
+
+ def update_metadata(self):
+ result = self.datasource.update_metadata_if_supported(
+ [EventType.HOTPLUG]
+ )
+ if not result:
+ raise RuntimeError(
+ "Datasource %s not updated for event %s"
+ % (self.datasource, EventType.HOTPLUG)
+ )
+ return result
+
+
+class NetHandler(UeventHandler):
+ def __init__(self, datasource, devpath, action, success_fn):
+ # convert devpath to mac address
+ id = read_sys_net_safe(os.path.basename(devpath), "address")
+ super().__init__(id, datasource, devpath, action, success_fn)
+
+ def apply(self):
+ self.datasource.distro.apply_network_config(
+ self.config,
+ bring_up=False,
+ )
+ interface_name = os.path.basename(self.devpath)
+ activator = activators.select_activator()
+ if self.action == "add":
+ if not activator.bring_up_interface(interface_name):
+ raise RuntimeError(
+ "Failed to bring up device: {}".format(self.devpath)
+ )
+ elif self.action == "remove":
+ if not activator.bring_down_interface(interface_name):
+ raise RuntimeError(
+ "Failed to bring down device: {}".format(self.devpath)
+ )
+
+ @property
+ def config(self):
+ return self.datasource.network_config
+
+ def device_detected(self) -> bool:
+ netstate = parse_net_config_data(self.config)
+ found = [
+ iface
+ for iface in netstate.iter_interfaces()
+ if iface.get("mac_address") == self.id
+ ]
+ LOG.debug("Ifaces with ID=%s : %s", self.id, found)
+ return len(found) > 0
+
+
+SUBSYSTEM_PROPERTES_MAP = {
+ "net": (NetHandler, EventScope.NETWORK),
+}
+
+
+def is_enabled(hotplug_init, subsystem):
+ try:
+ scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
+ except KeyError as e:
+ raise Exception(
+ "hotplug-hook: cannot handle events for subsystem: {}".format(
+ subsystem
+ )
+ ) from e
+
+ return stages.update_event_enabled(
+ datasource=hotplug_init.datasource,
+ cfg=hotplug_init.cfg,
+ event_source_type=EventType.HOTPLUG,
+ scope=scope,
+ )
+
+
+def initialize_datasource(hotplug_init, subsystem):
+ LOG.debug("Fetching datasource")
+ datasource = hotplug_init.fetch(existing="trust")
+
+ if not datasource.get_supported_events([EventType.HOTPLUG]):
+ LOG.debug("hotplug not supported for event of type %s", subsystem)
+ return
+
+ if not is_enabled(hotplug_init, subsystem):
+ LOG.debug("hotplug not enabled for event of type %s", subsystem)
+ return
+ return datasource
+
+
+def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
+ datasource = initialize_datasource(hotplug_init, subsystem)
+ if not datasource:
+ return
+ handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
+ LOG.debug("Creating %s event handler", subsystem)
+ event_handler = handler_cls(
+ datasource=datasource,
+ devpath=devpath,
+ action=udevaction,
+ success_fn=hotplug_init._write_to_cache,
+ ) # type: UeventHandler
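+    # The updated metadata may not yet include the hotplugged device, so
+    # retry the update/detect/apply cycle with increasing delays before
+    # giving up and re-raising the last error.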
+ wait_times = [1, 3, 5, 10, 30]
+ for attempt, wait in enumerate(wait_times):
+ LOG.debug(
+ "subsystem=%s update attempt %s/%s",
+ subsystem,
+ attempt,
+ len(wait_times),
+ )
+ try:
+ LOG.debug("Refreshing metadata")
+ event_handler.update_metadata()
+ LOG.debug("Detecting device in updated metadata")
+ event_handler.detect_hotplugged_device()
+ LOG.debug("Applying config change")
+ event_handler.apply()
+ LOG.debug("Updating cache")
+ event_handler.success()
+ break
+ except Exception as e:
+ LOG.debug("Exception while processing hotplug event. %s", e)
+ time.sleep(wait)
+ last_exception = e
+ else:
+ raise last_exception # type: ignore
+
+
+def handle_args(name, args):
+ # Note that if an exception happens between now and when logging is
+ # setup, we'll only see it in the journal
+ hotplug_reporter = events.ReportEventStack(
+ name, __doc__, reporting_enabled=True
+ )
+
+ hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+ hotplug_init.read_cfg()
+
+ log.setupLogging(hotplug_init.cfg)
+ if "reporting" in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get("reporting"))
+ # Logging isn't going to be setup until now
+ LOG.debug(
+ "%s called with the following arguments: {"
+ "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}",
+ name,
+ args.hotplug_action,
+ args.subsystem,
+ args.udevaction if "udevaction" in args else None,
+ args.devpath if "devpath" in args else None,
+ )
+
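+    # "query" only reports whether hotplug is enabled for the subsystem;
+    # otherwise the udev event is passed to handle_hotplug().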
+ with hotplug_reporter:
+ try:
+ if args.hotplug_action == "query":
+ try:
+ datasource = initialize_datasource(
+ hotplug_init, args.subsystem
+ )
+ except DataSourceNotFoundException:
+ print(
+ "Unable to determine hotplug state. No datasource "
+ "detected"
+ )
+ sys.exit(1)
+ print("enabled" if datasource else "disabled")
+ else:
+ handle_hotplug(
+ hotplug_init=hotplug_init,
+ devpath=args.devpath,
+ subsystem=args.subsystem,
+ udevaction=args.udevaction,
+ )
+ except Exception:
+ LOG.exception("Received fatal exception handling hotplug!")
+ raise
+
+ LOG.debug("Exiting hotplug handler")
+ reporting.flush_events()
+
+
+if __name__ == "__main__":
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 51c61cca..d54b809a 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -5,20 +5,19 @@
"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
import argparse
-from datetime import datetime
import os
import shutil
import sys
+from datetime import datetime
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (chdir, copy, ensure_dir, write_file)
+from cloudinit.util import chdir, copy, ensure_dir, write_file
-
-CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
-CLOUDINIT_RUN_DIR = '/run/cloud-init'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
+CLOUDINIT_RUN_DIR = "/run/cloud-init"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def get_parser(parser=None):
@@ -32,27 +31,49 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='collect-logs',
- description='Collect and tar all cloud-init debug info')
- parser.add_argument('--verbose', '-v', action='count', default=0,
- dest='verbosity', help="Be more verbose.")
+ prog="collect-logs",
+ description="Collect and tar all cloud-init debug info",
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="count",
+ default=0,
+ dest="verbosity",
+ help="Be more verbose.",
+ )
parser.add_argument(
- "--tarfile", '-t', default='cloud-init.tar.gz',
- help=('The tarfile to create containing all collected logs.'
- ' Default: cloud-init.tar.gz'))
+ "--tarfile",
+ "-t",
+ default="cloud-init.tar.gz",
+ help=(
+ "The tarfile to create containing all collected logs."
+ " Default: cloud-init.tar.gz"
+ ),
+ )
parser.add_argument(
- "--include-userdata", '-u', default=False, action='store_true',
- dest='userdata', help=(
- 'Optionally include user-data from {0} which could contain'
- ' sensitive information.'.format(USER_DATA_FILE)))
+ "--include-userdata",
+ "-u",
+ default=False,
+ action="store_true",
+ dest="userdata",
+ help=(
+ "Optionally include user-data from {0} which could contain"
+ " sensitive information.".format(USER_DATA_FILE)
+ ),
+ )
return parser
-def _copytree_ignore_sensitive_files(curdir, files):
- """Return a list of files to ignore if we are non-root"""
- if os.getuid() == 0:
- return ()
- return (INSTANCE_JSON_SENSITIVE_FILE,) # Ignore root-permissioned files
+def _copytree_rundir_ignore_files(curdir, files):
+ """Return a list of files to ignore for /run/cloud-init directory"""
+ ignored_files = [
+ "hook-hotplug-cmd", # named pipe for hotplug
+ ]
+ if os.getuid() != 0:
+ # Ignore root-permissioned files
+ ignored_files.append(INSTANCE_JSON_SENSITIVE_FILE)
+ return ignored_files
def _write_command_output_to_file(cmd, filename, msg, verbosity):
@@ -90,48 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
if include_userdata and os.getuid() != 0:
sys.stderr.write(
"To include userdata, root user is required."
- " Try sudo cloud-init collect-logs\n")
+ " Try sudo cloud-init collect-logs\n"
+ )
return 1
tarfile = os.path.abspath(tarfile)
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- log_dir = 'cloud-init-logs-{0}'.format(date)
- with tempdir(dir='/tmp') as tmp_dir:
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ log_dir = "cloud-init-logs-{0}".format(date)
+ with tempdir(dir="/tmp") as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
version = _write_command_output_to_file(
- ['cloud-init', '--version'],
- os.path.join(log_dir, 'version'),
- "cloud-init --version", verbosity)
+ ["cloud-init", "--version"],
+ os.path.join(log_dir, "version"),
+ "cloud-init --version",
+ verbosity,
+ )
dpkg_ver = _write_command_output_to_file(
- ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'dpkg-version'),
- "dpkg version", verbosity)
+ ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"],
+ os.path.join(log_dir, "dpkg-version"),
+ "dpkg version",
+ verbosity,
+ )
if not version:
version = dpkg_ver if dpkg_ver else "not-available"
_debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
- "dmesg output", verbosity)
+ ["dmesg"],
+ os.path.join(log_dir, "dmesg.txt"),
+ "dmesg output",
+ verbosity,
+ )
_write_command_output_to_file(
- ['journalctl', '--boot=0', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'),
- "systemd journal of current boot", verbosity)
+ ["journalctl", "--boot=0", "-o", "short-precise"],
+ os.path.join(log_dir, "journal.txt"),
+ "systemd journal of current boot",
+ verbosity,
+ )
for log in CLOUDINIT_LOGS:
_collect_file(log, log_dir, verbosity)
if include_userdata:
_collect_file(USER_DATA_FILE, log_dir, verbosity)
- run_dir = os.path.join(log_dir, 'run')
+ run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
- shutil.copytree(CLOUDINIT_RUN_DIR,
- os.path.join(run_dir, 'cloud-init'),
- ignore=_copytree_ignore_sensitive_files)
+ try:
+ shutil.copytree(
+ CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, "cloud-init"),
+ ignore=_copytree_rundir_ignore_files,
+ )
+ except shutil.Error as e:
+ sys.stderr.write("Failed collecting file(s) due to error:\n")
+ sys.stderr.write(str(e) + "\n")
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
else:
- _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
- verbosity)
+ _debug(
+ "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR,
+ 1,
+ verbosity,
+ )
with chdir(tmp_dir):
- subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")])
sys.stderr.write("Wrote %s\n" % tarfile)
return 0
@@ -144,10 +184,10 @@ def handle_collect_logs_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- return handle_collect_logs_args('collect-logs', parser.parse_args())
+ return handle_collect_logs_args("collect-logs", parser.parse_args())
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index 4e6a5778..c7671a93 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -9,19 +9,44 @@ from email.mime.text import MIMEText
from cloudinit import log
from cloudinit.handlers import INCLUSION_TYPES_MAP
+
from . import addLogHandlerCLI
-NAME = 'make-mime'
+NAME = "make-mime"
LOG = log.getLogger(NAME)
-EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
- "-a script.sh:x-shellscript > user-data")
+EPILOG = (
+ "Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data"
+)
+
+
+def create_mime_message(files):
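+    """Create a MIME multipart message from (fh, filename, type) tuples.
+
+    Returns a (message, errors) tuple; errors lists any attachments whose
+    content type is not a known cloud-init inclusion type.
+    """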
+ sub_messages = []
+ errors = []
+ for i, (fh, filename, format_type) in enumerate(files):
+ contents = fh.read()
+ sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
+ sub_message.add_header(
+ "Content-Disposition", 'attachment; filename="%s"' % (filename)
+ )
+ content_type = sub_message.get_content_type().lower()
+ if content_type not in get_content_types():
+ msg = (
+ "content type %r for attachment %s " "may be incorrect!"
+ ) % (content_type, i + 1)
+ errors.append(msg)
+ sub_messages.append(sub_message)
+ combined_message = MIMEMultipart()
+ for msg in sub_messages:
+ combined_message.attach(msg)
+ return (combined_message, errors)
def file_content_type(text):
- """ Return file content type by reading the first line of the input. """
+ """Return file content type by reading the first line of the input."""
try:
filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
+ return (open(filename, "r"), filename, content_type.strip())
except ValueError as e:
raise argparse.ArgumentError(
text, "Invalid value for %r" % (text)
@@ -41,26 +66,43 @@ def get_parser(parser=None):
# update the parser's doc and add an epilog to show an example
parser.description = __doc__
parser.epilog = EPILOG
- parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
- action='append', default=[],
- metavar="<file>:<content-type>",
- help=("attach the given file as the specified "
- "content-type"))
- parser.add_argument('-l', '--list-types', action='store_true',
- default=False,
- help='List support cloud-init content types.')
- parser.add_argument('-f', '--force', action='store_true',
- default=False,
- help='Ignore unknown content-type warnings')
+ parser.add_argument(
+ "-a",
+ "--attach",
+ dest="files",
+ type=file_content_type,
+ action="append",
+ default=[],
+ metavar="<file>:<content-type>",
+ help="attach the given file as the specified content-type",
+ )
+ parser.add_argument(
+ "-l",
+ "--list-types",
+ action="store_true",
+ default=False,
+ help="List support cloud-init content types.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ default=False,
+ help="Ignore unknown content-type warnings",
+ )
return parser
def get_content_types(strip_prefix=False):
- """ Return a list of cloud-init supported content types. Optionally
- strip out the leading 'text/' of the type if strip_prefix=True.
+ """Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
"""
- return sorted([ctype.replace("text/", "") if strip_prefix else ctype
- for ctype in INCLUSION_TYPES_MAP.values()])
+ return sorted(
+ [
+ ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()
+ ]
+ )
def handle_args(name, args):
@@ -77,37 +119,24 @@ def handle_args(name, args):
print("\n".join(get_content_types(strip_prefix=True)))
return 0
- sub_messages = []
- errors = []
- for i, (fh, filename, format_type) in enumerate(args.files):
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
- content_type = sub_message.get_content_type().lower()
- if content_type not in get_content_types():
- level = "WARNING" if args.force else "ERROR"
- msg = (level + ": content type %r for attachment %s "
- "may be incorrect!") % (content_type, i + 1)
- sys.stderr.write(msg + '\n')
- errors.append(msg)
- sub_messages.append(sub_message)
- if len(errors) and not args.force:
+ combined_message, errors = create_mime_message(args.files)
+ if errors:
+ level = "WARNING" if args.force else "ERROR"
+ for error in errors:
+ sys.stderr.write(f"{level}: {error}\n")
sys.stderr.write("Invalid content-types, override with --force\n")
- return 1
- combined_message = MIMEMultipart()
- for msg in sub_messages:
- combined_message.attach(msg)
+ if not args.force:
+ return 1
print(combined_message)
return 0
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 80d217ca..18b1e7ff 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -6,15 +6,13 @@ import json
import os
import sys
-from cloudinit.sources.helpers import openstack
+from cloudinit import distros, log, safeyaml
+from cloudinit.net import eni, netplan, network_state, networkd, sysconfig
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
+from cloudinit.sources.helpers import openstack
-from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, network_state, sysconfig
-from cloudinit import log
-
-NAME = 'net-convert'
+NAME = "net-convert"
def get_parser(parser=None):
@@ -27,30 +25,59 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
- parser.add_argument("-p", "--network-data", type=open,
- metavar="PATH", required=True)
- parser.add_argument("-k", "--kind",
- choices=['eni', 'network_data.json', 'yaml',
- 'azure-imds', 'vmware-imc'],
- required=True)
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-D", "--distro",
- choices=[item for sublist in
- distros.OSFAMILIES.values()
- for item in sublist],
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--debug", action='store_true',
- help='enable debug logging to stderr.')
- parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'sysconfig'],
- required=True)
+ parser.add_argument(
+ "-p",
+ "--network-data",
+ type=open,
+ metavar="PATH",
+ required=True,
+ help="The network configuration to read",
+ )
+ parser.add_argument(
+ "-k",
+ "--kind",
+ choices=[
+ "eni",
+ "network_data.json",
+ "yaml",
+ "azure-imds",
+ "vmware-imc",
+ ],
+ required=True,
+ help="The format of the given network config",
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True,
+ )
+ parser.add_argument(
+ "-D",
+ "--distro",
+ choices=[
+ item for sublist in distros.OSFAMILIES.values() for item in sublist
+ ],
+ required=True,
+ )
+ parser.add_argument(
+ "-m",
+ "--mac",
+ metavar="name,mac",
+ action="append",
+ help="interface name to mac mapping",
+ )
+ parser.add_argument(
+ "--debug", action="store_true", help="enable debug logging to stderr."
+ )
+ parser.add_argument(
+ "-O",
+ "--output-kind",
+ choices=["eni", "netplan", "networkd", "sysconfig"],
+ required=True,
+ help="The network config format to emit",
+ )
return parser
@@ -78,57 +105,68 @@ def handle_args(name, args):
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
pre_ns = safeyaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
+ if "network" in pre_ns:
+ pre_ns = pre_ns.get("network")
if args.debug:
- sys.stderr.write('\n'.join(
- ["Input YAML", safeyaml.dumps(pre_ns), ""]))
- elif args.kind == 'network_data.json':
+ sys.stderr.write(
+ "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""])
+ )
+ elif args.kind == "network_data.json":
pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- elif args.kind == 'azure-imds':
+ json.loads(net_data), known_macs=known_macs
+ )
+ elif args.kind == "azure-imds":
pre_ns = azure.parse_network_config(json.loads(net_data))
- elif args.kind == 'vmware-imc':
+ elif args.kind == "vmware-imc":
config = ovf.Config(ovf.ConfigFile(args.network_data.name))
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- " input data")
if args.debug:
- sys.stderr.write('\n'.join(
- ["", "Internal State", safeyaml.dumps(ns), ""]))
+ sys.stderr.write(
+ "\n".join(["", "Internal State", safeyaml.dumps(ns), ""])
+ )
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
if args.output_kind == "eni":
r_cls = eni.Renderer
- config = distro.renderer_configs.get('eni')
+ config = distro.renderer_configs.get("eni")
elif args.output_kind == "netplan":
r_cls = netplan.Renderer
- config = distro.renderer_configs.get('netplan')
+ config = distro.renderer_configs.get("netplan")
# don't run netplan generate/apply
- config['postcmds'] = False
+ config["postcmds"] = False
# trim leading slash
- config['netplan_path'] = config['netplan_path'][1:]
+ config["netplan_path"] = config["netplan_path"][1:]
# enable some netplan features
- config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
- else:
+ config["features"] = ["dhcp-use-domains", "ipv6-mtu"]
+ elif args.output_kind == "networkd":
+ r_cls = networkd.Renderer
+ config = distro.renderer_configs.get("networkd")
+ elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
- config = distro.renderer_configs.get('sysconfig')
+ config = distro.renderer_configs.get("sysconfig")
+ else:
+ raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
- sys.stderr.write(''.join([
- "Read input format '%s' from '%s'.\n" % (
- args.kind, args.network_data.name),
- "Wrote output format '%s' to '%s'\n" % (
- args.output_kind, args.directory)]) + "\n")
+ sys.stderr.write(
+ "".join(
+ [
+ "Read input format '%s' from '%s'.\n"
+ % (args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n"
+ % (args.output_kind, args.directory),
+ ]
+ )
+ + "\n"
+ )
r.render_network_state(network_state=ns, target=args.directory)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..76b16c2e 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,30 +5,47 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
+
from cloudinit.config import schema
-from . import net_convert
-from . import render
-from . import make_mime
+from . import hotplug_hook, make_mime, net_convert, render
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-devel',
- description='Run development cloud-init tools')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-devel",
+ description="Run development cloud-init tools",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
subcmds = [
- ('schema', 'Validate cloud-config files for document schema',
- schema.get_parser, schema.handle_schema_args),
- (net_convert.NAME, net_convert.__doc__,
- net_convert.get_parser, net_convert.handle_args),
- (render.NAME, render.__doc__,
- render.get_parser, render.handle_args),
- (make_mime.NAME, make_mime.__doc__,
- make_mime.get_parser, make_mime.handle_args),
+ (
+ hotplug_hook.NAME,
+ hotplug_hook.__doc__,
+ hotplug_hook.get_parser,
+ hotplug_hook.handle_args,
+ ),
+ (
+ "schema",
+ "Validate cloud-config files for document schema",
+ schema.get_parser,
+ schema.handle_schema_args,
+ ),
+ (
+ net_convert.NAME,
+ net_convert.__doc__,
+ net_convert.get_parser,
+ net_convert.handle_args,
+ ),
+ (render.NAME, render.__doc__, render.get_parser, render.handle_args),
+ (
+ make_mime.NAME,
+ make_mime.__doc__,
+ make_mime.get_parser,
+ make_mime.handle_args,
+ ),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1090aa16..2f9a22a8 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -6,12 +6,13 @@ import argparse
import os
import sys
-from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit import log
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+
from . import addLogHandlerCLI, read_cfg_paths
-NAME = 'render'
+NAME = "render"
LOG = log.getLogger(NAME)
@@ -27,13 +28,24 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- 'user_data', type=str, help='Path to the user-data file to render')
+ "user_data", type=str, help="Path to the user-data file to render"
+ )
+ parser.add_argument(
+ "-i",
+ "--instance-data",
+ type=str,
+ help=(
+ "Optional path to instance-data.json file. Defaults to"
+ " /run/cloud-init/instance-data.json"
+ ),
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Optional path to instance-data.json file. Defaults to'
- ' /run/cloud-init/instance-data.json'))
- parser.add_argument('-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
return parser
@@ -54,34 +66,38 @@ def handle_args(name, args):
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
instance_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ instance_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
if not os.path.exists(instance_data_fn):
- LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ LOG.error("Missing instance-data.json file: %s", instance_data_fn)
return 1
try:
with open(args.user_data) as stream:
user_data = stream.read()
except IOError:
- LOG.error('Missing user-data file: %s', args.user_data)
+ LOG.error("Missing user-data file: %s", args.user_data)
return 1
try:
rendered_payload = render_jinja_payload_from_file(
- payload=user_data, payload_fn=args.user_data,
+ payload=user_data,
+ payload_fn=args.user_data,
instance_data_file=instance_data_fn,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
except RuntimeError as e:
- LOG.error('Cannot render from instance data: %s', str(e))
+ LOG.error("Cannot render from instance data: %s", str(e))
return 1
if not rendered_payload:
- LOG.error('Unable to render user-data file: %s', args.user_data)
+ LOG.error("Unable to render user-data file: %s", args.user_data)
return 1
sys.stdout.write(rendered_payload)
return 0
@@ -89,10 +105,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
deleted file mode 100644
index ddfd58e1..00000000
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from datetime import datetime
-import os
-from io import StringIO
-
-from cloudinit.cmd.devel import logs
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, mock, wrap_and_call)
-from cloudinit.subp import subp
-from cloudinit.util import ensure_dir, load_file, write_file
-
-
-@mock.patch('cloudinit.cmd.devel.logs.os.getuid')
-class TestCollectLogs(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestCollectLogs, self).setUp()
- self.new_root = self.tmp_dir()
- self.run_dir = self.tmp_path('run', self.new_root)
-
- def test_collect_logs_with_userdata_requires_root_user(self, m_getuid):
- """collect-logs errors when non-root user collects userdata ."""
- m_getuid.return_value = 100 # non-root
- output_tarfile = self.tmp_path('logs.tgz')
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(
- 1, logs.collect_logs(output_tarfile, include_userdata=True))
- self.assertEqual(
- 'To include userdata, root user is required.'
- ' Try sudo cloud-init collect-logs\n',
- m_stderr.getvalue())
-
- def test_collect_logs_creates_tarfile(self, m_getuid):
- """collect-logs creates a tarfile with all related cloud-init info."""
- m_getuid.return_value = 100
- log1 = self.tmp_path('cloud-init.log', self.new_root)
- write_file(log1, 'cloud-init-log')
- log2 = self.tmp_path('cloud-init-output.log', self.new_root)
- write_file(log2, 'cloud-init-output-log')
- ensure_dir(self.run_dir)
- write_file(self.tmp_path('results.json', self.run_dir), 'results')
- write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
- 'sensitive')
- output_tarfile = self.tmp_path('logs.tgz')
-
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- date_logdir = 'cloud-init-logs-{0}'.format(date)
-
- version_out = '/usr/bin/cloud-init 18.2fake\n'
- expected_subp = {
- ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
- '0.7fake\n',
- ('cloud-init', '--version'): version_out,
- ('dmesg',): 'dmesg-out\n',
- ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
- ('tar', 'czvf', output_tarfile, date_logdir): ''
- }
-
- def fake_subp(cmd):
- cmd_tuple = tuple(cmd)
- if cmd_tuple not in expected_subp:
- raise AssertionError(
- 'Unexpected command provided to subp: {0}'.format(cmd))
- if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
- subp(cmd) # Pass through tar cmd so we can check output
- return expected_subp[cmd_tuple], ''
-
- fake_stderr = mock.MagicMock()
-
- wrap_and_call(
- 'cloudinit.cmd.devel.logs',
- {'subp': {'side_effect': fake_subp},
- 'sys.stderr': {'new': fake_stderr},
- 'CLOUDINIT_LOGS': {'new': [log1, log2]},
- 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
- logs.collect_logs, output_tarfile, include_userdata=False)
- # unpack the tarfile and check file contents
- subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertFalse(
- os.path.exists(
- os.path.join(out_logdir, 'run', 'cloud-init',
- INSTANCE_JSON_SENSITIVE_FILE)),
- 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE)
- self.assertEqual(
- '0.7fake\n',
- load_file(os.path.join(out_logdir, 'dpkg-version')))
- self.assertEqual(version_out,
- load_file(os.path.join(out_logdir, 'version')))
- self.assertEqual(
- 'cloud-init-log',
- load_file(os.path.join(out_logdir, 'cloud-init.log')))
- self.assertEqual(
- 'cloud-init-output-log',
- load_file(os.path.join(out_logdir, 'cloud-init-output.log')))
- self.assertEqual(
- 'dmesg-out\n',
- load_file(os.path.join(out_logdir, 'dmesg.txt')))
- self.assertEqual(
- 'journal-out\n',
- load_file(os.path.join(out_logdir, 'journal.txt')))
- self.assertEqual(
- 'results',
- load_file(
- os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
- fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
-
- def test_collect_logs_includes_optional_userdata(self, m_getuid):
- """collect-logs include userdata when --include-userdata is set."""
- m_getuid.return_value = 0
- log1 = self.tmp_path('cloud-init.log', self.new_root)
- write_file(log1, 'cloud-init-log')
- log2 = self.tmp_path('cloud-init-output.log', self.new_root)
- write_file(log2, 'cloud-init-output-log')
- userdata = self.tmp_path('user-data.txt', self.new_root)
- write_file(userdata, 'user-data')
- ensure_dir(self.run_dir)
- write_file(self.tmp_path('results.json', self.run_dir), 'results')
- write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
- 'sensitive')
- output_tarfile = self.tmp_path('logs.tgz')
-
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- date_logdir = 'cloud-init-logs-{0}'.format(date)
-
- version_out = '/usr/bin/cloud-init 18.2fake\n'
- expected_subp = {
- ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
- '0.7fake',
- ('cloud-init', '--version'): version_out,
- ('dmesg',): 'dmesg-out\n',
- ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
- ('tar', 'czvf', output_tarfile, date_logdir): ''
- }
-
- def fake_subp(cmd):
- cmd_tuple = tuple(cmd)
- if cmd_tuple not in expected_subp:
- raise AssertionError(
- 'Unexpected command provided to subp: {0}'.format(cmd))
- if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
- subp(cmd) # Pass through tar cmd so we can check output
- return expected_subp[cmd_tuple], ''
-
- fake_stderr = mock.MagicMock()
-
- wrap_and_call(
- 'cloudinit.cmd.devel.logs',
- {'subp': {'side_effect': fake_subp},
- 'sys.stderr': {'new': fake_stderr},
- 'CLOUDINIT_LOGS': {'new': [log1, log2]},
- 'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
- 'USER_DATA_FILE': {'new': userdata}},
- logs.collect_logs, output_tarfile, include_userdata=True)
- # unpack the tarfile and check file contents
- subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertEqual(
- 'user-data',
- load_file(os.path.join(out_logdir, 'user-data.txt')))
- self.assertEqual(
- 'sensitive',
- load_file(os.path.join(out_logdir, 'run', 'cloud-init',
- INSTANCE_JSON_SENSITIVE_FILE)))
- fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
deleted file mode 100644
index a7fcf2ce..00000000
--- a/cloudinit/cmd/devel/tests/test_render.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-from io import StringIO
-
-from collections import namedtuple
-from cloudinit.cmd.devel import render
-from cloudinit.helpers import Paths
-from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
-from cloudinit.util import ensure_dir, write_file
-
-
-class TestRender(CiTestCase):
-
- with_logs = True
-
- args = namedtuple('renderargs', 'user_data instance_data debug')
-
- def setUp(self):
- super(TestRender, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_handle_args_error_on_missing_user_data(self):
- """When user_data file path does not exist, log an error."""
- absent_file = self.tmp_path('user-data', dir=self.tmp)
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{}')
- args = self.args(
- user_data=absent_file, instance_data=instance_data, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'Missing user-data file: %s' % absent_file,
- self.logs.getvalue())
-
- def test_handle_args_error_on_missing_instance_data(self):
- """When instance_data file path does not exist, log an error."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- absent_file = self.tmp_path('instance-data', dir=self.tmp)
- args = self.args(
- user_data=user_data, instance_data=absent_file, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'Missing instance-data.json file: %s' % absent_file,
- self.logs.getvalue())
-
- def test_handle_args_defaults_instance_data(self):
- """When no instance_data argument, default to configured run_dir."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- self.assertIn(
- 'Missing instance-data.json file: %s' % json_file,
- self.logs.getvalue())
-
- def test_handle_args_root_fallback_from_sensitive_instance_data(self):
- """When root user defaults to sensitive.json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- self.assertEqual(1, render.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- self.assertIn(
- 'WARNING: Missing root-readable %s. Using redacted %s' % (
- json_sensitive, json_file), self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data.json file: %s' % json_file,
- self.logs.getvalue())
-
- def test_handle_args_root_uses_sensitive_instance_data(self):
- """When root user, and no instance-data arg, use sensitive.json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- write_file(json_sensitive, '{"my-var": "jinja worked"}')
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- args = self.args(
- user_data=user_data, instance_data=None, debug=False)
- with mock.patch('sys.stderr', new_callable=StringIO):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- self.assertEqual(0, render.handle_args('anyname', args))
- self.assertIn('rendering: jinja worked', m_stdout.getvalue())
-
- @skipUnlessJinja()
- def test_handle_args_renders_instance_data_vars_in_template(self):
- """If user_data file is a jinja template render instance-data vars."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
- user_data=user_data, instance_data=instance_data, debug=True)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, render.handle_args('anyname', args))
- self.assertIn(
- 'DEBUG: Converted jinja variables\n{', self.logs.getvalue())
- self.assertIn(
- 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue())
- self.assertEqual('rendering: jinja worked', m_stdout.getvalue())
-
- @skipUnlessJinja()
- def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
- """If user_data file has invalid jinja operations log warnings."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- write_file(user_data, '##template: jinja\nrendering: {{ my-var }}')
- instance_data = self.tmp_path('instance-data', dir=self.tmp)
- write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
- user_data=user_data, instance_data=instance_data, debug=True)
- with mock.patch('sys.stderr', new_callable=StringIO):
- self.assertEqual(1, render.handle_args('anyname', args))
- self.assertIn(
- 'WARNING: Ignoring jinja template for %s: Undefined jinja'
- ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
- ' "my_var"?' % user_data,
- self.logs.getvalue())
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index a5446da7..c9be41b3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
@@ -20,8 +19,10 @@ import time
import traceback
from cloudinit import patcher
-patcher.patch() # noqa
+patcher.patch_logging()
+
+from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import log as logging
from cloudinit import netinfo
from cloudinit import signal_handler
@@ -35,8 +36,7 @@ from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
+from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
from cloudinit import atomic_helper
@@ -45,8 +45,10 @@ from cloudinit import dhclient_hook
# Welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
- "{timestamp}. Up {uptime} seconds.")
+WELCOME_MSG_TPL = (
+ "Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds."
+)
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -54,9 +56,9 @@ MOD_SECTION_TPL = "cloud_%s_modules"
# Frequency shortname to full name
# (so users don't have to remember the full name...)
FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
+ "instance": PER_INSTANCE,
+ "always": PER_ALWAYS,
+ "once": PER_ONCE,
}
LOG = logging.getLogger()
@@ -64,21 +66,20 @@ LOG = logging.getLogger()
# Used for when a logger may not be active
# and we still want to print exceptions...
-def print_exc(msg=''):
+def print_exc(msg=""):
if msg:
sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
def welcome(action, msg=None):
if not msg:
msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
+ util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG)
return msg
@@ -87,7 +88,8 @@ def welcome_format(action):
version=version.version_string(),
uptime=util.uptime(),
timestamp=util.time_rfc2822(),
- action=action)
+ action=action,
+ )
def extract_fns(args):
@@ -108,29 +110,31 @@ def run_module_section(mods, action_name, section):
(which_ran, failures) = mods.run_section(full_section_name)
total_attempted = len(which_ran) + len(failures)
if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
+ msg = "No '%s' modules to run under section '%s'" % (
+ action_name,
+ full_section_name,
+ )
sys.stderr.write("%s\n" % (msg))
LOG.debug(msg)
return []
else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
+ LOG.debug(
+ "Ran %s modules with %s failures", len(which_ran), len(failures)
+ )
return failures
def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
+ if cfg.get("reporting"):
+ reporting.update_configuration(cfg.get("reporting"))
-def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")):
data = util.keyval_str_to_dict(cmdline)
for key in names:
if key in data:
return key, data[key]
- raise KeyError("No keys (%s) found in string '%s'" %
- (cmdline, names))
+ raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names))
def attempt_cmdline_url(path, network=True, cmdline=None):
@@ -164,51 +168,96 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
if path_is_local and os.path.exists(path):
if network:
- m = ("file '%s' existed, possibly from local stage download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.INFO
if path_is_local:
level = logging.DEBUG
else:
- m = ("file '%s' existed, possibly from previous boot download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.WARN
return (level, m)
- kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ kwargs = {"url": url, "timeout": 10, "retries": 2}
if network or path_is_local:
level = logging.WARN
- kwargs['sec_between'] = 1
+ kwargs["sec_between"] = 1
else:
level = logging.DEBUG
- kwargs['sec_between'] = .1
+ kwargs["sec_between"] = 0.1
data = None
- header = b'#cloud-config'
+ header = b"#cloud-config"
try:
resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
- if cmdline_name == 'cloud-config-url':
+ if cmdline_name == "cloud-config-url":
level = logging.WARN
else:
level = logging.INFO
return (
level,
- "contents of '%s' did not start with %s" % (url, header))
+ "contents of '%s' did not start with %s" % (url, header),
+ )
else:
- return (level,
- "url '%s' returned code %s. Ignoring." % (url, resp.code))
+ return (
+ level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code),
+ )
except url_helper.UrlError as e:
return (level, "retrieving url '%s' failed: %s" % (url, e))
util.write_file(path, data, mode=0o600)
- return (logging.INFO,
- "wrote cloud-config data from %s='%s' to %s" %
- (cmdline_name, url, path))
+ return (
+ logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s"
+ % (cmdline_name, url, path),
+ )
+
+
+def purge_cache_on_python_version_change(init):
+ """Purge the cache if python version changed on us.
+
+ There could be changes not represented in our cache (obj.pkl) after we
+ upgrade to a new version of python, so at that point clear the cache
+ """
+ current_python_version = "%d.%d" % (
+ sys.version_info.major,
+ sys.version_info.minor,
+ )
+ python_version_path = os.path.join(
+ init.paths.get_cpath("data"), "python-version"
+ )
+ if os.path.exists(python_version_path):
+ cached_python_version = open(python_version_path).read()
+ # The Python version has changed out from under us, anything that was
+ # pickled previously is likely useless due to API changes.
+ if cached_python_version != current_python_version:
+ LOG.debug("Python version change detected. Purging cache")
+ init.purge_cache(True)
+ util.write_file(python_version_path, current_python_version)
+ else:
+ if os.path.exists(init.paths.get_ipath_cur("obj_pkl")):
+ LOG.info(
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown."
+ )
+ util.write_file(python_version_path, current_python_version)
+
+
+def _should_bring_up_interfaces(init, args):
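+    """Decide whether interfaces should be brought up.
+
+    Returns False when 'disable_network_activation' is set in the config or
+    when running the local (pre-network) stage.
+    """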
+ if util.get_cfg_option_bool(init.cfg, "disable_network_activation"):
+ return False
+ return not args.local
def main_init(name, args):
@@ -216,10 +265,14 @@ def main_init(name, args):
if args.local:
deps = [sources.DEP_FILESYSTEM]
- early_logs = [attempt_cmdline_url(
- path=os.path.join("%s.d" % CLOUD_CONFIG,
- "91_kernel_cmdline_url.cfg"),
- network=not args.local)]
+ early_logs = [
+ attempt_cmdline_url(
+ path=os.path.join(
+ "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg"
+ ),
+ network=not args.local,
+ )
+ ]
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -255,8 +308,9 @@ def main_init(name, args):
early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(init.cfg)
apply_reporting_cfg(init.cfg)
@@ -277,14 +331,17 @@ def main_init(name, args):
util.logexc(LOG, "Failed to initialize, likely bad things to come!")
# Stage 4
path_helper = init.paths
+ purge_cache_on_python_version_change(init)
mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
+ LOG.debug(
+ "Checking to see if files that we need already"
+ " exist from a previous run that would allow us"
+ " to stop early."
+ )
# no-net is written by upstart cloud-init-nonet when network failed
# to come up
stop_files = [
@@ -296,15 +353,18 @@ def main_init(name, args):
existing_files.append(fn)
if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
+ LOG.debug(
+ "[%s] Exiting. stop file %s existed", mode, existing_files
+ )
return (None, [])
else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
+ LOG.debug(
+ "Execution continuing, no previous run detected that"
+ " would allow us to stop early."
+ )
else:
existing = "check"
- mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
if mcfg:
LOG.debug("manual cache clean set from config")
existing = "trust"
@@ -319,13 +379,17 @@ def main_init(name, args):
util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
+ bring_up_interfaces = _should_bring_up_interfaces(init, args)
try:
init.fetch(existing=existing)
# if in network mode, and the datasource is local
# then work was done at that stage.
if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s in local mode",
+ mode,
+ init.datasource,
+ )
return (None, [])
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
@@ -335,56 +399,71 @@ def main_init(name, args):
if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
+ util.logexc(
+ LOG, "No instance datasource found! Likely bad things to come!"
+ )
if not args.force:
- init.apply_network_config(bring_up=not args.local)
+ init.apply_network_config(bring_up=bring_up_interfaces)
LOG.debug("[%s] Exiting without datasource", mode)
if mode == sources.DSMODE_LOCAL:
return (None, [])
else:
return (None, ["No instance datasource found."])
else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
+ LOG.debug(
+ "[%s] barreling on in force mode without datasource", mode
+ )
_maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
+ LOG.debug(
+ "[%s] %s will now be targeting instance id: %s. new=%s",
+ mode,
+ name,
+ iid,
+ init.is_new_instance(),
+ )
if mode == sources.DSMODE_LOCAL:
# Before network comes up, set any configured hostname to allow
# dhcp clients to advertize this hostname to any DDNS services
# LP: #1746455.
- _maybe_set_hostname(init, stage='local', retry_stage='network')
- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
+ _maybe_set_hostname(init, stage="local", retry_stage="network")
+ init.apply_network_config(bring_up=bring_up_interfaces)
if mode == sources.DSMODE_LOCAL:
if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s not in local mode.",
+ mode,
+ init.datasource,
+ )
return (init.datasource, [])
else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] %s is in local mode, will apply init modules now.",
+ mode,
+ init.datasource,
+ )
# Give the datasource a chance to use network resources.
# This is used on Azure to communicate with the fabric over network.
init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
- _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config')
+ _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config")
# Stage 7
try:
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ (ran, _results) = init.cloudify().run(
+ "consume_data",
+ init.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
if not ran:
# Just consume anything that is set to run per-always
# if nothing ran in the per-instance code
@@ -396,6 +475,12 @@ def main_init(name, args):
util.logexc(LOG, "Consuming user data failed!")
return (init.datasource, ["Consuming user data failed!"])
+ # Validate user-data adheres to schema definition
+ if os.path.exists(init.paths.get_ipath_cur("userdata_raw")):
+ validate_cloudconfig_schema(config=init.cfg, strict=False)
+ else:
+ LOG.debug("Skipping user-data validation. No user-data found.")
+
apply_reporting_cfg(init.cfg)
# Stage 8 - re-read and apply relevant cloud-config to include user-data
@@ -406,8 +491,7 @@ def main_init(name, args):
errfmt_orig = errfmt
(outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warning("Stdout, stderr changing to (%s, %s)",
- outfmt, errfmt)
+ LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
(outfmt, errfmt) = util.fixup_output(mods.cfg, name)
except Exception:
util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -423,11 +507,11 @@ def main_init(name, args):
def di_report_warn(datasource, cfg):
- if 'di_report' not in cfg:
+ if "di_report" not in cfg:
LOG.debug("no di_report found in config.")
return
- dicfg = cfg['di_report']
+ dicfg = cfg["di_report"]
if dicfg is None:
# ds-identify may write 'di_report:\n #comment\n'
# which reads as {'di_report': None}
@@ -438,7 +522,7 @@ def di_report_warn(datasource, cfg):
LOG.warning("di_report config not a dictionary: %s", dicfg)
return
- dslist = dicfg.get('datasource_list')
+ dslist = dicfg.get("datasource_list")
if dslist is None:
LOG.warning("no 'datasource_list' found in di_report.")
return
@@ -450,18 +534,26 @@ def di_report_warn(datasource, cfg):
# where Name is the thing that shows up in datasource_list.
modname = datasource.__module__.rpartition(".")[2]
if modname.startswith(sources.DS_PREFIX):
- modname = modname[len(sources.DS_PREFIX):]
+ modname = modname[len(sources.DS_PREFIX) :]
else:
- LOG.warning("Datasource '%s' came from unexpected module '%s'.",
- datasource, modname)
+ LOG.warning(
+ "Datasource '%s' came from unexpected module '%s'.",
+ datasource,
+ modname,
+ )
if modname in dslist:
- LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
- datasource, modname, dslist)
+ LOG.debug(
+ "used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource,
+ modname,
+ dslist,
+ )
return
- warnings.show_warning('dsid_missing_source', cfg,
- source=modname, dslist=str(dslist))
+ warnings.show_warning(
+ "dsid_missing_source", cfg, source=modname, dslist=str(dslist)
+ )
def main_modules(action_name, args):
@@ -485,8 +577,10 @@ def main_modules(action_name, args):
init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found, theres nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
+ msg = (
+ "Can not apply stage %s, no datasource found! Likely bad "
+ "things to come!" % name
+ )
util.logexc(LOG, msg)
print_exc(msg)
if not args.force:
@@ -503,8 +597,9 @@ def main_modules(action_name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -537,10 +632,12 @@ def main_single(name, args):
# There was no datasource found,
# that might be bad (or ok) depending on
# the module being ran (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
+ util.logexc(
+ LOG, "Failed to fetch your datasource, likely bad things to come!"
+ )
+ print_exc(
+ "Failed to fetch your datasource, likely bad things to come!"
+ )
if not args.force:
return 1
_maybe_persist_instance_data(init)
@@ -562,8 +659,9 @@ def main_single(name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -572,9 +670,7 @@ def main_single(name, args):
welcome(name, msg=w_msg)
# Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
+ (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq)
if failures:
LOG.warning("Ran %s but it failed!", mod_name)
return 1
@@ -597,7 +693,12 @@ def status_wrapper(name, args, data_d=None, link_d=None):
result_path = os.path.join(data_d, "result.json")
result_link = os.path.join(link_d, "result.json")
- util.ensure_dirs((data_d, link_d,))
+ util.ensure_dirs(
+ (
+ data_d,
+ link_d,
+ )
+ )
(_name, functor) = args.action
@@ -611,14 +712,20 @@ def status_wrapper(name, args, data_d=None, link_d=None):
else:
raise ValueError("unknown name: %s" % name)
- modes = ('init', 'init-local', 'modules-init', 'modules-config',
- 'modules-final')
+ modes = (
+ "init",
+ "init-local",
+ "modules-init",
+ "modules-config",
+ "modules-final",
+ )
if mode not in modes:
raise ValueError(
- "Invalid cloud init mode specified '{0}'".format(mode))
+ "Invalid cloud init mode specified '{0}'".format(mode)
+ )
status = None
- if mode == 'init-local':
+ if mode == "init-local":
for f in (status_link, result_link, status_path, result_path):
util.del_file(f)
else:
@@ -628,45 +735,46 @@ def status_wrapper(name, args, data_d=None, link_d=None):
pass
nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
+ "errors": [],
+ "start": None,
+ "finished": None,
}
if status is None:
- status = {'v1': {}}
- status['v1']['datasource'] = None
+ status = {"v1": {}}
+ status["v1"]["datasource"] = None
for m in modes:
- if m not in status['v1']:
- status['v1'][m] = nullstatus.copy()
+ if m not in status["v1"]:
+ status["v1"][m] = nullstatus.copy()
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
+ v1 = status["v1"]
+ v1["stage"] = mode
+ v1[mode]["start"] = time.time()
atomic_helper.write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
+ util.sym_link(
+ os.path.relpath(status_path, link_d), status_link, force=True
+ )
try:
ret = functor(name, args)
- if mode in ('init', 'init-local'):
+ if mode in ("init", "init-local"):
(datasource, errors) = ret
if datasource is not None:
- v1['datasource'] = str(datasource)
+ v1["datasource"] = str(datasource)
else:
errors = ret
- v1[mode]['errors'] = [str(e) for e in errors]
+ v1[mode]["errors"] = [str(e) for e in errors]
except Exception as e:
util.logexc(LOG, "failed stage %s", mode)
print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
+ v1[mode]["errors"] = [str(e)]
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
+ v1[mode]["finished"] = time.time()
+ v1["stage"] = None
atomic_helper.write_json(status_path, status)
@@ -674,23 +782,26 @@ def status_wrapper(name, args, data_d=None, link_d=None):
# write the 'finished' file
errors = []
for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
+ if v1[m]["errors"]:
+ errors.extend(v1[m].get("errors", []))
atomic_helper.write_json(
- result_path, {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
+ result_path,
+ {"v1": {"datasource": v1["datasource"], "errors": errors}},
+ )
+ util.sym_link(
+ os.path.relpath(result_path, link_d), result_link, force=True
+ )
- return len(v1[mode]['errors'])
+ return len(v1[mode]["errors"])
def _maybe_persist_instance_data(init):
"""Write instance-data.json file if absent and datasource is restored."""
if init.ds_restored:
instance_data_file = os.path.join(
- init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE
+ )
if not os.path.exists(instance_data_file):
init.datasource.persist_instance_data()
@@ -703,18 +814,23 @@ def _maybe_set_hostname(init, stage, retry_stage):
"""
cloud = init.cloudify()
(hostname, _fqdn) = util.get_hostname_fqdn(
- init.cfg, cloud, metadata_only=True)
+ init.cfg, cloud, metadata_only=True
+ )
if hostname: # meta-data or user-data hostname content
try:
- cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None)
+ cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
except cc_set_hostname.SetHostnameError as e:
LOG.debug(
- 'Failed setting hostname in %s stage. Will'
- ' retry in %s stage. Error: %s.', stage, retry_stage, str(e))
+ "Failed setting hostname in %s stage. Will"
+ " retry in %s stage. Error: %s.",
+ stage,
+ retry_stage,
+ str(e),
+ )
def main_features(name, args):
- sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+ sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n")
def main(sysv_args=None):
@@ -724,129 +840,182 @@ def main(sysv_args=None):
sysv_args = sysv_args[1:]
# Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
+ parser.add_argument(
+ "--version",
+ "-v",
+ action="version",
+ version="%(prog)s " + (version.version_string()),
+ )
+ parser.add_argument(
+ "--file",
+ "-f",
+ action="append",
+ dest="files",
+ help="additional yaml configuration files to use",
+ type=argparse.FileType("rb"),
+ )
+ parser.add_argument(
+ "--debug",
+ "-d",
+ action="store_true",
+ help="show additional pre-action logging (default: %(default)s)",
+ default=False,
+ )
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help=(
+ "force running even if no datasource is"
+ " found (use at your own risk)"
+ ),
+ dest="force",
+ default=False,
+ )
parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
# Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
+ parser_init = subparsers.add_parser(
+ "init", help="initializes cloud-init and performs initial modules"
+ )
+ parser_init.add_argument(
+ "--local",
+ "-l",
+ action="store_true",
+ help="start in local mode (default: %(default)s)",
+ default=False,
+ )
# This is used so that we can know which action is selected +
# the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
+ parser_init.set_defaults(action=("init", main_init))
# These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
+ parser_mod = subparsers.add_parser(
+ "modules", help="activates modules using a given configuration key"
+ )
+ parser_mod.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ help="module configuration name to use (default: %(default)s)",
+ default="config",
+ choices=("init", "config", "final"),
+ )
+ parser_mod.set_defaults(action=("modules", main_modules))
# This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
- help=('run a single module '))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
+ parser_single = subparsers.add_parser(
+ "single", help="run a single module "
+ )
+ parser_single.add_argument(
+ "--name",
+ "-n",
+ action="store",
+ help="module name to run",
+ required=True,
+ )
+ parser_single.add_argument(
+ "--frequency",
+ action="store",
+ help="frequency of the module",
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()),
+ )
+ parser_single.add_argument(
+ "--report",
+ action="store_true",
+ help="enable reporting",
+ required=False,
+ )
+ parser_single.add_argument(
+ "module_args",
+ nargs="*",
+ metavar="argument",
+ help="any additional arguments to pass to this module",
+ )
+ parser_single.set_defaults(action=("single", main_single))
parser_query = subparsers.add_parser(
- 'query',
- help='Query standardized instance metadata from the command line.')
+ "query",
+ help="Query standardized instance metadata from the command line.",
+ )
parser_dhclient = subparsers.add_parser(
- dhclient_hook.NAME, help=dhclient_hook.__doc__)
+ dhclient_hook.NAME, help=dhclient_hook.__doc__
+ )
dhclient_hook.get_parser(parser_dhclient)
- parser_features = subparsers.add_parser('features',
- help=('list defined features'))
- parser_features.set_defaults(action=('features', main_features))
+ parser_features = subparsers.add_parser(
+ "features", help="list defined features"
+ )
+ parser_features.set_defaults(action=("features", main_features))
parser_analyze = subparsers.add_parser(
- 'analyze', help='Devel tool: Analyze cloud-init logs and data')
+ "analyze", help="Devel tool: Analyze cloud-init logs and data"
+ )
- parser_devel = subparsers.add_parser(
- 'devel', help='Run development tools')
+ parser_devel = subparsers.add_parser("devel", help="Run development tools")
parser_collect_logs = subparsers.add_parser(
- 'collect-logs', help='Collect and tar all cloud-init debug info')
+ "collect-logs", help="Collect and tar all cloud-init debug info"
+ )
parser_clean = subparsers.add_parser(
- 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
+ "clean", help="Remove logs and artifacts so cloud-init can re-run."
+ )
parser_status = subparsers.add_parser(
- 'status', help='Report cloud-init status or wait on completion.')
+ "status", help="Report cloud-init status or wait on completion."
+ )
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
- if sysv_args[0] == 'analyze':
+ if sysv_args[0] == "analyze":
from cloudinit.analyze.__main__ import get_parser as analyze_parser
+
# Construct analyze subcommand parser
analyze_parser(parser_analyze)
- elif sysv_args[0] == 'devel':
+ elif sysv_args[0] == "devel":
from cloudinit.cmd.devel.parser import get_parser as devel_parser
+
# Construct devel subcommand parser
devel_parser(parser_devel)
- elif sysv_args[0] == 'collect-logs':
+ elif sysv_args[0] == "collect-logs":
from cloudinit.cmd.devel.logs import (
- get_parser as logs_parser, handle_collect_logs_args)
+ get_parser as logs_parser,
+ handle_collect_logs_args,
+ )
+
logs_parser(parser_collect_logs)
parser_collect_logs.set_defaults(
- action=('collect-logs', handle_collect_logs_args))
- elif sysv_args[0] == 'clean':
+ action=("collect-logs", handle_collect_logs_args)
+ )
+ elif sysv_args[0] == "clean":
from cloudinit.cmd.clean import (
- get_parser as clean_parser, handle_clean_args)
+ get_parser as clean_parser,
+ handle_clean_args,
+ )
+
clean_parser(parser_clean)
- parser_clean.set_defaults(
- action=('clean', handle_clean_args))
- elif sysv_args[0] == 'query':
+ parser_clean.set_defaults(action=("clean", handle_clean_args))
+ elif sysv_args[0] == "query":
from cloudinit.cmd.query import (
- get_parser as query_parser, handle_args as handle_query_args)
+ get_parser as query_parser,
+ handle_args as handle_query_args,
+ )
+
query_parser(parser_query)
- parser_query.set_defaults(
- action=('render', handle_query_args))
- elif sysv_args[0] == 'status':
+ parser_query.set_defaults(action=("render", handle_query_args))
+ elif sysv_args[0] == "status":
from cloudinit.cmd.status import (
- get_parser as status_parser, handle_status_args)
+ get_parser as status_parser,
+ handle_status_args,
+ )
+
status_parser(parser_status)
- parser_status.set_defaults(
- action=('status', handle_status_args))
+ parser_status.set_defaults(action=("status", handle_status_args))
args = parser.parse_args(args=sysv_args)
@@ -870,14 +1039,20 @@ def main(sysv_args=None):
if args.local:
rname, rdesc = ("init-local", "searching for local datasources")
else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
+ rname, rdesc = (
+ "init-network",
+ "searching for network datasources",
+ )
elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
+ rname, rdesc = (
+ "modules-%s" % args.mode,
+ "running modules for %s" % args.mode,
+ )
elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
+ rname, rdesc = (
+ "single/%s" % args.name,
+ "running single module %s" % args.name,
+ )
report_on = args.report
else:
rname = name
@@ -885,19 +1060,24 @@ def main(sysv_args=None):
report_on = False
args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
+ rname, rdesc, reporting_enabled=report_on
+ )
with args.reporter:
retval = util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
+ logfunc=LOG.debug,
+ msg="cloud-init mode '%s'" % name,
+ get_uptime=True,
+ func=functor,
+ args=(name, args),
+ )
reporting.flush_events()
return retval
-if __name__ == '__main__':
- if 'TZ' not in os.environ:
- os.environ['TZ'] = ":/etc/localtime"
+if __name__ == "__main__":
+ if "TZ" not in os.environ:
+ os.environ["TZ"] = ":/etc/localtime"
return_value = main(sys.argv)
if return_value:
sys.exit(return_value)
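
For orientation, the status_wrapper() hunks above keep writing status.json (typically /run/cloud-init/status.json) as a "v1" dict keyed by the run modes, with per-mode start/finished/errors entries plus top-level datasource and stage fields. A minimal sketch of that shape, with illustrative values rather than output from a real run:

# Approximate shape of status.json as written by status_wrapper (values are illustrative).
status = {
    "v1": {
        "datasource": None,  # filled with str(datasource) after the init stages
        "stage": "init",     # current mode; reset to None once the stage finishes
        "init-local": {"errors": [], "start": None, "finished": None},
        "init": {"errors": [], "start": 1648300000.0, "finished": None},
        "modules-init": {"errors": [], "start": None, "finished": None},
        "modules-config": {"errors": [], "start": None, "finished": None},
        "modules-final": {"errors": [], "start": None, "finished": None},
    }
}
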
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 07db9552..46f17699 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -14,19 +14,24 @@ output; if this fails, they are treated as binary.
"""
import argparse
-from errno import EACCES
import os
import sys
+from errno import EACCES
-from cloudinit.handlers.jinja_template import (
- convert_jinja_instance_data, render_jinja_payload)
+from cloudinit import log, util
from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
-from cloudinit import log
+from cloudinit.handlers.jinja_template import (
+ convert_jinja_instance_data,
+ get_jinja_variable_alias,
+ render_jinja_payload,
+)
from cloudinit.sources import (
- INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
-from cloudinit import util
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
-NAME = 'query'
+NAME = "query"
LOG = log.getLogger(NAME)
@@ -40,41 +45,79 @@ def get_parser(parser=None):
@returns: ArgumentParser with proper argument configuration.
"""
if not parser:
- parser = argparse.ArgumentParser(
- prog=NAME, description=__doc__)
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- '-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
- % INSTANCE_JSON_FILE))
+ "-i",
+ "--instance-data",
+ type=str,
+ help="Path to instance-data.json file. Default is /run/cloud-init/%s"
+ % INSTANCE_JSON_FILE,
+ )
parser.add_argument(
- '-l', '--list-keys', action='store_true', default=False,
- help=('List query keys available at the provided instance-data'
- ' <varname>.'))
+ "-l",
+ "--list-keys",
+ action="store_true",
+ default=False,
+ help=(
+ "List query keys available at the provided instance-data"
+ " <varname>."
+ ),
+ )
parser.add_argument(
- '-u', '--user-data', type=str,
- help=('Path to user-data file. Default is'
- ' /var/lib/cloud/instance/user-data.txt'))
+ "-u",
+ "--user-data",
+ type=str,
+ help=(
+ "Path to user-data file. Default is"
+ " /var/lib/cloud/instance/user-data.txt"
+ ),
+ )
parser.add_argument(
- '-v', '--vendor-data', type=str,
- help=('Path to vendor-data file. Default is'
- ' /var/lib/cloud/instance/vendor-data.txt'))
+ "-v",
+ "--vendor-data",
+ type=str,
+ help=(
+ "Path to vendor-data file. Default is"
+ " /var/lib/cloud/instance/vendor-data.txt"
+ ),
+ )
parser.add_argument(
- 'varname', type=str, nargs='?',
- help=('A dot-delimited specific variable to query from'
- ' instance-data. For example: v1.local_hostname. If the'
- ' value is not JSON serializable, it will be base64-encoded and'
- ' will contain the prefix "ci-b64:". '))
+ "varname",
+ type=str,
+ nargs="?",
+ help=(
+ "A dot-delimited specific variable to query from"
+ " instance-data. For example: v1.local_hostname. If the"
+ " value is not JSON serializable, it will be base64-encoded and"
+ ' will contain the prefix "ci-b64:". '
+ ),
+ )
parser.add_argument(
- '-a', '--all', action='store_true', default=False, dest='dump_all',
- help='Dump all available instance-data')
+ "-a",
+ "--all",
+ action="store_true",
+ default=False,
+ dest="dump_all",
+ help="Dump all available instance-data",
+ )
parser.add_argument(
- '-f', '--format', type=str, dest='format',
- help=('Optionally specify a custom output format string. Any'
- ' instance-data variable can be specified between double-curly'
- ' braces. For example -f "{{ v2.cloud_name }}"'))
+ "-f",
+ "--format",
+ type=str,
+ dest="format",
+ help=(
+ "Optionally specify a custom output format string. Any"
+ " instance-data variable can be specified between double-curly"
+ ' braces. For example -f "{{ v2.cloud_name }}"'
+ ),
+ )
return parser
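
As the --format help above notes, any instance-data variable can be referenced between double-curly braces; handle_args() (further down in this diff) simply prepends the jinja template header before rendering. A rough sketch of that step, with fmt standing in for the user-supplied --format string:

# Rough sketch of the --format path in handle_args (fmt is a stand-in for args.format).
fmt = "{{ v2.cloud_name }}"
payload = "## template: jinja\n{fmt}".format(fmt=fmt)
# The payload is then passed to render_jinja_payload() together with the merged
# instance-data dict; the rendered text is printed on success.
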
@@ -88,50 +131,54 @@ def load_userdata(ud_file_path):
"""
bdata = util.load_file(ud_file_path, decode=False)
try:
- return bdata.decode('utf-8')
+ return bdata.decode("utf-8")
except UnicodeDecodeError:
return util.decomp_gzip(bdata, quiet=False, decode=True)
-def handle_args(name, args):
- """Handle calls to 'cloud-init query' as a subcommand."""
- paths = None
- addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
- if not any([args.list_keys, args.varname, args.format, args.dump_all]):
- LOG.error(
- 'Expected one of the options: --all, --format,'
- ' --list-keys or varname')
- get_parser().print_help()
- return 1
+def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
+ """Return a dict of merged instance-data, vendordata and userdata.
+
+ The dict will contain supplemental userdata and vendordata keys sourced
+ from default user-data and vendor-data files.
+ Non-root users will have redacted INSTANCE_JSON_FILE content and redacted
+ vendordata and userdata values.
+
+ :raise: IOError/OSError on absence of instance-data.json file or invalid
+ access perms.
+ """
+ paths = None
uid = os.getuid()
- if not all([args.instance_data, args.user_data, args.vendor_data]):
+ if not all([instance_data, user_data, vendor_data]):
paths = read_cfg_paths()
- if args.instance_data:
- instance_data_fn = args.instance_data
+ if instance_data:
+ instance_data_fn = instance_data
else:
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
sensitive_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if os.path.exists(sensitive_data_fn):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ sensitive_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
- if args.user_data:
- user_data_fn = args.user_data
+ if user_data:
+ user_data_fn = user_data
else:
- user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
- if args.vendor_data:
- vendor_data_fn = args.vendor_data
+ user_data_fn = os.path.join(paths.instance_link, "user-data.txt")
+ if vendor_data:
+ vendor_data_fn = vendor_data
else:
- vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt")
try:
instance_json = util.load_file(instance_data_fn)
@@ -139,44 +186,123 @@ def handle_args(name, args):
if e.errno == EACCES:
LOG.error("No read permission on '%s'. Try sudo", instance_data_fn)
else:
- LOG.error('Missing instance-data file: %s', instance_data_fn)
- return 1
+ LOG.error("Missing instance-data file: %s", instance_data_fn)
+ raise
instance_data = util.load_json(instance_json)
if uid != 0:
- instance_data['userdata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
- instance_data['vendordata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ instance_data["userdata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ user_data_fn,
+ )
+ instance_data["vendordata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ vendor_data_fn,
+ )
else:
- instance_data['userdata'] = load_userdata(user_data_fn)
- instance_data['vendordata'] = load_userdata(vendor_data_fn)
+ instance_data["userdata"] = load_userdata(user_data_fn)
+ instance_data["vendordata"] = load_userdata(vendor_data_fn)
+ return instance_data
+
+
+def _find_instance_data_leaf_by_varname_path(
+ jinja_vars_without_aliases: dict,
+ jinja_vars_with_aliases: dict,
+ varname: str,
+ list_keys: bool,
+):
+ """Return the value of the dot-delimited varname path in instance-data
+
+ Split a dot-delimited jinja variable name path into components, walk the
+ path components into the instance_data and look up a matching jinja
+ variable name or cloud-init's underscore-delimited key aliases.
+
+    :raises: ValueError when varname represents an invalid key name or path, or
+    if list-keys is provided but varname isn't a dict object.
+ """
+ walked_key_path = ""
+ response = jinja_vars_without_aliases
+ for key_path_part in varname.split("."):
+ try:
+            # Walk the key path using the complete aliases dict, but the response
+            # should only contain jinja_vars_without_aliases
+ jinja_vars_with_aliases = jinja_vars_with_aliases[key_path_part]
+ except KeyError as e:
+ if walked_key_path:
+ msg = "instance-data '{key_path}' has no '{leaf}'".format(
+ leaf=key_path_part, key_path=walked_key_path
+ )
+ else:
+ msg = "Undefined instance-data key '{}'".format(varname)
+ raise ValueError(msg) from e
+ if key_path_part in response:
+ response = response[key_path_part]
+ else: # We are an underscore_delimited key alias
+ for key in response:
+ if get_jinja_variable_alias(key) == key_path_part:
+ response = response[key]
+ break
+ if walked_key_path:
+ walked_key_path += "."
+ walked_key_path += key_path_part
+ return response
+
+
+def handle_args(name, args):
+ """Handle calls to 'cloud-init query' as a subcommand."""
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not any([args.list_keys, args.varname, args.format, args.dump_all]):
+ LOG.error(
+ "Expected one of the options: --all, --format,"
+ " --list-keys or varname"
+ )
+ get_parser().print_help()
+ return 1
+ try:
+ instance_data = _read_instance_data(
+ args.instance_data, args.user_data, args.vendor_data
+ )
+ except (IOError, OSError):
+ return 1
if args.format:
- payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ payload = "## template: jinja\n{fmt}".format(fmt=args.format)
rendered_payload = render_jinja_payload(
- payload=payload, payload_fn='query commandline',
+ payload=payload,
+ payload_fn="query commandline",
instance_data=instance_data,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
if rendered_payload:
print(rendered_payload)
return 0
return 1
+ # If not rendering a structured format above, query output will be either:
+ # - JSON dump of all instance-data/jinja variables
+    # - JSON dump of a value at a dict path into the instance-data dict.
+ # - a list of keys for a specific dict path into the instance-data dict.
response = convert_jinja_instance_data(instance_data)
if args.varname:
+ jinja_vars_with_aliases = convert_jinja_instance_data(
+ instance_data, include_key_aliases=True
+ )
try:
- for var in args.varname.split('.'):
- response = response[var]
- except KeyError:
- LOG.error('Undefined instance-data key %s', args.varname)
+ response = _find_instance_data_leaf_by_varname_path(
+ jinja_vars_without_aliases=response,
+ jinja_vars_with_aliases=jinja_vars_with_aliases,
+ varname=args.varname,
+ list_keys=args.list_keys,
+ )
+ except (KeyError, ValueError) as e:
+ LOG.error(e)
+ return 1
+ if args.list_keys:
+ if not isinstance(response, dict):
+ LOG.error(
+ "--list-keys provided but '%s' is not a dict", args.varname
+ )
return 1
- if args.list_keys:
- if not isinstance(response, dict):
- LOG.error("--list-keys provided but '%s' is not a dict", var)
- return 1
- response = '\n'.join(sorted(response.keys()))
- elif args.list_keys:
- response = '\n'.join(sorted(response.keys()))
+ response = "\n".join(sorted(response.keys()))
if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
@@ -189,7 +315,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
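
To make the new varname handling above easier to follow: the lookup walks a dot-delimited path and, when a path component does not match a key directly, falls back to cloud-init's underscore-delimited jinja alias for that key (via get_jinja_variable_alias). A simplified, standalone sketch of that idea -- not the real helper, and the alias rule shown (hyphens mapped to underscores) is only an approximation:

# Simplified sketch of the dot-delimited lookup with underscore aliases; the real
# logic is _find_instance_data_leaf_by_varname_path in the hunk above.
def walk_varname(data, varname):
    response = data
    for part in varname.split("."):
        if not isinstance(response, dict):
            raise ValueError("cannot descend into '%s': not a dict" % part)
        if part in response:
            response = response[part]
            continue
        for key in response:  # try an underscore-delimited alias, e.g. local-hostname
            if key.replace("-", "_") == part:
                response = response[key]
                break
        else:
            raise ValueError("Undefined instance-data key '%s'" % varname)
    return response


print(walk_varname({"v1": {"local-hostname": "myhost"}}, "v1.local_hostname"))  # myhost
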
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index ea79a85b..5176549d 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -5,22 +5,28 @@
"""Define 'status' utility and handler as part of cloud-init commandline."""
import argparse
+import enum
import os
import sys
-from time import gmtime, strftime, sleep
+from time import gmtime, sleep, strftime
from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.util import get_cmdline, load_file, load_json
-CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
+CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
+
# customer visible status messages
-STATUS_ENABLED_NOT_RUN = 'not run'
-STATUS_RUNNING = 'running'
-STATUS_DONE = 'done'
-STATUS_ERROR = 'error'
-STATUS_DISABLED = 'disabled'
+@enum.unique
+class UXAppStatus(enum.Enum):
+ """Enum representing user-visible cloud-init application status."""
+
+ NOT_RUN = "not run"
+ RUNNING = "running"
+ DONE = "done"
+ ERROR = "error"
+ DISABLED = "disabled"
def get_parser(parser=None):
@@ -34,15 +40,25 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='status',
- description='Report run status of cloud init')
+ prog="status", description="Report run status of cloud init"
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help=('Report long format of statuses including run stage name and'
- ' error messages'))
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help=(
+ "Report long format of statuses including run stage name and"
+ " error messages"
+ ),
+ )
parser.add_argument(
- '-w', '--wait', action='store_true', default=False,
- help='Block waiting on cloud-init to complete')
+ "-w",
+ "--wait",
+ action="store_true",
+ default=False,
+ help="Block waiting on cloud-init to complete",
+ )
return parser
@@ -51,23 +67,20 @@ def handle_status_args(name, args):
# Read configured paths
init = Init(ds_deps=[])
init.read_cfg()
-
- status, status_detail, time = _get_status_details(init.paths)
+ status, status_detail, time = get_status_details(init.paths)
if args.wait:
- while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
- sys.stdout.write('.')
+ while status in (UXAppStatus.NOT_RUN, UXAppStatus.RUNNING):
+ sys.stdout.write(".")
sys.stdout.flush()
- status, status_detail, time = _get_status_details(init.paths)
+ status, status_detail, time = get_status_details(init.paths)
sleep(0.25)
- sys.stdout.write('\n')
+ sys.stdout.write("\n")
+ print("status: {0}".format(status.value))
if args.long:
- print('status: {0}'.format(status))
if time:
- print('time: {0}'.format(time))
- print('detail:\n{0}'.format(status_detail))
- else:
- print('status: {0}'.format(status))
- return 1 if status == STATUS_ERROR else 0
+ print("time: {0}".format(time))
+ print("detail:\n{0}".format(status_detail))
+ return 1 if status == UXAppStatus.ERROR else 0
def _is_cloudinit_disabled(disable_file, paths):
@@ -81,83 +94,91 @@ def _is_cloudinit_disabled(disable_file, paths):
is_disabled = False
cmdline_parts = get_cmdline().split()
if not uses_systemd():
- reason = 'Cloud-init enabled on sysvinit'
- elif 'cloud-init=enabled' in cmdline_parts:
- reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
+ reason = "Cloud-init enabled on sysvinit"
+ elif "cloud-init=enabled" in cmdline_parts:
+ reason = "Cloud-init enabled by kernel command line cloud-init=enabled"
elif os.path.exists(disable_file):
is_disabled = True
- reason = 'Cloud-init disabled by {0}'.format(disable_file)
- elif 'cloud-init=disabled' in cmdline_parts:
+ reason = "Cloud-init disabled by {0}".format(disable_file)
+ elif "cloud-init=disabled" in cmdline_parts:
is_disabled = True
- reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
- elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
+ reason = "Cloud-init disabled by kernel parameter cloud-init=disabled"
+ elif os.path.exists(os.path.join(paths.run_dir, "disabled")):
is_disabled = True
- reason = 'Cloud-init disabled by cloud-init-generator'
+ reason = "Cloud-init disabled by cloud-init-generator"
+ elif os.path.exists(os.path.join(paths.run_dir, "enabled")):
+ reason = "Cloud-init enabled by systemd cloud-init-generator"
else:
- reason = 'Cloud-init enabled by systemd cloud-init-generator'
+ reason = "Systemd generator may not have run yet."
return (is_disabled, reason)
-def _get_status_details(paths):
+def get_status_details(paths=None):
"""Return a 3-tuple of status, status_details and time of last event.
@param paths: An initialized cloudinit.helpers.paths object.
Values are obtained from parsing paths.run_dir/status.json.
"""
- status = STATUS_ENABLED_NOT_RUN
- status_detail = ''
+ if not paths:
+ init = Init(ds_deps=[])
+ init.read_cfg()
+ paths = init.paths
+
+ status = UXAppStatus.NOT_RUN
+ status_detail = ""
status_v1 = {}
- status_file = os.path.join(paths.run_dir, 'status.json')
- result_file = os.path.join(paths.run_dir, 'result.json')
+ status_file = os.path.join(paths.run_dir, "status.json")
+ result_file = os.path.join(paths.run_dir, "result.json")
(is_disabled, reason) = _is_cloudinit_disabled(
- CLOUDINIT_DISABLED_FILE, paths)
+ CLOUDINIT_DISABLED_FILE, paths
+ )
if is_disabled:
- status = STATUS_DISABLED
+ status = UXAppStatus.DISABLED
status_detail = reason
if os.path.exists(status_file):
if not os.path.exists(result_file):
- status = STATUS_RUNNING
- status_v1 = load_json(load_file(status_file)).get('v1', {})
+ status = UXAppStatus.RUNNING
+ status_v1 = load_json(load_file(status_file)).get("v1", {})
errors = []
latest_event = 0
for key, value in sorted(status_v1.items()):
- if key == 'stage':
+ if key == "stage":
if value:
- status = STATUS_RUNNING
- status_detail = 'Running in stage: {0}'.format(value)
- elif key == 'datasource':
+ status = UXAppStatus.RUNNING
+ status_detail = "Running in stage: {0}".format(value)
+ elif key == "datasource":
status_detail = value
elif isinstance(value, dict):
- errors.extend(value.get('errors', []))
- start = value.get('start') or 0
- finished = value.get('finished') or 0
+ errors.extend(value.get("errors", []))
+ start = value.get("start") or 0
+ finished = value.get("finished") or 0
if finished == 0 and start != 0:
- status = STATUS_RUNNING
+ status = UXAppStatus.RUNNING
event_time = max(start, finished)
if event_time > latest_event:
latest_event = event_time
if errors:
- status = STATUS_ERROR
- status_detail = '\n'.join(errors)
- elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
- status = STATUS_DONE
+ status = UXAppStatus.ERROR
+ status_detail = "\n".join(errors)
+ elif status == UXAppStatus.NOT_RUN and latest_event > 0:
+ status = UXAppStatus.DONE
if latest_event:
- time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
+ time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
else:
- time = ''
+ time = ""
return status, status_detail, time
def main():
"""Tool to report status of cloud-init."""
parser = get_parser()
- sys.exit(handle_status_args('status', parser.parse_args()))
+ sys.exit(handle_status_args("status", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
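
A minimal usage sketch of the reworked status API above, assuming the import path stays cloudinit.cmd.status: get_status_details() now defaults its paths argument (building it from Init when none is passed) and returns a UXAppStatus enum member instead of a bare string, along with the detail text and the time of the last event:

# Minimal sketch against the API shown in this diff (cloud-init 22.1).
from cloudinit.cmd.status import UXAppStatus, get_status_details

status, detail, last_event = get_status_details()  # paths=None -> derived from Init()
print("status: %s" % status.value)
if status is UXAppStatus.ERROR:
    print("detail:\n%s" % detail)
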
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
deleted file mode 100644
index a848a810..00000000
--- a/cloudinit/cmd/tests/test_clean.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.cmd import clean
-from cloudinit.util import ensure_dir, sym_link, write_file
-from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
-from collections import namedtuple
-import os
-from io import StringIO
-
-mypaths = namedtuple('MyPaths', 'cloud_dir')
-
-
-class TestClean(CiTestCase):
-
- def setUp(self):
- super(TestClean, self).setUp()
- self.new_root = self.tmp_dir()
- self.artifact_dir = self.tmp_path('artifacts', self.new_root)
- self.log1 = self.tmp_path('cloud-init.log', self.new_root)
- self.log2 = self.tmp_path('cloud-init-output.log', self.new_root)
-
- class FakeInit(object):
- cfg = {'def_log_file': self.log1,
- 'output': {'all': '|tee -a {0}'.format(self.log2)}}
- # Ensure cloud_dir has a trailing slash, to match real behaviour
- paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir))
-
- def __init__(self, ds_deps):
- pass
-
- def read_cfg(self):
- pass
-
- self.init_class = FakeInit
-
- def test_remove_artifacts_removes_logs(self):
- """remove_artifacts removes logs when remove_logs is True."""
- write_file(self.log1, 'cloud-init-log')
- write_file(self.log2, 'cloud-init-output-log')
-
- self.assertFalse(
- os.path.exists(self.artifact_dir), 'Unexpected artifacts dir')
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=True)
- self.assertFalse(os.path.exists(self.log1), 'Unexpected file')
- self.assertFalse(os.path.exists(self.log2), 'Unexpected file')
- self.assertEqual(0, retcode)
-
- def test_remove_artifacts_preserves_logs(self):
- """remove_artifacts leaves logs when remove_logs is False."""
- write_file(self.log1, 'cloud-init-log')
- write_file(self.log2, 'cloud-init-output-log')
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertTrue(os.path.exists(self.log1), 'Missing expected file')
- self.assertTrue(os.path.exists(self.log2), 'Missing expected file')
- self.assertEqual(0, retcode)
-
- def test_remove_artifacts_removes_unlinks_symlinks(self):
- """remove_artifacts cleans artifacts dir unlinking any symlinks."""
- dir1 = os.path.join(self.artifact_dir, 'dir1')
- ensure_dir(dir1)
- symlink = os.path.join(self.artifact_dir, 'mylink')
- sym_link(dir1, symlink)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(0, retcode)
- for path in (dir1, symlink):
- self.assertFalse(
- os.path.exists(path),
- 'Unexpected {0} dir'.format(path))
-
- def test_remove_artifacts_removes_artifacts_skipping_seed(self):
- """remove_artifacts cleans artifacts dir with exception of seed dir."""
- dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, 'seed'),
- os.path.join(self.artifact_dir, 'dir1'),
- os.path.join(self.artifact_dir, 'dir2')]
- for _dir in dirs:
- ensure_dir(_dir)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(0, retcode)
- for expected_dir in dirs[:2]:
- self.assertTrue(
- os.path.exists(expected_dir),
- 'Missing {0} dir'.format(expected_dir))
- for deleted_dir in dirs[2:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- 'Unexpected {0} dir'.format(deleted_dir))
-
- def test_remove_artifacts_removes_artifacts_removes_seed(self):
- """remove_artifacts removes seed dir when remove_seed is True."""
- dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, 'seed'),
- os.path.join(self.artifact_dir, 'dir1'),
- os.path.join(self.artifact_dir, 'dir2')]
- for _dir in dirs:
- ensure_dir(_dir)
-
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False, remove_seed=True)
- self.assertEqual(0, retcode)
- self.assertTrue(
- os.path.exists(self.artifact_dir), 'Missing artifact dir')
- for deleted_dir in dirs[1:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- 'Unexpected {0} dir'.format(deleted_dir))
-
- def test_remove_artifacts_returns_one_on_errors(self):
- """remove_artifacts returns non-zero on failure and prints an error."""
- ensure_dir(self.artifact_dir)
- ensure_dir(os.path.join(self.artifact_dir, 'dir1'))
-
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'del_dir': {'side_effect': OSError('oops')},
- 'Init': {'side_effect': self.init_class}},
- clean.remove_artifacts, remove_logs=False)
- self.assertEqual(1, retcode)
- self.assertEqual(
- 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
- m_stderr.getvalue())
-
- def test_handle_clean_args_reboots(self):
- """handle_clean_args_reboots when reboot arg is provided."""
-
- called_cmds = []
-
- def fake_subp(cmd, capture):
- called_cmds.append((cmd, capture))
- return '', ''
-
- myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
- cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
- retcode = wrap_and_call(
- 'cloudinit.cmd.clean',
- {'subp': {'side_effect': fake_subp},
- 'Init': {'side_effect': self.init_class}},
- clean.handle_clean_args, name='does not matter', args=cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(
- [(['shutdown', '-r', 'now'], False)], called_cmds)
-
- def test_status_main(self):
- '''clean.main can be run as a standalone script.'''
- write_file(self.log1, 'cloud-init-log')
- with self.assertRaises(SystemExit) as context_manager:
- wrap_and_call(
- 'cloudinit.cmd.clean',
- {'Init': {'side_effect': self.init_class},
- 'sys.argv': {'new': ['clean', '--logs']}},
- clean.main)
-
- self.assertEqual(0, context_manager.exception.code)
- self.assertFalse(
- os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
-
-
-# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py
deleted file mode 100644
index 3f3727fd..00000000
--- a/cloudinit/cmd/tests/test_cloud_id.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloud-id command line utility."""
-
-from cloudinit import util
-from collections import namedtuple
-from io import StringIO
-
-from cloudinit.cmd import cloud_id
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestCloudId(CiTestCase):
-
- args = namedtuple('cloudidargs', ('instance_data json long'))
-
- def setUp(self):
- super(TestCloudId, self).setUp()
- self.tmp = self.tmp_dir()
- self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp)
-
- def test_cloud_id_arg_parser_defaults(self):
- """Validate the argument defaults when not provided by the end-user."""
- cmd = ['cloud-id']
- with mock.patch('sys.argv', cmd):
- args = cloud_id.get_parser().parse_args()
- self.assertEqual(
- '/run/cloud-init/instance-data.json',
- args.instance_data)
- self.assertEqual(False, args.long)
- self.assertEqual(False, args.json)
-
- def test_cloud_id_arg_parse_overrides(self):
- """Override argument defaults by specifying values for each param."""
- util.write_file(self.instance_data, '{}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long',
- '--json']
- with mock.patch('sys.argv', cmd):
- args = cloud_id.get_parser().parse_args()
- self.assertEqual(self.instance_data, args.instance_data)
- self.assertEqual(True, args.long)
- self.assertEqual(True, args.json)
-
- def test_cloud_id_missing_instance_data_json(self):
- """Exit error when the provided instance-data.json does not exist."""
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertIn(
- "ERROR: File not found '%s'" % self.instance_data,
- m_stderr.getvalue())
-
- def test_cloud_id_non_json_instance_data(self):
- """Exit error when the provided instance-data.json is not json."""
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- util.write_file(self.instance_data, '{')
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertIn(
- "ERROR: File '%s' is not valid json." % self.instance_data,
- m_stderr.getvalue())
-
- def test_cloud_id_from_cloud_name_in_instance_data(self):
- """Report canonical cloud-id from cloud_name in instance-data."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data]
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("mycloud\n", m_stdout.getvalue())
-
- def test_cloud_id_long_name_from_instance_data(self):
- """Report long cloud-id format from cloud_name and region."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue())
-
- def test_cloud_id_lookup_from_instance_data_region(self):
- """Report discovered canonical cloud_id when region lookup matches."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
- ' "platform": "ec2"}}')
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--long']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue())
-
- def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self):
- """Report v1 instance-data content with cloud_id when --json set."""
- util.write_file(
- self.instance_data,
- '{"v1": {"cloud_name": "unknown", "region": "dfw",'
- ' "platform": "openstack", "public_ssh_keys": []}}')
- expected = util.json_dumps({
- 'cloud_id': 'openstack', 'cloud_name': 'unknown',
- 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'})
- cmd = ['cloud-id', '--instance-data', self.instance_data, '--json']
- with mock.patch('sys.argv', cmd):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with self.assertRaises(SystemExit) as context_manager:
- cloud_id.main()
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual(expected + '\n', m_stdout.getvalue())
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
deleted file mode 100644
index 585b3b0e..00000000
--- a/cloudinit/cmd/tests/test_main.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from collections import namedtuple
-import copy
-import os
-from io import StringIO
-
-from cloudinit.cmd import main
-from cloudinit import safeyaml
-from cloudinit.util import (
- ensure_dir, load_file, write_file)
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, wrap_and_call)
-
-mypaths = namedtuple('MyPaths', 'run_dir')
-myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
-
-
-class TestMain(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestMain, self).setUp()
- self.new_root = self.tmp_dir()
- self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
- os.makedirs(self.cloud_dir)
- self.replicateTestRoot('simple_ubuntu', self.new_root)
- self.cfg = {
- 'datasource_list': ['None'],
- 'runcmd': ['ls /etc'], # test ALL_DISTROS
- 'system_info': {'paths': {'cloud_dir': self.cloud_dir,
- 'run_dir': self.new_root}},
- 'write_files': [
- {
- 'path': '/etc/blah.ini',
- 'content': 'blah',
- 'permissions': 0o755,
- },
- ],
- 'cloud_init_modules': ['write-files', 'runcmd'],
- }
- cloud_cfg = safeyaml.dumps(self.cfg)
- ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- self.cloud_cfg_file = os.path.join(
- self.new_root, 'etc', 'cloud', 'cloud.cfg')
- write_file(self.cloud_cfg_file, cloud_cfg)
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
- self.stderr = StringIO()
- self.patchStdoutAndStderr(stderr=self.stderr)
-
- def test_main_init_run_net_stops_on_file_no_net(self):
- """When no-net file is present, main_init does not process modules."""
- stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file
- write_file(stop_file, '')
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- # We should not run write_files module
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')),
- 'Unexpected run of write_files module produced blah.ini')
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, instance_id_path)),
- 'Unexpected call to datasource.instancify produced instance-id')
- expected_logs = [
- "Exiting. stop file ['{stop_file}'] existed\n".format(
- stop_file=stop_file),
- 'my net debug info' # netinfo.debug_info
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
- def test_main_init_run_net_runs_modules(self):
- """Modules like write_files are run in 'net' mode."""
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertEqual(
- 'iid-datasource-none\n',
- os.path.join(load_file(
- os.path.join(self.new_root, instance_id_path))))
- # modules are run (including write_files)
- self.assertEqual(
- 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
- expected_logs = [
- 'network config is disabled by fallback', # apply_network_config
- 'my net debug info', # netinfo.debug_info
- 'no previous run detected'
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
- def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
- """When local-hostname metadata is present, call cc_set_hostname."""
- self.cfg['datasource'] = {
- 'None': {'metadata': {'local-hostname': 'md-hostname'}}}
- cloud_cfg = safeyaml.dumps(self.cfg)
- write_file(self.cloud_cfg_file, cloud_cfg)
- cmdargs = myargs(
- debug=False, files=None, force=False, local=False, reporter=None,
- subcommand='init')
-
- def set_hostname(name, cfg, cloud, log, args):
- self.assertEqual('set-hostname', name)
- updated_cfg = copy.deepcopy(self.cfg)
- updated_cfg.update(
- {'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': [
- 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
- ],
- 'vendor_data': {'enabled': True, 'prefix': []}})
- updated_cfg.pop('system_info')
-
- self.assertEqual(updated_cfg, cfg)
- self.assertEqual(main.LOG, log)
- self.assertIsNone(args)
-
- (_item1, item2) = wrap_and_call(
- 'cloudinit.cmd.main',
- {'util.close_stdin': True,
- 'netinfo.debug_info': 'my net debug info',
- 'cc_set_hostname.handle': {'side_effect': set_hostname},
- 'util.fixup_output': ('outfmt', 'errfmt')},
- main.main_init, 'init', cmdargs)
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = 'var/lib/cloud/data/instance-id'
- self.assertEqual(
- 'iid-datasource-none\n',
- os.path.join(load_file(
- os.path.join(self.new_root, instance_id_path))))
- # modules are run (including write_files)
- self.assertEqual(
- 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
- expected_logs = [
- 'network config is disabled by fallback', # apply_network_config
- 'my net debug info', # netinfo.debug_info
- 'no previous run detected'
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
deleted file mode 100644
index c258d321..00000000
--- a/cloudinit/cmd/tests/test_query.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import errno
-import gzip
-from io import BytesIO
-import json
-from textwrap import dedent
-
-import pytest
-
-from collections import namedtuple
-from cloudinit.cmd import query
-from cloudinit.helpers import Paths
-from cloudinit.sources import (
- REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE)
-from cloudinit.tests.helpers import mock
-
-from cloudinit.util import b64e, write_file
-
-
-def _gzip_data(data):
- with BytesIO() as iobuf:
- with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
- gzfp.write(data)
- return iobuf.getvalue()
-
-
-@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
-class TestQuery:
-
- args = namedtuple(
- 'queryargs',
- ('debug dump_all format instance_data list_keys user_data vendor_data'
- ' varname'))
-
- def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
- """Write userdata and vendordata into a tmpdir.
-
- Return:
- 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
- """
- if ud_val:
- user_data = tmpdir.join('user-data')
- write_file(user_data.strpath, ud_val)
- else:
- user_data = None
- if vd_val:
- vendor_data = tmpdir.join('vendor-data')
- write_file(vendor_data.strpath, vd_val)
- else:
- vendor_data = None
- run_dir = tmpdir.join('run_dir')
- run_dir.ensure_dir()
- return (
- Paths({'run_dir': run_dir.strpath}),
- run_dir,
- user_data,
- vendor_data
- )
-
- def test_handle_args_error_on_missing_param(self, caplog, capsys):
- """Error when missing required parameters and print usage."""
- args = self.args(
- debug=False, dump_all=False, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- with mock.patch(
- "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
- ) as m_cli_log:
- assert 1 == query.handle_args('anyname', args)
- expected_error = (
- 'Expected one of the options: --all, --format, --list-keys'
- ' or varname\n')
- assert expected_error in caplog.text
- out, _err = capsys.readouterr()
- assert 'usage: query' in out
- assert 1 == m_cli_log.call_count
-
- def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
- """When instance_data file path does not exist, log an error."""
- absent_fn = tmpdir.join('absent')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=absent_fn.strpath,
- list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- assert 1 == query.handle_args('anyname', args)
-
- msg = 'Missing instance-data file: %s' % absent_fn
- assert msg in caplog.text
-
- def test_handle_args_error_when_no_read_permission_instance_data(
- self, caplog, tmpdir
- ):
- """When instance_data file is unreadable, log an error."""
- noread_fn = tmpdir.join('unreadable')
- noread_fn.write('thou shall not pass')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=noread_fn.strpath,
- list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
- assert 1 == query.handle_args('anyname', args)
- msg = "No read permission on '%s'. Try sudo" % noread_fn
- assert msg in caplog.text
-
- def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
- """When no instance_data argument, default to configured run_dir."""
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- assert 1 == query.handle_args('anyname', args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- msg = 'Missing instance-data file: %s' % json_file.strpath
- assert msg in caplog.text
-
- def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
- """When no instance_data argument, root falls back to redacted json."""
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=None, vendor_data=None, varname=None)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 1 == query.handle_args('anyname', args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- msg = (
- 'Missing root-readable %s. Using redacted %s instead.' %
- (
- sensitive_file.strpath, json_file.strpath
- )
- )
- assert msg in caplog.text
-
- @pytest.mark.parametrize(
- 'ud_src,ud_expected,vd_src,vd_expected',
- (
- ('hi mom', 'hi mom', 'hi pops', 'hi pops'),
- ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'),
- (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'),
- (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'),
- )
- )
- def test_handle_args_root_processes_user_data(
- self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
- ):
- """Support reading multiple user-data file content types"""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
- tmpdir, ud_val=ud_src, vd_val=vd_src
- )
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=user_data.strpath,
- vendor_data=vendor_data.strpath, varname=None)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- cmd_output = json.loads(out)
- assert "it worked" == cmd_output['my_var']
- if ud_expected == "ci-b64:":
- ud_expected = "ci-b64:{}".format(b64e(ud_src))
- if vd_expected == "ci-b64:":
- vd_expected = "ci-b64:{}".format(b64e(vd_src))
- assert ud_expected == cmd_output['userdata']
- assert vd_expected == cmd_output['vendordata']
-
- def test_handle_args_root_uses_instance_sensitive_data(
- self, capsys, tmpdir
- ):
- """When no instance_data argument, root uses sensitive json."""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
- tmpdir, ud_val='ud', vd_val='vd'
- )
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=user_data.strpath,
- vendor_data=vendor_data.strpath, varname=None)
- with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
- m_paths.return_value = paths
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 0
- assert 0 == query.handle_args('anyname', args)
- expected = (
- '{\n "my_var": "it worked",\n "userdata": "ud",\n '
- '"vendordata": "vd"\n}\n'
- )
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
- """When --all is specified query will dump all instance data vars."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, list_keys=False,
- user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- expected = (
- '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
- ' "vendordata": "<%s> file:vd"\n}\n' % (
- REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE
- )
- )
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
- """When the argument varname is passed, report its value."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write('{"my-var": "it worked"}')
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, list_keys=False,
- user_data='ud', vendor_data='vd', varname='my_var')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert 'it worked\n' == out
-
- def test_handle_args_returns_nested_varname(self, capsys, tmpdir):
- """If user_data file is a jinja template render instance-data vars."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}'
- )
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, user_data='ud',
- vendor_data='vd', list_keys=False, varname='v1.key_2')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert 'value-2\n' == out
-
- def test_handle_args_returns_standardized_vars_to_top_level_aliases(
- self, capsys, tmpdir
- ):
- """Any standardized vars under v# are promoted as top-level aliases."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
- ' "top": "gun"}')
- expected = dedent("""\
- {
- "top": "gun",
- "userdata": "<redacted for non-root user> file:ud",
- "v1": {
- "v1_1": "val1.1"
- },
- "v1_1": "val1.1",
- "v2": {
- "v2_2": "val2.2"
- },
- "v2_2": "val2.2",
- "vendordata": "<redacted for non-root user> file:vd"
- }
- """)
- args = self.args(
- debug=False, dump_all=True, format=None,
- instance_data=instance_data.strpath, user_data='ud',
- vendor_data='vd', list_keys=False, varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
- self, capsys, tmpdir
- ):
- """Sort all top-level keys when only --list-keys provided."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
- ' "top": "gun"}')
- expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_sorts_nested_keys_when_varname(
- self, capsys, tmpdir
- ):
- """Sort all nested keys of varname object when --list-keys provided."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
- ' {"v2_2": "val2.2"}, "top": "gun"}')
- expected = 'v1_1\nv1_2\n'
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname='v1')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 0 == query.handle_args('anyname', args)
- out, _err = capsys.readouterr()
- assert expected == out
-
- def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
- self, caplog, tmpdir
- ):
- """Raise an error when --list-keys and varname specify a non-list."""
- instance_data = tmpdir.join('instance-data')
- instance_data.write(
- '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
- '{"v2_2": "val2.2"}, "top": "gun"}')
- expected_error = "--list-keys provided but 'top' is not a dict"
- args = self.args(
- debug=False, dump_all=False, format=None,
- instance_data=instance_data.strpath, list_keys=True,
- user_data='ud', vendor_data='vd', varname='top')
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- assert 1 == query.handle_args('anyname', args)
- assert expected_error in caplog.text
-
-# vi: ts=4 expandtab
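Note: the deleted query tests above parametrize plain-text, utf-8 and gzip user-data and expect the same decoded value back. A self-contained sketch of that decode pattern, assuming a hypothetical load_userdata helper that is not part of cloud-init:

    import gzip

    import pytest


    def load_userdata(blob):
        # hypothetical helper mirroring the decode behaviour the deleted
        # parametrized test exercises: accept str, utf-8 bytes, or
        # gzip-compressed bytes and always return text
        if isinstance(blob, bytes):
            try:
                blob = gzip.decompress(blob)
            except OSError:
                pass  # not gzip-compressed, treat as plain utf-8 bytes
            return blob.decode("utf-8")
        return blob


    @pytest.mark.parametrize(
        "src,expected",
        (
            ("hi mom", "hi mom"),
            (b"ud", "ud"),
            (gzip.compress(b"ud"), "ud"),
        ),
    )
    def test_load_userdata_handles_multiple_content_types(src, expected):
        assert expected == load_userdata(src)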
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
deleted file mode 100644
index 1c9eec37..00000000
--- a/cloudinit/cmd/tests/test_status.py
+++ /dev/null
@@ -1,391 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from collections import namedtuple
-import os
-from io import StringIO
-from textwrap import dedent
-
-from cloudinit.atomic_helper import write_json
-from cloudinit.cmd import status
-from cloudinit.util import ensure_file
-from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
-
-mypaths = namedtuple('MyPaths', 'run_dir')
-myargs = namedtuple('MyArgs', 'long wait')
-
-
-class TestStatus(CiTestCase):
-
- def setUp(self):
- super(TestStatus, self).setUp()
- self.new_root = self.tmp_dir()
- self.status_file = self.tmp_path('status.json', self.new_root)
- self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
- self.paths = mypaths(run_dir=self.new_root)
-
- class FakeInit(object):
- paths = self.paths
-
- def __init__(self, ds_deps):
- pass
-
- def read_cfg(self):
- pass
-
- self.init_class = FakeInit
-
- def test__is_cloudinit_disabled_false_on_sysvinit(self):
- '''When not in an environment using systemd, return False.'''
- ensure_file(self.disable_file) # Create the ignored disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': False,
- 'get_cmdline': "root=/dev/my-root not-important"},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(
- is_disabled, 'expected enabled cloud-init on sysvinit')
- self.assertEqual('Cloud-init enabled on sysvinit', reason)
-
- def test__is_cloudinit_disabled_true_on_disable_file(self):
- '''When using systemd and disable_file is present return disabled.'''
- ensure_file(self.disable_file) # Create observed disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': "root=/dev/my-root not-important"},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual(
- 'Cloud-init disabled by {0}'.format(self.disable_file), reason)
-
- def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
- '''Not disabled when using systemd and enabled via commandline.'''
- ensure_file(self.disable_file) # Create ignored disable file
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something cloud-init=enabled else'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(is_disabled, 'expected enabled cloud-init')
- self.assertEqual(
- 'Cloud-init enabled by kernel command line cloud-init=enabled',
- reason)
-
- def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
- '''When using systemd and disable_file is present return disabled.'''
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something cloud-init=disabled else'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual(
- 'Cloud-init disabled by kernel parameter cloud-init=disabled',
- reason)
-
- def test__is_cloudinit_disabled_true_when_generator_disables(self):
- '''When cloud-init-generator doesn't write enabled file return True.'''
- enabled_file = os.path.join(self.paths.run_dir, 'enabled')
- self.assertFalse(os.path.exists(enabled_file))
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertTrue(is_disabled, 'expected disabled cloud-init')
- self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)
-
- def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
- '''Report enabled when systemd generator creates the enabled file.'''
- enabled_file = os.path.join(self.paths.run_dir, 'enabled')
- ensure_file(enabled_file)
- (is_disabled, reason) = wrap_and_call(
- 'cloudinit.cmd.status',
- {'uses_systemd': True,
- 'get_cmdline': 'something ignored'},
- status._is_cloudinit_disabled, self.disable_file, self.paths)
- self.assertFalse(is_disabled, 'expected enabled cloud-init')
- self.assertEqual(
- 'Cloud-init enabled by systemd cloud-init-generator', reason)
-
- def test_status_returns_not_run(self):
- '''When status.json does not exist yet, return 'not run'.'''
- self.assertFalse(
- os.path.exists(self.status_file), 'Unexpected status.json found')
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: not run\n', m_stdout.getvalue())
-
- def test_status_returns_disabled_long_on_presence_of_disable_file(self):
- '''When cloudinit is disabled, return disabled reason.'''
-
- checked_files = []
-
- def fakeexists(filepath):
- checked_files.append(filepath)
- status_file = os.path.join(self.paths.run_dir, 'status.json')
- return bool(not filepath == status_file)
-
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'os.path.exists': {'side_effect': fakeexists},
- '_is_cloudinit_disabled': (True, 'disabled for some reason'),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(
- [os.path.join(self.paths.run_dir, 'status.json')],
- checked_files)
- expected = dedent('''\
- status: disabled
- detail:
- disabled for some reason
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_on_no_results_json(self):
- '''Report running when status.json exists but result.json does not.'''
- result_file = self.tmp_path('result.json', self.new_root)
- write_json(self.status_file, {})
- self.assertFalse(
- os.path.exists(result_file), 'Unexpected result.json found')
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
- def test_status_returns_running(self):
- '''Report running when status exists with an unfinished stage.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(self.status_file,
- {'v1': {'init': {'start': 1, 'finished': None}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
- def test_status_returns_done(self):
- '''Report done results.json exists no stages are unfinished.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(
- self.status_file,
- {'v1': {'stage': None, # No current stage running
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'blah': {'finished': 123.456},
- 'init': {'errors': [], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual('status: done\n', m_stdout.getvalue())
-
- def test_status_returns_done_long(self):
- '''Long format of done status includes datasource info.'''
- ensure_file(self.tmp_path('result.json', self.new_root))
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'init': {'start': 124.567, 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- expected = dedent('''\
- status: done
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_on_errors(self):
- '''Reports error when any stage has errors.'''
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'blah': {'errors': [], 'finished': 123.456},
- 'init': {'errors': ['error1'], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=False, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- self.assertEqual('status: error\n', m_stdout.getvalue())
-
- def test_status_on_errors_long(self):
- '''Long format of error status includes all error messages.'''
- write_json(
- self.status_file,
- {'v1': {'stage': None,
- 'datasource': (
- 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
- '[dsmode=net]'),
- 'init': {'errors': ['error1'], 'start': 124.567,
- 'finished': 125.678},
- 'init-local': {'errors': ['error2', 'error3'],
- 'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- expected = dedent('''\
- status: error
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- error1
- error2
- error3
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_long_format(self):
- '''Long format reports the stage in which we are running.'''
- write_json(
- self.status_file,
- {'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}})
- cmdargs = myargs(long=True, wait=False)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- expected = dedent('''\
- status: running
- time: Thu, 01 Jan 1970 00:02:04 +0000
- detail:
- Running in stage: init
- ''')
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_wait_blocks_until_done(self):
- '''Specifying wait will poll every 1/4 second until done state.'''
- running_json = {
- 'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
- done_json = {
- 'v1': {'stage': None,
- 'init': {'start': 124.456, 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
-
- self.sleep_calls = 0
-
- def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, done_json)
- result_file = self.tmp_path('result.json', self.new_root)
- ensure_file(result_file)
-
- cmdargs = myargs(long=False, wait=True)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'sleep': {'side_effect': fake_sleep},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(0, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())
-
- def test_status_wait_blocks_until_error(self):
- '''Specifying wait will poll every 1/4 second until error state.'''
- running_json = {
- 'v1': {'stage': 'init',
- 'init': {'start': 124.456, 'finished': None},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
- error_json = {
- 'v1': {'stage': None,
- 'init': {'errors': ['error1'], 'start': 124.456,
- 'finished': 125.678},
- 'init-local': {'start': 123.45, 'finished': 123.46}}}
-
- self.sleep_calls = 0
-
- def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, error_json)
-
- cmdargs = myargs(long=False, wait=True)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- 'cloudinit.cmd.status',
- {'sleep': {'side_effect': fake_sleep},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.handle_status_args, 'ignored', cmdargs)
- self.assertEqual(1, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())
-
- def test_status_main(self):
- '''status.main can be run as a standalone script.'''
- write_json(self.status_file,
- {'v1': {'init': {'start': 1, 'finished': None}}})
- with self.assertRaises(SystemExit) as context_manager:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- wrap_and_call(
- 'cloudinit.cmd.status',
- {'sys.argv': {'new': ['status']},
- '_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}},
- status.main)
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual('status: running\n', m_stdout.getvalue())
-
-# vi: ts=4 expandtab syntax=python
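Note: the wait tests above drive the polling loop by patching sleep with a side effect that rewrites status.json part-way through. The same structure, reduced to a generic poll helper so it runs without cloud-init (wait_for and its predicate are hypothetical names):

    import time
    from unittest import mock


    def wait_for(predicate, interval=0.25, sleep=time.sleep):
        # generic poll loop in the spirit of `status --wait`: check, sleep,
        # repeat until the predicate reports done; return how often we slept
        polls = 0
        while not predicate():
            sleep(interval)
            polls += 1
        return polls


    def test_wait_polls_until_done():
        sleeps = []

        def fake_sleep(interval):
            # mirrors the fake_sleep side effect used by the deleted tests
            assert interval == 0.25
            sleeps.append(interval)

        predicate = mock.Mock(side_effect=[False, False, False, True])
        assert 3 == wait_for(predicate, sleep=fake_sleep)
        assert 3 == len(sleeps)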
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 0ef9a748..ed124180 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -6,9 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
from cloudinit import log as logging
+from cloudinit.settings import FREQUENCIES, PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -22,26 +21,27 @@ MOD_PREFIX = "cc_"
def form_module_name(name):
canon_name = name.replace("-", "_")
if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
+ canon_name = canon_name[0 : (len(canon_name) - 3)]
canon_name = canon_name.strip()
if not canon_name:
return None
if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
+ canon_name = "%s%s" % (MOD_PREFIX, canon_name)
return canon_name
def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
+ if not hasattr(mod, "frequency"):
+ setattr(mod, "frequency", def_freq)
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
LOG.warning("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
+ if not hasattr(mod, "distros"):
+ setattr(mod, "distros", [])
+ if not hasattr(mod, "osfamilies"):
+ setattr(mod, "osfamilies", [])
return mod
+
# vi: ts=4 expandtab
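Note: the fixup_module change above is purely cosmetic (quote style); the attribute-defaulting pattern it reformats is easy to show in isolation. The FREQUENCIES tuple below is a stand-in, not the real cloudinit.settings constant:

    from types import SimpleNamespace

    FREQUENCIES = ("once", "once-per-instance", "always")  # stand-in values


    def fixup(mod, def_freq="once-per-instance"):
        # default missing attributes the way fixup_module does: a frequency
        # if none is declared, and empty distros/osfamilies lists
        if not hasattr(mod, "frequency"):
            mod.frequency = def_freq
        elif mod.frequency not in FREQUENCIES:
            print("unknown frequency %s" % mod.frequency)
        for attr in ("distros", "osfamilies"):
            if not hasattr(mod, attr):
                setattr(mod, attr, [])
        return mod


    mod = fixup(SimpleNamespace())
    assert mod.frequency == "once-per-instance" and mod.distros == []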
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index 84d7a0b6..0952c971 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -9,11 +9,8 @@
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import util
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import temp_utils, templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -55,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\
frequency = PER_INSTANCE
-distros = ['alpine']
-schema = {
- 'id': 'cc_apk_configure',
- 'name': 'APK Configure',
- 'title': 'Configure apk repositories file',
- 'description': dedent("""\
+distros = ["alpine"]
+meta: MetaSchema = {
+ "id": "cc_apk_configure",
+ "name": "APK Configure",
+ "title": "Configure apk repositories file",
+ "description": dedent(
+ """\
This module handles configuration of the /etc/apk/repositories file.
.. note::
To ensure that apk configuration is valid yaml, any strings
containing special characters, especially ``:`` should be quoted.
- """),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Keep the existing /etc/apk/repositories file unaltered.
apk_repos:
preserve_repositories: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine v3.12 main and community
# using default mirror site.
apk_repos:
alpine_repo:
community_enabled: true
version: 'v3.12'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine Edge main, community, and
# testing using a specified mirror site and also a local repo.
apk_repos:
@@ -92,93 +96,13 @@ schema = {
testing_enabled: true
version: 'edge'
local_repo_base_url: 'https://my-local-server/local-alpine'
- """),
+ """
+ ),
],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'apk_repos': {
- 'type': 'object',
- 'properties': {
- 'preserve_repositories': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- By default, cloud-init will generate a new repositories
- file ``/etc/apk/repositories`` based on any valid
- configuration settings specified within a apk_repos
- section of cloud config. To disable this behavior and
- preserve the repositories file from the pristine image,
- set ``preserve_repositories`` to ``true``.
-
- The ``preserve_repositories`` option overrides
- all other config keys that would alter
- ``/etc/apk/repositories``.
- """)
- },
- 'alpine_repo': {
- 'type': ['object', 'null'],
- 'properties': {
- 'base_url': {
- 'type': 'string',
- 'default': DEFAULT_MIRROR,
- 'description': dedent("""\
- The base URL of an Alpine repository, or
- mirror, to download official packages from.
- If not specified then it defaults to ``{}``
- """.format(DEFAULT_MIRROR))
- },
- 'community_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- Whether to add the Community repo to the
- repositories file. By default the Community
- repo is not included.
- """)
- },
- 'testing_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- Whether to add the Testing repo to the
- repositories file. By default the Testing
- repo is not included. It is only recommended
- to use the Testing repo on a machine running
- the ``Edge`` version of Alpine as packages
- installed from Testing may have dependancies
- that conflict with those in non-Edge Main or
- Community repos."
- """)
- },
- 'version': {
- 'type': 'string',
- 'description': dedent("""\
- The Alpine version to use (e.g. ``v3.12`` or
- ``edge``)
- """)
- },
- },
- 'required': ['version'],
- 'minProperties': 1,
- 'additionalProperties': False,
- },
- 'local_repo_base_url': {
- 'type': 'string',
- 'description': dedent("""\
- The base URL of an Alpine repository containing
- unofficial packages
- """)
- }
- },
- 'required': [],
- 'minProperties': 1, # Either preserve_repositories or alpine_repo
- 'additionalProperties': False,
- }
- }
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -194,38 +118,42 @@ def handle(name, cfg, cloud, log, _args):
# If there is no "apk_repos" section in the configuration
# then do nothing.
- apk_section = cfg.get('apk_repos')
+ apk_section = cfg.get("apk_repos")
if not apk_section:
- LOG.debug(("Skipping module named %s,"
- " no 'apk_repos' section found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'apk_repos' section found", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
-
# If "preserve_repositories" is explicitly set to True in
# the configuration do nothing.
- if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
- LOG.debug(("Skipping module named %s,"
- " 'preserve_repositories' is set"), name)
+ if util.get_cfg_option_bool(apk_section, "preserve_repositories", False):
+ LOG.debug(
+ "Skipping module named %s, 'preserve_repositories' is set", name
+ )
return
# If there is no "alpine_repo" subsection of "apk_repos" present in the
# configuration then do nothing, as at least "version" is required to
# create valid repositories entries.
- alpine_repo = apk_section.get('alpine_repo')
+ alpine_repo = apk_section.get("alpine_repo")
if not alpine_repo:
- LOG.debug(("Skipping module named %s,"
- " no 'alpine_repo' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'alpine_repo' configuration found",
+ name,
+ )
return
# If there is no "version" value present in configuration then do nothing.
- alpine_version = alpine_repo.get('version')
+ alpine_version = alpine_repo.get("version")
if not alpine_version:
- LOG.debug(("Skipping module named %s,"
- " 'version' not specified in alpine_repo"), name)
+ LOG.debug(
+ "Skipping module named %s, 'version' not specified in alpine_repo",
+ name,
+ )
return
- local_repo = apk_section.get('local_repo_base_url', '')
+ local_repo = apk_section.get("local_repo_base_url", "")
_write_repositories_file(alpine_repo, alpine_version, local_repo)
@@ -239,22 +167,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo):
@param local_repo: A string containing the base URL of a local repo.
"""
- repo_file = '/etc/apk/repositories'
+ repo_file = "/etc/apk/repositories"
- alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+ alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR)
- params = {'alpine_baseurl': alpine_baseurl,
- 'alpine_version': alpine_version,
- 'community_enabled': alpine_repo.get('community_enabled'),
- 'testing_enabled': alpine_repo.get('testing_enabled'),
- 'local_repo': local_repo}
+ params = {
+ "alpine_baseurl": alpine_baseurl,
+ "alpine_version": alpine_version,
+ "community_enabled": alpine_repo.get("community_enabled"),
+ "testing_enabled": alpine_repo.get("testing_enabled"),
+ "local_repo": local_repo,
+ }
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # Filepath is second item in tuple
util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
- LOG.debug('Generating Alpine repository configuration file: %s',
- repo_file)
+ LOG.debug("Generating Alpine repository configuration file: %s", repo_file)
templater.render_to_file(template_fn, repo_file, params)
# Clean up temporary template
util.del_file(template_fn)
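Note: cc_apk_configure still builds a params dict and renders it through a temporary template file. A toy, self-contained rendering of the same params; the template string and placeholder mirror URL below are simplifications, not the module's REPOSITORIES_TEMPLATE or DEFAULT_MIRROR:

    TOY_TEMPLATE = "{alpine_baseurl}/{alpine_version}/main\n{community}"


    def render_repositories(alpine_repo, alpine_version, local_repo=""):
        # same params as _write_repositories_file, rendered with str.format
        # instead of cloudinit.templater for the sake of a runnable example
        baseurl = alpine_repo.get("base_url", "https://example.org/alpine")
        community = ""
        if alpine_repo.get("community_enabled"):
            community = "%s/%s/community\n" % (baseurl, alpine_version)
        out = TOY_TEMPLATE.format(
            alpine_baseurl=baseurl,
            alpine_version=alpine_version,
            community=community,
        )
        if local_repo:
            out += "%s/%s\n" % (local_repo, alpine_version)
        return out


    print(render_repositories({"community_enabled": True}, "v3.12"))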
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 73d8719f..c558311a 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -10,16 +10,14 @@
import glob
import os
+import pathlib
import re
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import gpg
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import templater
-from cloudinit import util
+from cloudinit import subp, templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -27,45 +25,19 @@ LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+APT_LOCAL_KEYS = "/etc/apt/trusted.gpg"
+APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/"
+CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/"
+
frequency = PER_INSTANCE
distros = ["ubuntu", "debian"]
-mirror_property = {
- 'type': 'array',
- 'item': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['arches'],
- 'properties': {
- 'arches': {
- 'type': 'array',
- 'item': {
- 'type': 'string'
- },
- 'minItems': 1
- },
- 'uri': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'search': {
- 'type': 'array',
- 'item': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'minItems': 1
- },
- 'search_dns': {
- 'type': 'boolean',
- }
- }
- }
-}
-schema = {
- 'id': 'cc_apt_configure',
- 'name': 'Apt Configure',
- 'title': 'Configure apt for the user',
- 'description': dedent("""\
+
+meta: MetaSchema = {
+ "id": "cc_apt_configure",
+ "name": "Apt Configure",
+ "title": "Configure apt for the user",
+ "description": dedent(
+ """\
This module handles both configuration of apt options and adding
source lists. There are configuration options such as
``apt_get_wrapper`` and ``apt_get_command`` that control how
@@ -80,9 +52,12 @@ schema = {
.. note::
For more information about apt configuration, see the
- ``Additional apt configuration`` example."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ ``Additional apt configuration`` example."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
apt:
preserve_sources_list: false
disable_suites:
@@ -99,11 +74,12 @@ schema = {
search:
- 'http://cool.but-sometimes-unreachable.com/ubuntu'
- 'http://us.archive.ubuntu.com/ubuntu'
- search_dns: <true/false>
+ search_dns: false
- arches:
- s390x
- arm64
uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+
security:
- arches:
- default
@@ -130,7 +106,7 @@ schema = {
source1:
keyid: 'keyid'
keyserver: 'keyserverurl'
- source: 'deb http://<url>/ xenial main'
+ source: 'deb [signed-by=$KEY_FILE] http://<url>/ bionic main'
source2:
source: 'ppa:<ppa-name>'
source3:
@@ -138,239 +114,13 @@ schema = {
key: |
------BEGIN PGP PUBLIC KEY BLOCK-------
<key data>
- ------END PGP PUBLIC KEY BLOCK-------""")],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'apt': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'preserve_sources_list': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- By default, cloud-init will generate a new sources
- list in ``/etc/apt/sources.list.d`` based on any
- changes specified in cloud config. To disable this
- behavior and preserve the sources list from the
- pristine image, set ``preserve_sources_list``
- to ``true``.
-
- The ``preserve_sources_list`` option overrides
- all other config keys that would alter
- ``sources.list`` or ``sources.list.d``,
- **except** for additional sources to be added
- to ``sources.list.d``.""")
- },
- 'disable_suites': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
- Entries in the sources list can be disabled using
- ``disable_suites``, which takes a list of suites
- to be disabled. If the string ``$RELEASE`` is
- present in a suite in the ``disable_suites`` list,
- it will be replaced with the release name. If a
- suite specified in ``disable_suites`` is not
- present in ``sources.list`` it will be ignored.
- For convenience, several aliases are provided for
- ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``.
-
- When a suite is disabled using ``disable_suites``,
- its entry in ``sources.list`` is not deleted; it
- is just commented out.""")
- },
- 'primary': {
- **mirror_property,
- 'description': dedent("""\
- The primary and security archive mirrors can
- be specified using the ``primary`` and
- ``security`` keys, respectively. Both the
- ``primary`` and ``security`` keys take a list
- of configs, allowing mirrors to be specified
- on a per-architecture basis. Each config is a
- dictionary which must have an entry for
- ``arches``, specifying which architectures
- that config entry is for. The keyword
- ``default`` applies to any architecture not
- explicitly listed. The mirror url can be specified
- with the ``uri`` key, or a list of mirrors to
- check can be provided in order, with the first
- mirror that can be resolved being selected. This
- allows the same configuration to be used in
- different environment, with different hosts used
- for a local apt mirror. If no mirror is provided
- by ``uri`` or ``search``, ``search_dns`` may be
- used to search for dns names in the format
- ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata,
- - localdomain,
- - domains listed in ``/etc/resolv.conf``.
-
- If there is a dns entry for ``<distro>-mirror``,
- then it is assumed that there is a distro mirror
- at ``http://<distro>-mirror.<domain>/<distro>``.
- If the ``primary`` key is defined, but not the
- ``security`` key, then then configuration for
- ``primary`` is also used for ``security``.
- If ``search_dns`` is used for the ``security``
- key, the search pattern will be
- ``<distro>-security-mirror``.
-
- If no mirrors are specified, or all lookups fail,
- then default mirrors defined in the datasource
- are used. If none are present in the datasource
- either the following defaults are used:
-
- - ``primary`` => \
- ``http://archive.ubuntu.com/ubuntu``.
- - ``security`` => \
- ``http://security.ubuntu.com/ubuntu``
- """)},
- 'security': {
- **mirror_property,
- 'description': dedent("""\
- Please refer to the primary config documentation""")
- },
- 'add_apt_repo_match': {
- 'type': 'string',
- 'default': ADD_APT_REPO_MATCH,
- 'description': dedent("""\
- All source entries in ``apt-sources`` that match
- regex in ``add_apt_repo_match`` will be added to
- the system using ``add-apt-repository``. If
- ``add_apt_repo_match`` is not specified, it
- defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
- },
- 'debconf_selections': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
- Debconf additional configurations can be specified as a
- dictionary under the ``debconf_selections`` config
- key, with each key in the dict representing a
- different set of configurations. The value of each key
- must be a string containing all the debconf
- configurations that must be applied. We will bundle
- all of the values and pass them to
- ``debconf-set-selections``. Therefore, each value line
- must be a valid entry for ``debconf-set-selections``,
- meaning that they must possess for distinct fields:
-
- ``pkgname question type answer``
-
- Where:
-
- - ``pkgname`` is the name of the package.
- - ``question`` the name of the questions.
- - ``type`` is the type of question.
- - ``answer`` is the value used to ansert the \
- question.
-
- For example: \
- ``ippackage ippackage/ip string 127.0.01``
- """)
- },
- 'sources_list': {
- 'type': 'string',
- 'description': dedent("""\
- Specifies a custom template for rendering
- ``sources.list`` . If no ``sources_list`` template
- is given, cloud-init will use sane default. Within
- this template, the following strings will be
- replaced with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``""")
- },
- 'conf': {
- 'type': 'string',
- 'description': dedent("""\
- Specify configuration for apt, such as proxy
- configuration. This configuration is specified as a
- string. For multiline apt configuration, make sure
- to follow yaml syntax.""")
- },
- 'https_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify https apt proxy.
- https proxy url is specified in the format
- ``https://[[user][:pass]@]host[:port]/``.""")
- },
- 'http_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify http apt proxy.
- http proxy url is specified in the format
- ``http://[[user][:pass]@]host[:port]/``.""")
- },
- 'proxy': {
- 'type': 'string',
- 'description': 'Alias for defining a http apt proxy.'
- },
- 'ftp_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify ftp apt proxy.
- ftp proxy url is specified in the format
- ``ftp://[[user][:pass]@]host[:port]/``.""")
- },
- 'sources': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
- Source list entries can be specified as a
- dictionary under the ``sources`` config key, with
- each key in the dict representing a different source
- file. The key of each source entry will be used
- as an id that can be referenced in other config
- entries, as well as the filename for the source's
- configuration under ``/etc/apt/sources.list.d``.
- If the name does not end with ``.list``, it will
- be appended. If there is no configuration for a
- key in ``sources``, no file will be written, but
- the key may still be referred to as an id in other
- ``sources`` entries.
-
- Each entry under ``sources`` is a dictionary which
- may contain any of the following optional keys:
-
- - ``source``: a sources.list entry \
- (some variable replacements apply).
- - ``keyid``: a key to import via shortid or \
- fingerprint.
- - ``key``: a raw PGP key.
- - ``keyserver``: alternate keyserver to pull \
- ``keyid`` key from.
-
- The ``source`` key supports variable
- replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``""")
- }
- }
- }
- }
+ ------END PGP PUBLIC KEY BLOCK-------"""
+ )
+ ],
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
# place where apt stores cached repository data
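Note: as in cc_apk_configure, the per-module jsonschema body is dropped here and only module metadata is kept. A minimal sketch of the meta shape these modules now share, using a made-up module id and a stand-in frequency value:

    meta = {
        "id": "cc_example",                  # made-up module id
        "name": "Example",
        "title": "Illustrative module metadata",
        "description": "Keys every converted module carries after this patch.",
        "distros": ["ubuntu", "debian"],
        "examples": ["example:\n  enabled: true\n"],
        "frequency": "once-per-instance",    # stand-in for PER_INSTANCE
    }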
@@ -384,18 +134,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy"
DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
# Default archive mirrors
-PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
- "SECURITY": "http://security.ubuntu.com/ubuntu/"}
-PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
- "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
-PRIMARY_ARCHES = ['amd64', 'i386']
-PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+PRIMARY_ARCH_MIRRORS = {
+ "PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+ "SECURITY": "http://security.ubuntu.com/ubuntu/",
+}
+PORTS_MIRRORS = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+}
+PRIMARY_ARCHES = ["amd64", "i386"]
+PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"]
def get_default_mirrors(arch=None, target=None):
"""returns the default mirrors for the target. These depend on the
- architecture, for more see:
- https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+ architecture, for more see:
+ https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
@@ -407,8 +161,8 @@ def get_default_mirrors(arch=None, target=None):
def handle(name, ocfg, cloud, log, _):
"""process the config for apt_config. This can be called from
- curthooks if a global apt config was provided or via the "apt"
- standalone command."""
+ curthooks if a global apt config was provided or via the "apt"
+ standalone command."""
# keeping code close to curtin codebase via entry handler
target = None
if log is not None:
@@ -416,14 +170,15 @@ def handle(name, ocfg, cloud, log, _):
LOG = log
# feed back converted config, but only work on the subset under 'apt'
ocfg = convert_to_v3_apt_format(ocfg)
- cfg = ocfg.get('apt', {})
+ cfg = ocfg.get("apt", {})
if not isinstance(cfg, dict):
raise ValueError(
"Expected dictionary for 'apt' config, found {config_type}".format(
- config_type=type(cfg)))
+ config_type=type(cfg)
+ )
+ )
- validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -432,7 +187,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (subp.which('apt-get') or subp.which('apt')):
+ if not (subp.which("apt-get") or subp.which("apt")):
return False, "no apt commands."
return True, "Apt is available."
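Note: the hunk above only rewraps the command lookup; the check itself is a straight PATH probe. The same idea expressed with the stdlib, using shutil.which in place of cloudinit.subp.which and omitting the snappy check:

    import shutil


    def should_configure_apt():
        # report whether apt configuration is worth attempting on this system
        if not (shutil.which("apt-get") or shutil.which("apt")):
            return False, "no apt commands."
        return True, "Apt is available."


    print(should_configure_apt())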
@@ -447,12 +202,13 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
- release = util.lsb_release(target=target)['codename']
+ release = util.lsb_release(target=target)["codename"]
arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
- if util.is_false(cfg.get('preserve_sources_list', False)):
+ if util.is_false(cfg.get("preserve_sources_list", False)):
+ add_mirror_keys(cfg, target)
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target, arch)
@@ -462,25 +218,34 @@ def apply_apt(cfg, cloud, target):
LOG.exception("Failed to apply proxy or apt config info:")
# Process 'apt_source -> sources {dict}'
- if 'sources' in cfg:
+ if "sources" in cfg:
params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirrors["MIRROR"]
+ params["RELEASE"] = release
+ params["MIRROR"] = mirrors["MIRROR"]
matcher = None
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
+ matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH)
if matchcfg:
matcher = re.compile(matchcfg).search
- add_apt_sources(cfg['sources'], cloud, target=target,
- template_params=params, aa_repo_match=matcher)
+ add_apt_sources(
+ cfg["sources"],
+ cloud,
+ target=target,
+ template_params=params,
+ aa_repo_match=matcher,
+ )
def debconf_set_selections(selections, target=None):
- if not selections.endswith(b'\n'):
- selections += b'\n'
- subp.subp(['debconf-set-selections'], data=selections, target=target,
- capture=True)
+ if not selections.endswith(b"\n"):
+ selections += b"\n"
+ subp.subp(
+ ["debconf-set-selections"],
+ data=selections,
+ target=target,
+ capture=True,
+ )
def dpkg_reconfigure(packages, target=None):
@@ -500,12 +265,20 @@ def dpkg_reconfigure(packages, target=None):
unhandled.append(pkg)
if len(unhandled):
- LOG.warning("The following packages were installed and preseeded, "
- "but cannot be unconfigured: %s", unhandled)
+ LOG.warning(
+ "The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s",
+ unhandled,
+ )
if len(to_config):
- subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
- list(to_config), data=None, target=target, capture=True)
+ subp.subp(
+ ["dpkg-reconfigure", "--frontend=noninteractive"]
+ + list(to_config),
+ data=None,
+ target=target,
+ capture=True,
+ )
def apply_debconf_selections(cfg, target=None):
@@ -514,13 +287,12 @@ def apply_debconf_selections(cfg, target=None):
# set1: |
# cloud-init cloud-init/datasources multiselect MAAS
# set2: pkg pkg/value string bar
- selsets = cfg.get('debconf_selections')
+ selsets = cfg.get("debconf_selections")
if not selsets:
LOG.debug("debconf_selections was not set in config")
return
- selections = '\n'.join(
- [selsets[key] for key in sorted(selsets.keys())])
+ selections = "\n".join([selsets[key] for key in sorted(selsets.keys())])
debconf_set_selections(selections.encode(), target=target)
# get a complete list of packages listed in input
@@ -547,7 +319,8 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")
+ )
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -556,18 +329,18 @@ def clean_cloud_init(target):
def mirrorurl_to_apt_fileprefix(mirror):
"""mirrorurl_to_apt_fileprefix
- Convert a mirror url to the file prefix used by apt on disk to
- store cache information for that mirror.
- To do so do:
- - take off ???://
- - drop tailing /
- - convert in string / to _"""
+ Convert a mirror url to the file prefix used by apt on disk to
+ store cache information for that mirror.
+ To do so do:
+ - take off ???://
+ - drop tailing /
+ - convert in string / to _"""
string = mirror
if string.endswith("/"):
string = string[0:-1]
pos = string.find("://")
if pos >= 0:
- string = string[pos + 3:]
+ string = string[pos + 3 :]
string = string.replace("/", "_")
return string
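Note: the docstring re-indentation above leaves the prefix transformation unchanged; reproduced stand-alone for clarity (helper name is illustrative):

    def mirror_to_fileprefix(mirror):
        # strip the scheme, drop one trailing slash, then map '/' to '_',
        # matching mirrorurl_to_apt_fileprefix above
        s = mirror
        if s.endswith("/"):
            s = s[:-1]
        pos = s.find("://")
        if pos >= 0:
            s = s[pos + 3:]
        return s.replace("/", "_")


    assert (
        mirror_to_fileprefix("http://archive.ubuntu.com/ubuntu/")
        == "archive.ubuntu.com_ubuntu"
    )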
@@ -599,8 +372,8 @@ def rename_apt_lists(new_mirrors, target, arch):
def mirror_to_placeholder(tmpl, mirror, placeholder):
"""mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found"""
+ replace the specified mirror in a template with a placeholder string
+ Checks for existance of the expected mirror and warns if not found"""
if mirror not in tmpl:
LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
return tmpl.replace(mirror, placeholder)
@@ -608,13 +381,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
def map_known_suites(suite):
"""there are a few default names which will be auto-extended.
- This comes at the inability to use those names literally as suites,
- but on the other hand increases readability of the cfg quite a lot"""
- mapping = {'updates': '$RELEASE-updates',
- 'backports': '$RELEASE-backports',
- 'security': '$RELEASE-security',
- 'proposed': '$RELEASE-proposed',
- 'release': '$RELEASE'}
+ This comes at the inability to use those names literally as suites,
+ but on the other hand increases readability of the cfg quite a lot"""
+ mapping = {
+ "updates": "$RELEASE-updates",
+ "backports": "$RELEASE-backports",
+ "security": "$RELEASE-security",
+ "proposed": "$RELEASE-proposed",
+ "release": "$RELEASE",
+ }
try:
retsuite = mapping[suite]
except KeyError:
@@ -624,14 +399,14 @@ def map_known_suites(suite):
def disable_suites(disabled, src, release):
"""reads the config for suites to be disabled and removes those
- from the template"""
+ from the template"""
if not disabled:
return src
retsrc = src
for suite in disabled:
suite = map_known_suites(suite)
- releasesuite = templater.render_string(suite, {'RELEASE': release})
+ releasesuite = templater.render_string(suite, {"RELEASE": release})
LOG.debug("Disabling suite %s as %s", suite, releasesuite)
newsrc = ""
@@ -653,109 +428,146 @@ def disable_suites(disabled, src, release):
break
if cols[pcol] == releasesuite:
- line = '# suite disabled by cloud-init: %s' % line
+ line = "# suite disabled by cloud-init: %s" % line
newsrc += line
retsrc = newsrc
return retsrc
+def add_mirror_keys(cfg, target):
+ """Adds any keys included in the primary/security mirror clauses"""
+ for key in ("primary", "security"):
+ for mirror in cfg.get(key, []):
+ add_apt_key(mirror, target, file_name=key)
+
+
def generate_sources_list(cfg, release, mirrors, cloud):
"""generate_sources_list
- create a source.list file based on a custom or default template
- by replacing mirrors and release in the template"""
+ create a source.list file based on a custom or default template
+ by replacing mirrors and release in the template"""
aptsrc = "/etc/apt/sources.list"
- params = {'RELEASE': release, 'codename': release}
+ params = {"RELEASE": release, "codename": release}
for k in mirrors:
params[k] = mirrors[k]
params[k.lower()] = mirrors[k]
- tmpl = cfg.get('sources_list', None)
+ tmpl = cfg.get("sources_list", None)
if tmpl is None:
LOG.info("No custom template provided, fall back to builtin")
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
+ template_fn = cloud.get_template_filename(
+ "sources.list.%s" % (cloud.distro.name)
+ )
if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
+ template_fn = cloud.get_template_filename("sources.list")
if not template_fn:
- LOG.warning("No template found, "
- "not rendering /etc/apt/sources.list")
+ LOG.warning(
+ "No template found, not rendering /etc/apt/sources.list"
+ )
return
tmpl = util.load_file(template_fn)
rendered = templater.render_string(tmpl, params)
- disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+ disabled = disable_suites(cfg.get("disable_suites"), rendered, release)
util.write_file(aptsrc, disabled, mode=0o644)
-def add_apt_key_raw(key, target=None):
+def add_apt_key_raw(key, file_name, hardened=False, target=None):
"""
actual adding of a key as defined in key argument
to the system
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ name = pathlib.Path(file_name).stem
+ return apt_key("add", output_file=name, data=key, hardened=hardened)
except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
-def add_apt_key(ent, target=None):
+def add_apt_key(ent, target=None, hardened=False, file_name=None):
"""
Add key to the system as defined in ent (if any).
Supports raw keys or keyid's
The latter will as a first step fetched to get the raw key
"""
- if 'keyid' in ent and 'key' not in ent:
+ if "keyid" in ent and "key" not in ent:
keyserver = DEFAULT_KEYSERVER
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
+ if "keyserver" in ent:
+ keyserver = ent["keyserver"]
- ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+ ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver)
- if 'key' in ent:
- add_apt_key_raw(ent['key'], target)
+ if "key" in ent:
+ return add_apt_key_raw(
+ ent["key"], file_name or ent["filename"], hardened=hardened
+ )
def update_packages(cloud):
cloud.distro.update_package_sources()
-def add_apt_sources(srcdict, cloud, target=None, template_params=None,
- aa_repo_match=None):
+def add_apt_sources(
+ srcdict, cloud, target=None, template_params=None, aa_repo_match=None
+):
"""
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srcdict'. When rendering template, also
- include the values in dictionary searchList
+ install keys and repo source .list files defined in 'sources'
+
+ for each 'source' entry in the config:
+ 1. expand template variables and write source .list file in
+ /etc/apt/sources.list.d/
+ 2. install defined keys
+ 3. update packages via distro-specific method (i.e. apt-key update)
+
+
+ @param srcdict: a dict containing elements required
+ @param cloud: cloud instance object
+
+ Example srcdict value:
+ {
+ 'rio-grande-repo': {
+ 'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main',
+ 'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77',
+ 'keyserver': 'pgp.mit.edu'
+ }
+ }
+
+ Note: Deb822 format is not supported
"""
if template_params is None:
template_params = {}
if aa_repo_match is None:
- raise ValueError('did not get a valid repo matcher')
+ raise ValueError("did not get a valid repo matcher")
if not isinstance(srcdict, dict):
- raise TypeError('unknown apt format: %s' % (srcdict))
+ raise TypeError("unknown apt format: %s" % (srcdict))
for filename in srcdict:
ent = srcdict[filename]
LOG.debug("adding source/key '%s'", ent)
- if 'filename' not in ent:
- ent['filename'] = filename
+ if "filename" not in ent:
+ ent["filename"] = filename
- add_apt_key(ent, target)
+ if "source" in ent and "$KEY_FILE" in ent["source"]:
+ key_file = add_apt_key(ent, target, hardened=True)
+ template_params["KEY_FILE"] = key_file
+ else:
+ key_file = add_apt_key(ent, target)
- if 'source' not in ent:
+ if "source" not in ent:
continue
- source = ent['source']
+ source = ent["source"]
source = templater.render_string(source, template_params)
- if not ent['filename'].startswith("/"):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
- if not ent['filename'].endswith(".list"):
- ent['filename'] += ".list"
+ if not ent["filename"].startswith("/"):
+ ent["filename"] = os.path.join(
+ "/etc/apt/sources.list.d/", ent["filename"]
+ )
+ if not ent["filename"].endswith(".list"):
+ ent["filename"] += ".list"
if aa_repo_match(source):
try:
@@ -765,7 +577,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
raise
continue
- sourcefn = subp.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent["filename"])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -781,17 +593,21 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
def convert_v1_to_v2_apt_format(srclist):
"""convert v1 apt format to v2 (dict in apt_sources)"""
srcdict = {}
+ LOG.warning(
+ "DEPRECATION: 'apt_sources' deprecated config key found."
+ " Use 'apt' instead"
+ )
if isinstance(srclist, list):
LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
for srcent in srclist:
- if 'filename' not in srcent:
+ if "filename" not in srcent:
# file collides for multiple !filename cases for compatibility
# yet we need them all processed, so not same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
+ srcent["filename"] = "cloud_config_sources.list"
key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
else:
# all with filename use that as key (matching new format)
- key = srcent['filename']
+ key = srcent["filename"]
srcdict[key] = srcent
elif isinstance(srclist, dict):
srcdict = srclist
@@ -803,7 +619,7 @@ def convert_v1_to_v2_apt_format(srclist):
def convert_key(oldcfg, aptcfg, oldkey, newkey):
"""convert an old key to the new one if the old one exists
- returns true if a key was found and converted"""
+ returns true if a key was found and converted"""
if oldcfg.get(oldkey, None) is not None:
aptcfg[newkey] = oldcfg.get(oldkey)
del oldcfg[oldkey]
@@ -813,33 +629,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey):
def convert_mirror(oldcfg, aptcfg):
"""convert old apt_mirror keys into the new more advanced mirror spec"""
- keymap = [('apt_mirror', 'uri'),
- ('apt_mirror_search', 'search'),
- ('apt_mirror_search_dns', 'search_dns')]
+ keymap = [
+ ("apt_mirror", "uri"),
+ ("apt_mirror_search", "search"),
+ ("apt_mirror_search_dns", "search_dns"),
+ ]
converted = False
- newmcfg = {'arches': ['default']}
+ newmcfg = {"arches": ["default"]}
for oldkey, newkey in keymap:
if convert_key(oldcfg, newmcfg, oldkey, newkey):
converted = True
# only insert new style config if anything was converted
if converted:
- aptcfg['primary'] = [newmcfg]
+ aptcfg["primary"] = [newmcfg]
def convert_v2_to_v3_apt_format(oldcfg):
"""convert old to new keys and adapt restructured mirror spec"""
- mapoldkeys = {'apt_sources': 'sources',
- 'apt_mirror': None,
- 'apt_mirror_search': None,
- 'apt_mirror_search_dns': None,
- 'apt_proxy': 'proxy',
- 'apt_http_proxy': 'http_proxy',
- 'apt_ftp_proxy': 'https_proxy',
- 'apt_https_proxy': 'ftp_proxy',
- 'apt_preserve_sources_list': 'preserve_sources_list',
- 'apt_custom_sources_list': 'sources_list',
- 'add_apt_repo_match': 'add_apt_repo_match'}
+ mapoldkeys = {
+ "apt_sources": "sources",
+ "apt_mirror": None,
+ "apt_mirror_search": None,
+ "apt_mirror_search_dns": None,
+ "apt_proxy": "proxy",
+ "apt_http_proxy": "http_proxy",
+ "apt_ftp_proxy": "https_proxy",
+ "apt_https_proxy": "ftp_proxy",
+ "apt_preserve_sources_list": "preserve_sources_list",
+ "apt_custom_sources_list": "sources_list",
+ "add_apt_repo_match": "add_apt_repo_match",
+ }
needtoconvert = []
for oldkey in mapoldkeys:
if oldkey in oldcfg:
@@ -851,13 +671,19 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no old config, so no new one to be created
if not needtoconvert:
return oldcfg
- LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
- ", ".join(needtoconvert))
+ LOG.warning(
+ "DEPRECATION apt: converted deprecated config V2 to V3 format for"
+ " keys '%s'. Use updated config keys.",
+ ", ".join(needtoconvert),
+ )
# if old AND new config are provided, prefer the new one (LP #1616831)
- newaptcfg = oldcfg.get('apt', None)
+ newaptcfg = oldcfg.get("apt", None)
if newaptcfg is not None:
- LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+ LOG.warning(
+ "DEPRECATION: apt config: deprecated V1/2 and V3 format specified,"
+ " preferring V3"
+ )
for oldkey in needtoconvert:
newkey = mapoldkeys[oldkey]
verify = oldcfg[oldkey] # drop, but keep a ref for verification
@@ -866,10 +692,11 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no simple mapping or no collision on this particular key
continue
if verify != newaptcfg[newkey]:
- raise ValueError("Old and New apt format defined with unequal "
- "values %s vs %s @ %s" % (verify,
- newaptcfg[newkey],
- oldkey))
+ raise ValueError(
+ "Old and New apt format defined with unequal "
+ "values %s vs %s @ %s"
+ % (verify, newaptcfg[newkey], oldkey)
+ )
# return conf after clearing conflicting V1/2 keys
return oldcfg
@@ -889,17 +716,17 @@ def convert_v2_to_v3_apt_format(oldcfg):
raise ValueError("old apt key '%s' left after conversion" % oldkey)
# insert new format into config and return full cfg with only v3 content
- oldcfg['apt'] = aptcfg
+ oldcfg["apt"] = aptcfg
return oldcfg
def convert_to_v3_apt_format(cfg):
"""convert the old list based format to the new dict based one. After that
- convert the old dict keys/format to v3 a.k.a 'new apt config'"""
+ convert the old dict keys/format to v3 a.k.a 'new apt config'"""
# V1 -> V2, the apt_sources entry from list to dict
- apt_sources = cfg.get('apt_sources', None)
+ apt_sources = cfg.get("apt_sources", None)
if apt_sources is not None:
- cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources)
+ cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources)
# V2 -> V3, move all former globals under the "apt" key
# Restructure into new key names and mirror hierarchy
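The V2 to V3 step above folds the flat apt_* keys under a single "apt" dict according to mapoldkeys. A trimmed-down sketch with a hypothetical MAP covering only a few keys, not the module's full table:

    # Sketch only: fold flat V2 keys under one "apt" dict.
    MAP = {"apt_proxy": "proxy", "apt_http_proxy": "http_proxy",
           "apt_preserve_sources_list": "preserve_sources_list"}

    def v2_to_v3(cfg):
        apt = dict(cfg.get("apt", {}))
        for old, new in MAP.items():
            if old in cfg:
                apt.setdefault(new, cfg.pop(old))
        cfg["apt"] = apt
        return cfg

    print(v2_to_v3({"apt_http_proxy": "http://proxy:3128", "other": 1}))
    # -> {'other': 1, 'apt': {'http_proxy': 'http://proxy:3128'}}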
@@ -931,7 +758,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
if mydom:
doms.append(".%s" % mydom)
- doms.extend((".localdomain", "",))
+ doms.extend(
+ (
+ ".localdomain",
+ "",
+ )
+ )
mirror_list = []
distro = cloud.distro.name
@@ -946,12 +778,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
def update_mirror_info(pmirror, smirror, arch, cloud):
"""sets security mirror to primary if not defined.
- returns defaults if no mirrors are defined"""
+ returns defaults if no mirrors are defined"""
if pmirror is not None:
if smirror is None:
smirror = pmirror
- return {'PRIMARY': pmirror,
- 'SECURITY': smirror}
+ return {"PRIMARY": pmirror, "SECURITY": smirror}
# None specified at all, get default mirrors from cloud
mirror_info = cloud.datasource.get_package_mirror_info()
@@ -960,8 +791,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
# arbitrary key/value pairs including 'primary' and 'security' keys.
# caller expects dict with PRIMARY and SECURITY.
m = mirror_info.copy()
- m['PRIMARY'] = m['primary']
- m['SECURITY'] = m['security']
+ m["PRIMARY"] = m["primary"]
+ m["SECURITY"] = m["security"]
return m
@@ -971,7 +802,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
def get_arch_mirrorconfig(cfg, mirrortype, arch):
"""out of a list of potential mirror configurations select
- and return the one matching the architecture (or default)"""
+ and return the one matching the architecture (or default)"""
# select the mirror specification (if-any)
mirror_cfg_list = cfg.get(mirrortype, None)
if mirror_cfg_list is None:
@@ -980,7 +811,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
# select the specification matching the target arch
default = None
for mirror_cfg_elem in mirror_cfg_list:
- arches = mirror_cfg_elem.get("arches")
+ arches = mirror_cfg_elem.get("arches") or []
if arch in arches:
return mirror_cfg_elem
if "default" in arches:
@@ -990,8 +821,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
def get_mirror(cfg, mirrortype, arch, cloud):
"""pass the three potential stages of mirror specification
- returns None is neither of them found anything otherwise the first
- hit is returned"""
+    returns None if neither of them found anything, otherwise the first
+ hit is returned"""
mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
if mcfg is None:
return None
@@ -1007,18 +838,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search_dns if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
- mirrortype, cfg, cloud)
+ mirror = search_for_mirror_dns(
+ mcfg.get("search_dns", None), mirrortype, cfg, cloud
+ )
return mirror
def find_apt_mirror_info(cfg, cloud, arch=None):
"""find_apt_mirror_info
- find an apt_mirror given the cfg provided.
- It can check for separate config of primary and security mirrors
- If only primary is given security is assumed to be equal to primary
- If the generic apt_mirror is given that is defining for both
+ find an apt_mirror given the cfg provided.
+ It can check for separate config of primary and security mirrors
+ If only primary is given security is assumed to be equal to primary
+ If the generic apt_mirror is given that is defining for both
"""
if arch is None:
@@ -1039,32 +871,115 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
def apply_apt_config(cfg, proxy_fname, config_fname):
"""apply_apt_config
- Applies any apt*proxy config from if specified
+ Applies any apt*proxy config from if specified
"""
# Set up any apt proxy
- cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
- ('http_proxy', 'Acquire::http::Proxy "%s";'),
- ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
- ('https_proxy', 'Acquire::https::Proxy "%s";'))
+ cfgs = (
+ ("proxy", 'Acquire::http::Proxy "%s";'),
+ ("http_proxy", 'Acquire::http::Proxy "%s";'),
+ ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+ ("https_proxy", 'Acquire::https::Proxy "%s";'),
+ )
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
if len(proxies):
LOG.debug("write apt proxy info to %s", proxy_fname)
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+ util.write_file(proxy_fname, "\n".join(proxies) + "\n")
elif os.path.isfile(proxy_fname):
util.del_file(proxy_fname)
LOG.debug("no apt proxy configured, removed %s", proxy_fname)
- if cfg.get('conf', None):
+ if cfg.get("conf", None):
LOG.debug("write apt config info to %s", config_fname)
- util.write_file(config_fname, cfg.get('conf'))
+ util.write_file(config_fname, cfg.get("conf"))
elif os.path.isfile(config_fname):
util.del_file(config_fname)
LOG.debug("no apt config configured, removed %s", config_fname)
+def apt_key(
+ command, output_file=None, data=None, hardened=False, human_output=True
+):
+ """apt-key replacement
+
+ commands implemented: 'add', 'list', 'finger'
+
+ @param output_file: name of output gpg file (without .gpg or .asc)
+ @param data: key contents
+ @param human_output: list keys formatted for human parsing
+    @param hardened: write keys to /etc/apt/cloud-init.gpg.d/ (referred to
+ with [signed-by] in sources file)
+ """
+
+ def _get_key_files():
+ """return all apt keys
+
+ /etc/apt/trusted.gpg (if it exists) and all keyfiles (and symlinks to
+ keyfiles) in /etc/apt/trusted.gpg.d/ are returned
+
+ based on apt-key implementation
+ """
+ key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else []
+
+ for file in os.listdir(APT_TRUSTED_GPG_DIR):
+ if file.endswith(".gpg") or file.endswith(".asc"):
+ key_files.append(APT_TRUSTED_GPG_DIR + file)
+ return key_files if key_files else ""
+
+ def apt_key_add():
+ """apt-key add <file>
+
+ returns filepath to new keyring, or '/dev/null' when an error occurs
+ """
+ file_name = "/dev/null"
+ if not output_file:
+ util.logexc(
+ LOG, 'Unknown filename, failed to add key: "{}"'.format(data)
+ )
+ else:
+ try:
+ key_dir = (
+ CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR
+ )
+ stdout = gpg.dearmor(data)
+ file_name = "{}{}.gpg".format(key_dir, output_file)
+ util.write_file(file_name, stdout)
+ except subp.ProcessExecutionError:
+ util.logexc(
+ LOG, "Gpg error, failed to add key: {}".format(data)
+ )
+ except UnicodeDecodeError:
+ util.logexc(
+ LOG, "Decode error, failed to add key: {}".format(data)
+ )
+ return file_name
+
+ def apt_key_list():
+ """apt-key list
+
+ returns string of all trusted keys (in /etc/apt/trusted.gpg and
+ /etc/apt/trusted.gpg.d/)
+ """
+ key_list = []
+ for key_file in _get_key_files():
+ try:
+ key_list.append(gpg.list(key_file, human_output=human_output))
+ except subp.ProcessExecutionError as error:
+ LOG.warning('Failed to list key "%s": %s', key_file, error)
+ return "\n".join(key_list)
+
+ if command == "add":
+ return apt_key_add()
+ elif command == "finger" or command == "list":
+ return apt_key_list()
+ else:
+ raise ValueError(
+ "apt_key() commands add, list, and finger are currently supported"
+ )
+
+
CONFIG_CLEANERS = {
- 'cloud-init': clean_cloud_init,
+ "cloud-init": clean_cloud_init,
}
# vi: ts=4 expandtab
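The new apt_key() helper dearmors the supplied key material and writes it into a trusted-keys directory (the hardened variant targets /etc/apt/cloud-init.gpg.d/ for use with [signed-by]). A standalone sketch of the "add" path, assuming a system gpg binary and using a hypothetical add_key helper rather than cloud-init's gpg/subp wrappers:

    # Sketch only: approximates apt_key("add", ...) with a direct gpg call.
    import subprocess

    def add_key(armored_key: str, name: str, hardened: bool = False) -> str:
        key_dir = ("/etc/apt/cloud-init.gpg.d/" if hardened
                   else "/etc/apt/trusted.gpg.d/")
        # gpg --dearmor reads the ASCII-armored key on stdin, binary on stdout
        dearmored = subprocess.run(
            ["gpg", "--dearmor"], input=armored_key.encode(),
            capture_output=True, check=True).stdout
        path = "{}{}.gpg".format(key_dir, name)
        with open(path, "wb") as f:
            f.write(dearmored)
        return path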
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index aa186ce2..901633d3 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -4,52 +4,59 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Pipelining
---------------
-**Summary:** configure apt pipelining
+"""Apt Pipelining: configure apt pipelining."""
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, which
-controls how apt handles HTTP pipelining. It may be useful for pipelining to be
-disabled, because some web servers, such as S3 do not pipeline properly (LP:
-#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
-pipelining altogether. This is the default behavior. If it is set to ``none``,
-``unchanged``, or ``os``, no change will be made to apt configuration and the
-default setting for the distro will be used. The pipeline depth can also be
-manually specified by setting ``apt_pipelining`` to a number. However, this is
-not recommended.
+from textwrap import dedent
-**Internal name:** ``cc_apt_pipelining``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
- apt_pipelining: <false/none/unchanged/os/number>
-"""
-
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-
-distros = ['ubuntu', 'debian']
-
+distros = ["ubuntu", "debian"]
DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
-
+APT_PIPE_TPL = (
+ "//Written by cloud-init per 'apt_pipelining'\n"
+ 'Acquire::http::Pipeline-Depth "%s";\n'
+)
# Acquire::http::Pipeline-Depth can be a value
# from 0 to 5 indicating how many outstanding requests APT should send.
# A value of zero MUST be specified if the remote host does not properly linger
# on TCP connections - otherwise data corruption will occur.
+meta: MetaSchema = {
+ "id": "cc_apt_pipelining",
+ "name": "Apt Pipelining",
+ "title": "Configure apt pipelining",
+ "description": dedent(
+ """\
+            This module configures apt's ``Acquire::http::Pipeline-Depth`` option,
+ which controls how apt handles HTTP pipelining. It may be useful for
+ pipelining to be disabled, because some web servers, such as S3 do not
+ pipeline properly (LP: #948461).
+
+            Valid configuration options for this module are:
+
+ * ``false`` (Default): disable pipelining altogether
+ * ``none``, ``unchanged``, or ``os``: use distro default
+ * ``<number>``: Manually specify pipeline depth. This is not recommended.""" # noqa: E501
+ ),
+ "distros": distros,
+ "frequency": frequency,
+ "examples": [
+ "apt_pipelining: false",
+ "apt_pipelining: none",
+ "apt_pipelining: unchanged",
+ "apt_pipelining: os",
+ "apt_pipelining: 3",
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
+def handle(_name, cfg, _cloud, log, _args):
+ apt_pipe_value = cfg.get("apt_pipelining", "os")
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -69,4 +76,5 @@ def write_apt_snippet(setting, log, f_name):
util.write_file(f_name, file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
+
# vi: ts=4 expandtab
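For a numeric apt_pipelining value, the module renders APT_PIPE_TPL into /etc/apt/apt.conf.d/90cloud-init-pipelining. A sketch of the snippet produced for a depth of 3, printed rather than written:

    # Sketch only: the apt.conf.d snippet produced for "apt_pipelining: 3".
    APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
                    'Acquire::http::Pipeline-Depth "%s";\n')
    print(APT_PIPE_TPL % 3, end="")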
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 246e4497..bd14aede 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -12,28 +12,20 @@
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import subp, temp_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
frequency = PER_ALWAYS
-# The schema definition for each cloud-config module is a strict contract for
-# describing supported configuration parameters for each cloud-config section.
-# It allows cloud-config to validate and alert users to invalid or ignored
-# configuration options before actually attempting to deploy with said
-# configuration.
+distros = ["all"]
-distros = ['all']
-
-schema = {
- 'id': 'cc_bootcmd',
- 'name': 'Bootcmd',
- 'title': 'Run arbitrary commands early in the boot process',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_bootcmd",
+ "name": "Bootcmd",
+ "title": "Run arbitrary commands early in the boot process",
+ "description": dedent(
+ """\
This module runs arbitrary commands very early in the boot process,
only slightly after a boothook would run. This is very similar to a
boothook, but more user friendly. The environment variable
@@ -49,42 +41,32 @@ schema = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
bootcmd:
- echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
- """)],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'bootcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
- },
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
- 'required': [],
- }
- }
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'bootcmd' key in configuration", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
try:
content = util.shellify(cfg["bootcmd"])
@@ -98,11 +80,12 @@ def handle(name, cfg, cloud, log, _args):
env = os.environ.copy()
iid = cloud.get_instance_id()
if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
+ env["INSTANCE_ID"] = str(iid)
+ cmd = ["/bin/sh", tmpf.name]
subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
+
# vi: ts=4 expandtab
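The bootcmd handler shellifies the configured entries into a temporary script and runs it with INSTANCE_ID exported. A rough standalone sketch of that flow, with a simplified shellify standing in for cloudinit.util.shellify and illustrative command values:

    # Sketch only: run bootcmd entries from a temp script with INSTANCE_ID set.
    import os
    import shlex
    import subprocess
    import tempfile

    def shellify(cmds):
        lines = ["#!/bin/sh"]
        for cmd in cmds:
            # string entries pass through verbatim, list entries are quoted
            lines.append(cmd if isinstance(cmd, str) else shlex.join(cmd))
        return "\n".join(lines) + "\n"

    bootcmd = ["echo bootcmd ran at $(date) >> /tmp/bootcmd-example.log",
               ["touch", "/tmp/bootcmd-example.marker"]]
    with tempfile.NamedTemporaryFile("w", suffix=".sh") as tmpf:
        tmpf.write(shellify(bootcmd))
        tmpf.flush()
        env = dict(os.environ, INSTANCE_ID="i-example")
        subprocess.run(["/bin/sh", tmpf.name], env=env, check=False)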
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 9fdaeba1..fbc20410 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -6,11 +6,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Byobu
------
-**Summary:** enable/disable byobu system wide and for default user
+"""Byobu: Enable/disable byobu system wide and for default user."""
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module controls whether byobu is enabled or disabled system wide and for
the default system user. If byobu is to be enabled, this module will ensure it
is installed. Likewise, if it is to be disabled, it will be removed if
@@ -26,23 +29,23 @@ Valid configuration options for this module are:
- ``disable``: disable byobu for all users
- ``user``: alias for ``enable-user``
- ``system``: alias for ``enable-system``
-
-**Internal name:** ``cc_byobu``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- byobu_by_default: <user/system>
"""
+distros = ["ubuntu", "debian"]
-from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
+meta: MetaSchema = {
+ "id": "cc_byobu",
+ "name": "Byobu",
+ "title": "Enable/disable byobu system wide and for default user",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "byobu_by_default: enable-user",
+ "byobu_by_default: disable-system",
+ ],
+}
-distros = ['ubuntu', 'debian']
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -58,8 +61,14 @@ def handle(name, cfg, cloud, log, args):
if value == "user" or value == "system":
value = "enable-%s" % value
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
+ valid = (
+ "enable-user",
+ "enable-system",
+ "enable",
+ "disable-user",
+ "disable-system",
+ "disable",
+ )
if value not in valid:
log.warning("Unknown value %s for byobu_by_default", value)
@@ -81,13 +90,16 @@ def handle(name, cfg, cloud, log, args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warning(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
+ log.warning(
+ "No default byobu user provided, "
+ "can not launch %s for the default user",
+ bl_inst,
+ )
else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
+ shcmd += 'echo "%s" | debconf-set-selections' % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
@@ -96,4 +108,5 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting byobu to %s", value)
subp.subp(cmd, capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 3c453d91..6084cb4c 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -2,105 +2,161 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-CA Certs
---------
-**Summary:** add ca certificates
+"""CA Certs: Add ca certificates."""
+import os
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+DEFAULT_CONFIG = {
+ "ca_cert_path": "/usr/share/ca-certificates/",
+ "ca_cert_filename": "cloud-init-ca-certs.crt",
+ "ca_cert_config": "/etc/ca-certificates.conf",
+ "ca_cert_system_path": "/etc/ssl/certs/",
+ "ca_cert_update_cmd": ["update-ca-certificates"],
+}
+DISTRO_OVERRIDES = {
+ "rhel": {
+ "ca_cert_path": "/usr/share/pki/ca-trust-source/",
+ "ca_cert_filename": "anchors/cloud-init-ca-certs.crt",
+ "ca_cert_config": None,
+ "ca_cert_system_path": "/etc/pki/ca-trust/",
+ "ca_cert_update_cmd": ["update-ca-trust"],
+ }
+}
+
+MODULE_DESCRIPTION = """\
This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
the ssl cert cache using ``update-ca-certificates``. The default certificates
can be removed from the system with the configuration option
-``remove-defaults``.
+``remove_defaults``.
.. note::
certificates must be specified using valid yaml. in order to specify a
multiline certificate, the yaml multiline list syntax must be used
.. note::
- For Alpine Linux the "remove-defaults" functionality works if the
+ For Alpine Linux the "remove_defaults" functionality works if the
ca-certificates package is installed but not if the
ca-certificates-bundle package is installed.
-
-**Internal name:** ``cc_ca_certs``
-
-**Module frequency:** per instance
-
-**Supported distros:** alpine, debian, ubuntu
-
-**Config keys**::
-
- ca-certs:
- remove-defaults: <true/false>
- trusted:
- - <single line cert>
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
"""
-
-import os
-
-from cloudinit import subp
-from cloudinit import util
-
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-
-distros = ['alpine', 'debian', 'ubuntu']
+distros = ["alpine", "debian", "ubuntu", "rhel"]
+
+meta: MetaSchema = {
+ "id": "cc_ca_certs",
+ "name": "CA Certificates",
+ "title": "Add ca certificates",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ca_certs:
+ remove_defaults: true
+ trusted:
+ - single_line_cert
+ - |
+ -----BEGIN CERTIFICATE-----
+ YOUR-ORGS-TRUSTED-CA-CERT-HERE
+ -----END CERTIFICATE-----
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
+
+def _distro_ca_certs_configs(distro_name):
+ """Return a distro-specific ca_certs config dictionary
+
+ @param distro_name: String providing the distro class name.
+ @returns: Dict of distro configurations for ca-cert.
+ """
+ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG)
+ cfg["ca_cert_full_path"] = os.path.join(
+ cfg["ca_cert_path"], cfg["ca_cert_filename"]
+ )
+ return cfg
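_distro_ca_certs_configs above selects either the Debian-style defaults or the rhel override and derives ca_cert_full_path from it. A standalone sketch of that selection with a trimmed-down table:

    # Sketch only: distro-specific ca_certs settings, as selected above.
    import os

    DEFAULT = {"ca_cert_path": "/usr/share/ca-certificates/",
               "ca_cert_filename": "cloud-init-ca-certs.crt",
               "ca_cert_update_cmd": ["update-ca-certificates"]}
    OVERRIDES = {"rhel": {"ca_cert_path": "/usr/share/pki/ca-trust-source/",
                          "ca_cert_filename": "anchors/cloud-init-ca-certs.crt",
                          "ca_cert_update_cmd": ["update-ca-trust"]}}

    def distro_cfg(name):
        cfg = dict(OVERRIDES.get(name, DEFAULT))
        cfg["ca_cert_full_path"] = os.path.join(cfg["ca_cert_path"],
                                                cfg["ca_cert_filename"])
        return cfg

    print(distro_cfg("rhel")["ca_cert_full_path"])
    # -> /usr/share/pki/ca-trust-source/anchors/cloud-init-ca-certs.crt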
-def update_ca_certs():
+def update_ca_certs(distro_cfg):
"""
Updates the CA certificate cache on the current machine.
+
+ @param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- subp.subp(["update-ca-certificates"], capture=False)
+ subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False)
-def add_ca_certs(certs):
+def add_ca_certs(distro_cfg, certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
+ @param distro_cfg: A hash providing _distro_ca_certs_configs function.
@param certs: A list of certificate strings.
"""
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- if os.stat(CA_CERT_CONFIG).st_size == 0:
- # If the CA_CERT_CONFIG file is empty (i.e. all existing
- # CA certs have been deleted) then simply output a single
- # line with the cloud-init cert filename.
- out = "%s\n" % CA_CERT_FILENAME
- else:
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs(distro_name):
+ if not certs:
+ return
+ # First ensure they are strings...
+ cert_file_contents = "\n".join([str(c) for c in certs])
+ util.write_file(
+ distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644
+ )
+ update_cert_config(distro_cfg)
+
+
+def update_cert_config(distro_cfg):
+ """
+    Update the certificate config file to add the file path managed by cloud-init
+
+ @param distro_cfg: A hash providing _distro_ca_certs_configs function.
+ """
+ if distro_cfg["ca_cert_config"] is None:
+ return
+ if os.stat(distro_cfg["ca_cert_config"]).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % distro_cfg["ca_cert_filename"]
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # causes subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(distro_cfg["ca_cert_config"])
+ cr_cont = "\n".join(
+ [
+ line
+ for line in orig.splitlines()
+ if line != distro_cfg["ca_cert_filename"]
+ ]
+ )
+ out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"])
+ util.write_file(distro_cfg["ca_cert_config"], out, omode="wb")
+
+
+def remove_default_ca_certs(distro_name, distro_cfg):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
+
+ @param distro_name: String providing the distro class name.
+ @param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
+ util.delete_dir_contents(distro_cfg["ca_cert_path"])
+ util.delete_dir_contents(distro_cfg["ca_cert_system_path"])
+ util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644)
- if distro_name != 'alpine':
+ if distro_name in ["debian", "ubuntu"]:
debconf_sel = (
- "ca-certificates ca-certificates/trust_new_crts " + "select no")
- subp.subp(('debconf-set-selections', '-'), debconf_sel)
+ "ca-certificates ca-certificates/trust_new_crts " + "select no"
+ )
+ subp.subp(("debconf-set-selections", "-"), debconf_sel)
def handle(name, cfg, cloud, log, _args):
@@ -113,29 +169,50 @@ def handle(name, cfg, cloud, log, _args):
@param log: Pre-initialized Python logger object to use for logging.
@param args: Any module arguments from cloud.cfg
"""
- # If there isn't a ca-certs section in the configuration don't do anything
- if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
+ if "ca-certs" in cfg:
+ log.warning(
+ "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
+ " instead."
+ )
+ elif "ca_certs" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'ca_certs' key in configuration",
+ name,
+ )
return
- ca_cert_cfg = cfg['ca-certs']
+ if "ca-certs" in cfg and "ca_certs" in cfg:
+ log.warning(
+ "Found both ca-certs (deprecated) and ca_certs config keys."
+ " Ignoring ca-certs."
+ )
+ ca_cert_cfg = cfg.get("ca_certs", cfg.get("ca-certs"))
+ distro_cfg = _distro_ca_certs_configs(cloud.distro.name)
- # If there is a remove-defaults option set to true, remove the system
+ # If there is a remove_defaults option set to true, remove the system
# default trusted CA certs first.
- if ca_cert_cfg.get("remove-defaults", False):
+ if "remove-defaults" in ca_cert_cfg:
+ log.warning(
+ "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
+ " Use 'ca_certs.remove_defaults' instead."
+ )
+ if ca_cert_cfg.get("remove-defaults", False):
+ log.debug("Removing default certificates")
+ remove_default_ca_certs(cloud.distro.name, distro_cfg)
+ elif ca_cert_cfg.get("remove_defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.distro.name)
+ remove_default_ca_certs(cloud.distro.name, distro_cfg)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
+ add_ca_certs(distro_cfg, trusted_certs)
# Update the system with the new cert configuration.
log.debug("Updating certificates")
- update_ca_certs()
+ update_ca_certs(distro_cfg)
+
# vi: ts=4 expandtab
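The handler now accepts both ca_certs/remove_defaults and the deprecated hyphenated spellings, preferring the new keys. A small sketch of that resolution logic (hypothetical helper; prints where the module logs warnings):

    # Sketch only: prefer ca_certs over the deprecated ca-certs key.
    def resolve_ca_certs_cfg(cfg):
        if "ca-certs" in cfg:
            print("DEPRECATION: use 'ca_certs' instead of 'ca-certs'")
        section = cfg.get("ca_certs", cfg.get("ca-certs"))
        if section is None:
            return None, False
        remove = section.get("remove_defaults",
                             section.get("remove-defaults", False))
        return section, remove

    print(resolve_ca_certs_cfg({"ca-certs": {"remove-defaults": True}}))
    # -> ({'remove-defaults': True}, True)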
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index aaf71366..fdb3a6e3 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -13,87 +13,91 @@ import json
import os
from textwrap import dedent
-from cloudinit import subp
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
-from cloudinit import templater
-from cloudinit import temp_utils
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import subp, temp_utils, templater, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
-
RUBY_VERSION_DEFAULT = "1.8"
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
+CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ "/var/log/chef",
+ "/var/lib/chef",
+ "/var/cache/chef",
+ "/var/backups/chef",
+ "/var/run/chef",
+ ]
+)
+REQUIRED_CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ ]
+)
# Used if fetching chef from a omnibus style package
OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
-CHEF_ENVIRONMENT = '_default'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
+CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem"
+CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret"
+CHEF_ENVIRONMENT = "_default"
+CHEF_FB_PATH = "/etc/chef/firstboot.json"
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
+ "ssl_verify_mode": ":verify_none",
+ "log_level": ":info",
# These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': '/etc/chef/client.pem',
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': '/var/cache/chef',
- 'file_backup_path': '/var/backups/chef',
- 'pid_file': '/var/run/chef/client.pid',
- 'show_time': True,
- 'encrypted_data_bag_secret': None,
+ "log_location": "/var/log/chef/client.log",
+ "validation_key": CHEF_VALIDATION_PEM_PATH,
+ "validation_cert": None,
+ "client_key": "/etc/chef/client.pem",
+ "json_attribs": CHEF_FB_PATH,
+ "file_cache_path": "/var/cache/chef",
+ "file_backup_path": "/var/backups/chef",
+ "pid_file": "/var/run/chef/client.pid",
+ "show_time": True,
+ "encrypted_data_bag_secret": None,
}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'pid_file',
- 'encrypted_data_bag_secret',
- 'chef_license',
-])
+CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"])
+CHEF_RB_TPL_PATH_KEYS = frozenset(
+ [
+ "log_location",
+ "validation_key",
+ "client_key",
+ "file_cache_path",
+ "json_attribs",
+ "pid_file",
+ "encrypted_data_bag_secret",
+ ]
+)
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
-])
+CHEF_RB_TPL_KEYS.extend(
+ [
+ "server_url",
+ "node_name",
+ "environment",
+ "validation_name",
+ "chef_license",
+ ]
+)
CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
+CHEF_RB_PATH = "/etc/chef/client.rb"
+CHEF_EXEC_PATH = "/usr/bin/chef-client"
+CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"])
frequency = PER_ALWAYS
distros = ["all"]
-schema = {
- 'id': 'cc_chef',
- 'name': 'Chef',
- 'title': 'module that configures, starts and installs chef',
- 'description': dedent("""\
+
+meta: MetaSchema = {
+ "id": "cc_chef",
+ "name": "Chef",
+ "title": "module that configures, starts and installs chef",
+ "description": dedent(
+ """\
This module enables chef to be installed (from packages,
gems, or from omnibus). Before this occurs, chef configuration is
written to disk (validation.pem, client.pem, firstboot.json,
@@ -101,9 +105,12 @@ schema = {
/var/log/chef and so-on). If configured, chef will be
installed and started in either daemon or non-daemon mode.
If run in non-daemon mode, post run actions are executed to do
- finishing activities such as removing validation.pem."""),
- 'distros': distros,
- 'examples': [dedent("""
+ finishing activities such as removing validation.pem."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """
chef:
directories:
- /etc/chef
@@ -124,246 +131,19 @@ schema = {
omnibus_url_retries: 2
server_url: https://chef.yourorg.com:4000
ssl_verify_mode: :verify_peer
- validation_name: yourorg-validator""")],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'chef': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'directories': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
- Create the necessary directories for chef to run. By
- default, it creates the following directories:
-
- {chef_dirs}""").format(
- chef_dirs="\n".join(
- [" - ``{}``".format(d) for d in CHEF_DIRS]
- )
- )
- },
- 'validation_cert': {
- 'type': 'string',
- 'description': dedent("""\
- Optional string to be written to file validation_key.
- Special value ``system`` means set use existing file.
- """)
- },
- 'validation_key': {
- 'type': 'string',
- 'default': CHEF_VALIDATION_PEM_PATH,
- 'description': dedent("""\
- Optional path for validation_cert. default to
- ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
- },
- 'firstboot_path': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
- Path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to ``{}``.""".format(CHEF_FB_PATH))
- },
- 'exec': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- define if we should run or not run chef (defaults to
- false, unless a gem installed is requested where this
- will then default to true).""")
- },
- 'client_key': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
- 'description': dedent("""\
- Optional path for client_cert. default to
- ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
- },
- 'encrypted_data_bag_secret': {
- 'type': 'string',
- 'default': None,
- 'description': dedent("""\
- Specifies the location of the secret key used by chef
- to encrypt data items. By default, this path is set
- to None, meaning that chef will have to look at the
- path ``{}`` for it.
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
- },
- 'environment': {
- 'type': 'string',
- 'default': CHEF_ENVIRONMENT,
- 'description': dedent("""\
- Specifies which environment chef will use. By default,
- it will use the ``{}`` configuration.
- """.format(CHEF_ENVIRONMENT))
- },
- 'file_backup_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
- 'description': dedent("""\
- Specifies the location in which backup files are
- stored. By default, it uses the
- ``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_backup_path']))
- },
- 'file_cache_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
- 'description': dedent("""\
- Specifies the location in which chef cache files will
- be saved. By default, it uses the ``{}``
- location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_cache_path']))
- },
- 'json_attribs': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
- Specifies the location in which some chef json data is
- stored. By default, it uses the
- ``{}`` location.""".format(CHEF_FB_PATH))
- },
- 'log_level': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
- 'description': dedent("""\
- Defines the level of logging to be stored in the log
- file. By default this value is set to ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
- },
- 'log_location': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
- 'description': dedent("""\
- Specifies the location of the chef lof file. By
- default, the location is specified at
- ``{}``.""".format(
- CHEF_RB_TPL_DEFAULTS['log_location']))
- },
- 'node_name': {
- 'type': 'string',
- 'description': dedent("""\
- The name of the node to run. By default, we will
- use th instance id as the node name.""")
- },
- 'omnibus_url': {
- 'type': 'string',
- 'default': OMNIBUS_URL,
- 'description': dedent("""\
- Omnibus URL if chef should be installed through
- Omnibus. By default, it uses the
- ``{}``.""".format(OMNIBUS_URL))
- },
- 'omnibus_url_retries': {
- 'type': 'integer',
- 'default': OMNIBUS_URL_RETRIES,
- 'description': dedent("""\
- The number of retries that will be attempted to reach
- the Omnibus URL""")
- },
- 'omnibus_version': {
- 'type': 'string',
- 'description': dedent("""\
- Optional version string to require for omnibus
- install.""")
- },
- 'pid_file': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
- 'description': dedent("""\
- The location in which a process identification
- number (pid) is saved. By default, it saves
- in the ``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['pid_file']))
- },
- 'server_url': {
- 'type': 'string',
- 'description': 'The URL for the chef server'
- },
- 'show_time': {
- 'type': 'boolean',
- 'default': True,
- 'description': 'Show time in chef logs'
- },
- 'ssl_verify_mode': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
- 'description': dedent("""\
- Set the verify mode for HTTPS requests. We can have
- two possible values for this parameter:
-
- - ``:verify_none``: No validation of SSL \
- certificates.
- - ``:verify_peer``: Validate all SSL certificates.
-
- By default, the parameter is set as ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
- },
- 'validation_name': {
- 'type': 'string',
- 'description': dedent("""\
- The name of the chef-validator key that Chef Infra
- Client uses to access the Chef Infra Server during
- the initial Chef Infra Client run.""")
- },
- 'force_install': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- If set to ``True``, forces chef installation, even
- if it is already installed.""")
- },
- 'initial_attributes': {
- 'type': 'object',
- 'items': {
- 'type': 'string'
- },
- 'description': dedent("""\
- Specify a list of initial attributes used by the
- cookbooks.""")
- },
- 'install_type': {
- 'type': 'string',
- 'default': 'packages',
- 'description': dedent("""\
- The type of installation for chef. It can be one of
- the following values:
-
- - ``packages``
- - ``gems``
- - ``omnibus``""")
- },
- 'run_list': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'description': 'A run list for a first boot json.'
- },
- "chef_license": {
- 'type': 'string',
- 'description': dedent("""\
- string that indicates if user accepts or not license
- related to some of chef products""")
- }
- }
- }
- }
+ validation_name: yourorg-validator"""
+ )
+ ],
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
+ delete_pem = util.get_cfg_option_bool(
+ chef_cfg, "delete_validation_post_exec", default=False
+ )
if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
os.unlink(CHEF_VALIDATION_PEM_PATH)
@@ -386,16 +166,20 @@ def get_template_params(iid, chef_cfg, log):
else:
params[k] = util.get_cfg_option_str(chef_cfg, k)
# These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
+ params.update(
+ {
+ "generated_by": util.make_header(),
+ "node_name": util.get_cfg_option_str(
+ chef_cfg, "node_name", default=iid
+ ),
+ "environment": util.get_cfg_option_str(
+ chef_cfg, "environment", default="_default"
+ ),
+ # These two are mandatory...
+ "server_url": chef_cfg["server_url"],
+ "validation_name": chef_cfg["validation_name"],
+ }
+ )
return params
@@ -403,35 +187,37 @@ def handle(name, cfg, cloud, log, _args):
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
+ if "chef" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'chef' key in configuration", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
- chef_cfg = cfg['chef']
+ chef_cfg = cfg["chef"]
# Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
+ chef_dirs = util.get_cfg_option_list(chef_cfg, "directories")
if not chef_dirs:
chef_dirs = list(CHEF_DIRS)
for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
util.ensure_dir(d)
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
+ vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH)
+ vcert = chef_cfg.get("validation_cert")
# special value 'system' means do not overwrite the file
# but still render the template to contain 'validation_key'
if vcert:
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warning("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
+ log.warning(
+ "chef validation_cert provided as 'system', but "
+ "validation_key path '%s' does not exist.",
+ vkey_path,
+ )
# Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
+ template_fn = cloud.get_template_filename("chef_client.rb")
if template_fn:
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
@@ -445,32 +231,33 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warning("No template found, not rendering to %s",
- CHEF_RB_PATH)
+ log.warning("No template found, not rendering to %s", CHEF_RB_PATH)
# Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
+ fb_filename = util.get_cfg_option_str(
+ chef_cfg, "firstboot_path", default=CHEF_FB_PATH
+ )
if not fb_filename:
log.info("First boot path empty, not writing first boot json file")
else:
initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
+ if "run_list" in chef_cfg:
+ initial_json["run_list"] = chef_cfg["run_list"]
+ if "initial_attributes" in chef_cfg:
+ initial_attributes = chef_cfg["initial_attributes"]
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
util.write_file(fb_filename, json.dumps(initial_json))
# Try to install chef, if its not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
+ force_install = util.get_cfg_option_bool(
+ chef_cfg, "force_install", default=False
+ )
installed = subp.is_exe(CHEF_EXEC_PATH)
if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
elif installed:
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
else:
run = False
if run:
@@ -479,18 +266,21 @@ def handle(name, cfg, cloud, log, _args):
def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
+ log.debug("Running chef-client")
cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
+ if "exec_arguments" in chef_cfg:
+ cmd_args = chef_cfg["exec_arguments"]
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warning("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning(
+ "Unknown type %s provided for chef"
+ " 'exec_arguments' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
@@ -504,16 +294,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
The 'args' argument to subp will be updated with the full path to the
filename as the first argument.
"""
- basename = kwargs.pop('basename', "subp_blob")
+ basename = kwargs.pop("basename", "subp_blob")
- if len(args) == 0 and 'args' not in kwargs:
+ if len(args) == 0 and "args" not in kwargs:
args = [tuple()]
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
with temp_utils.tempdir(needs_exe=True) as tmpd:
tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
+ if "args" in kwargs:
+ kwargs["args"] = [tmpf] + list(kwargs["args"])
else:
args = list(args)
args[0] = [tmpf] + args[0]
@@ -540,36 +330,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
if omnibus_version is None:
args = []
else:
- args = ['-v', omnibus_version]
+ args = ["-v", omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
return subp_blob_in_tempfile(
- blob=content, args=args,
- basename='chef-omnibus-install', capture=False)
+ blob=content, args=args, basename="chef-omnibus-install", capture=False
+ )
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ install_type = util.get_cfg_option_str(
+ chef_cfg, "install_type", "packages"
+ )
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
if install_type == "gems":
# This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
+ chef_version = util.get_cfg_option_str(chef_cfg, "version", None)
+ ruby_version = util.get_cfg_option_str(
+ chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT
+ )
install_chef_from_gems(ruby_version, chef_version, cloud.distro)
# Retain backwards compat, by preferring True instead of False
# when not provided/overriden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=True)
+ elif install_type == "packages":
# This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
+ cloud.distro.install_packages(("chef",))
+ elif install_type == "omnibus":
omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
install_chef_from_omnibus(
url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
- omnibus_version=omnibus_version)
+ omnibus_version=omnibus_version,
+ )
else:
log.warning("Unknown chef install type '%s'", install_type)
run = False
@@ -578,25 +371,47 @@ def install_chef(cloud, chef_cfg, log):
def get_ruby_packages(version):
# return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
+ pkgs = ["ruby%s" % version, "ruby%s-dev" % version]
if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
+ pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8"))
return pkgs
def install_chef_from_gems(ruby_version, chef_version, distro):
distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
+ if not os.path.exists("/usr/bin/gem"):
+ util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem")
+ if not os.path.exists("/usr/bin/ruby"):
+ util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby")
if chef_version:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "-v %s" % chef_version,
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
else:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
+
# vi: ts=4 expandtab
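install_chef_from_gems builds the gem argv differently depending on whether a chef version is pinned. A sketch of just the argv construction (hypothetical gem_install_cmd helper, no execution):

    # Sketch only: the argv handed to subp.subp by install_chef_from_gems.
    def gem_install_cmd(chef_version=None):
        cmd = ["/usr/bin/gem", "install", "chef"]
        if chef_version:
            cmd.append("-v %s" % chef_version)
        cmd += ["--no-ri", "--no-rdoc", "--bindir", "/usr/bin", "-q"]
        return cmd

    print(gem_install_cmd())
    print(gem_install_cmd("14.15.6"))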
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 4d5a6aa2..c51818c3 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -2,46 +2,54 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Debug
------
-**Summary:** helper to debug cloud-init *internal* datastructures.
+"""Debug: Helper to debug cloud-init *internal* datastructures."""
+
+import copy
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit import safeyaml, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+SKIP_KEYS = frozenset(["log_cfgs"])
+
+MODULE_DESCRIPTION = """\
This module will enable for outputting various internal information that
cloud-init sources provide to either a file or to the output console/log
location that this cloud-init has been configured with when running.
.. note::
Log configurations are not output.
-
-**Internal name:** ``cc_debug``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- debug:
- verbose: true/false (defaulting to true)
- output: (location to write output, defaulting to console + log)
"""
-import copy
-from io import StringIO
-
-from cloudinit import type_utils
-from cloudinit import util
-from cloudinit import safeyaml
-
-SKIP_KEYS = frozenset(['log_cfgs'])
+meta: MetaSchema = {
+ "id": "cc_debug",
+ "name": "Debug",
+ "title": "Helper to debug cloud-init *internal* datastructures",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ debug:
+ verbose: true
+ output: /tmp/my_debug.log
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def _make_header(text):
header = StringIO()
header.write("-" * 80)
header.write("\n")
- header.write(text.center(80, ' '))
+ header.write(text.center(80, " "))
header.write("\n")
header.write("-" * 80)
header.write("\n")
@@ -55,18 +63,16 @@ def _dumps(obj):
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
-
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
+ verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True)
if args:
# if args are provided (from cmdline) then explicitly set verbose
out_file = args[0]
verbose = True
else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
+ out_file = util.get_cfg_by_path(cfg, ("debug", "output"))
if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
+ log.debug("Skipping module named %s, verbose printing disabled", name)
return
# Clean out some keys that we just don't care about showing...
dump_cfg = copy.deepcopy(cfg)
@@ -85,8 +91,9 @@ def handle(name, cfg, cloud, log, args):
to_print.write(_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
+ to_print.write(
+ "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))
+ )
to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
@@ -102,4 +109,5 @@ def handle(name, cfg, cloud, log, args):
else:
util.multi_log("".join(content_to_file), console=True, stderr=False)
+
# vi: ts=4 expandtab
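_make_header simply frames a title between two 80-column rules. A tiny sketch of the banner it produces:

    # Sketch only: the banner format produced by _make_header("Config").
    def make_header(text):
        rule = "-" * 80
        return "\n".join([rule, text.center(80, " "), rule]) + "\n"

    print(make_header("Config"), end="")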
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index dff93245..88cc28e2 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -6,52 +6,56 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Disable EC2 Metadata
---------------------
-**Summary:** disable aws ec2 metadata
+"""Disable EC2 Metadata: Disable AWS EC2 metadata."""
-This module can disable the ec2 datasource by rejecting the route to
-``169.254.169.254``, the usual route to the datasource. This module is disabled
-by default.
-
-**Internal name:** ``cc_disable_ec2_metadata``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- disable_ec2_metadata: <true/false>
-"""
-
-from cloudinit import subp
-from cloudinit import util
+from textwrap import dedent
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
-frequency = PER_ALWAYS
+REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
+REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]
-REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
-REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
+meta: MetaSchema = {
+ "id": "cc_disable_ec2_metadata",
+ "name": "Disable EC2 Metadata",
+ "title": "Disable AWS EC2 Metadata",
+ "description": dedent(
+ """\
+ This module can disable the ec2 datasource by rejecting the route to
+ ``169.254.169.254``, the usual route to the datasource. This module
+ is disabled by default."""
+ ),
+ "distros": [ALL_DISTROS],
+ "frequency": PER_ALWAYS,
+ "examples": ["disable_ec2_metadata: true"],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if subp.which('ip'):
+ if subp.which("ip"):
reject_cmd = REJECT_CMD_IP
- elif subp.which('ifconfig'):
+ elif subp.which("ifconfig"):
reject_cmd = REJECT_CMD_IF
else:
- log.error(('Neither "route" nor "ip" command found, unable to '
- 'manipulate routing table'))
+ log.error(
+ 'Neither "route" nor "ip" command found, unable to '
+ "manipulate routing table"
+ )
return
subp.subp(reject_cmd, capture=False)
else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
+ log.debug(
+ "Skipping module named %s, disabling the ec2 route not enabled",
+ name,
+ )
+
# vi: ts=4 expandtab
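The handler picks an "ip route ... prohibit" or legacy "route ... reject" command depending on which tool is present. A standalone sketch of the selection, using shutil.which in place of subp.which and not executing anything:

    # Sketch only: choose the route-rejection command for 169.254.169.254.
    import shutil

    REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
    REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]

    def pick_reject_cmd():
        if shutil.which("ip"):
            return REJECT_CMD_IP
        if shutil.which("ifconfig"):
            return REJECT_CMD_IF
        return None

    print(pick_reject_cmd())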
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d1200694..ee05ea87 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -5,11 +5,31 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Disk Setup
-----------
-**Summary:** configure partitions and filesystems
+"""Disk Setup: Configure partitions and filesystems."""
+
+import logging
+import os
+import shlex
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+# Define the commands to use
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+PARTPROBE_CMD = subp.which("partprobe")
+WIPEFS_CMD = subp.which("wipefs")
+LANG_C_ENV = {"LANG": "C"}
+LOG = logging.getLogger(__name__)
+
+MODULE_DESCRIPTION = """\
This module is able to configure simple partition tables and filesystems.
.. note::
@@ -25,99 +45,45 @@ will refer to the block device of the ephemeral image.
Disk partitioning is done using the ``disk_setup`` directive. This config
directive accepts a dictionary where each key is either a path to a block
device or an alias specified in ``device_aliases``, and each value is the
-configuration options for the device. The ``table_type`` option specifies the
-partition table type, either ``mbr`` or ``gpt``. The ``layout`` option
-specifies how partitions on the device are to be arranged. If ``layout`` is set
-to ``true``, a single partition using all the space on the device will be
-created. If set to ``false``, no partitions will be created. Partitions can be
-specified by providing a list to ``layout``, where each entry in the list is
-either a size or a list containing a size and the numerical value for a
-partition type. The size for partitions is specified in **percentage** of disk
-space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
-The ``overwrite`` option controls whether this module tries to be safe about
-writing partition tables or not. If ``overwrite: false`` is set, the device
-will be checked for a partition table and for a file system and if either is
-found, the operation will be skipped. If ``overwrite: true`` is set, no checks
-will be performed.
-
-.. note::
- Using ``overwrite: true`` is dangerous and can lead to data loss, so double
- check that the correct device has been specified if using this option.
-
-File system configuration is done using the ``fs_setup`` directive. This config
-directive accepts a list of filesystem configs. The device to create the
-filesystem on may be specified either as a path or as an alias in the format
-``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
-The partition can also be specified by setting ``partition`` to the desired
-partition number. The ``partition`` option may also be set to ``auto``, in
-which this module will search for the existance of a filesystem matching the
-``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
-creating the filesystem if one is found. The ``partition`` option may also be
-set to ``any``, in which case any file system that matches ``type`` and
-``device`` will cause this module to skip filesystem creation for the
-``fs_setup`` entry, regardless of ``label`` matching or not. To write a
-filesystem directly to a device, use ``partition: none``. A label can be
-specified for the filesystem using ``label``, and the filesystem type can be
-specified using ``filesystem``.
-
-.. note::
- If specifying device using the ``<device name>.<partition number>`` format,
- the value of ``partition`` will be overwritten.
-
-.. note::
- Using ``overwrite: true`` for filesystems is dangerous and can lead to data
- loss, so double check the entry in ``fs_setup``.
-
-.. note::
- ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``.
-
-**Internal name:** ``cc_disk_setup``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- device_aliases:
- <alias name>: <device path>
- disk_setup:
- <alias name/path>:
- table_type: <'mbr'/'gpt'>
- layout:
- - [33,82]
- - 66
- overwrite: <true/false>
- fs_setup:
- - label: <label>
- filesystem: <filesystem type>
- device: <device>
- partition: <"auto"/"any"/"none"/<partition number>>
- overwrite: <true/false>
- replace_fs: <filesystem type>
+configuration options for the device. File system configuration is done using
+the ``fs_setup`` directive. This config directive accepts a list of
+filesystem configs.
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
-import logging
-import os
-import shlex
-
-frequency = PER_INSTANCE
-
-# Define the commands to use
-UDEVADM_CMD = subp.which('udevadm')
-SFDISK_CMD = subp.which("sfdisk")
-SGDISK_CMD = subp.which("sgdisk")
-LSBLK_CMD = subp.which("lsblk")
-BLKID_CMD = subp.which("blkid")
-BLKDEV_CMD = subp.which("blockdev")
-WIPEFS_CMD = subp.which("wipefs")
-
-LANG_C_ENV = {'LANG': 'C'}
-
-LOG = logging.getLogger(__name__)
+meta: MetaSchema = {
+ "id": "cc_disk_setup",
+ "name": "Disk Setup",
+ "title": "Configure partitions and filesystems",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ device_aliases:
+ my_alias: /dev/sdb
+ disk_setup:
+ my_alias:
+ table_type: gpt
+ layout: [50, 50]
+ overwrite: true
+ fs_setup:
+ - label: fs1
+ filesystem: ext4
+ device: my_alias.1
+ cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ - label: fs2
+ device: my_alias.2
+ filesystem: ext4
+ mounts:
+ - ["my_alias.1", "/mnt1"]
+ - ["my_alias.2", "/mnt2"]
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(_name, cfg, cloud, log, _args):
@@ -125,9 +91,15 @@ def handle(_name, cfg, cloud, log, _args):
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
"""
+ device_aliases = cfg.get("device_aliases", {})
+
+ def alias_to_device(cand):
+ name = device_aliases.get(cand)
+ return cloud.device_name_to_device(name or cand) or name
+
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ update_disk_setup_devices(disk_setup, alias_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
@@ -136,16 +108,19 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating partition on %s" % disk,
+ func=mkpart,
+ args=(disk, definition),
+ )
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
+ update_fs_setup_devices(fs_setup, alias_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warning("Invalid file system definition: %s" % definition)
@@ -153,10 +128,13 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
+ device = definition.get("device")
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating fs for %s" % device,
+ func=mkfs,
+ args=(definition,),
+ )
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
@@ -169,15 +147,22 @@ def update_disk_setup_devices(disk_setup, tformer):
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
+ LOG.info(
+ "Replacing %s in disk_setup for translation of %s",
+ origname,
+ transformed,
+ )
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
+ if isinstance(disk_setup[transformed], dict):
+ disk_setup[transformed]["_origname"] = origname
del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
+ LOG.debug(
+ "updated disk_setup device entry '%s' to '%s'",
+ origname,
+ transformed,
+ )
def update_fs_setup_devices(disk_setup, tformer):
@@ -188,7 +173,7 @@ def update_fs_setup_devices(disk_setup, tformer):
LOG.warning("entry in disk_setup not a dict: %s", definition)
continue
- origname = definition.get('device')
+ origname = definition.get("device")
if origname is None:
continue
@@ -198,19 +183,24 @@ def update_fs_setup_devices(disk_setup, tformer):
tformed = tformer(dev)
if tformed is not None:
dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
+ LOG.debug(
+ "%s is mapped to disk=%s part=%s", origname, tformed, part
+ )
+ definition["_origname"] = origname
+ definition["device"] = tformed
if part:
# In origname with <dev>.N, N overrides 'partition' key.
- if 'partition' in definition:
- LOG.warning("Partition '%s' from dotted device name '%s' "
- "overrides 'partition' key in %s", part, origname,
- definition)
- definition['_partition'] = definition['partition']
- definition['partition'] = part
+ if "partition" in definition:
+ LOG.warning(
+ "Partition '%s' from dotted device name '%s' "
+ "overrides 'partition' key in %s",
+ part,
+ origname,
+ definition,
+ )
+ definition["_partition"] = definition["partition"]
+ definition["partition"] = part
def value_splitter(values, start=None):
@@ -222,7 +212,7 @@ def value_splitter(values, start=None):
if start:
_values = _values[start:]
- for key, value in [x.split('=') for x in _values]:
+ for key, value in [x.split("=") for x in _values]:
yield key, value
@@ -241,11 +231,16 @@ def enumerate_disk(device, nodeps=False):
name: the device name, i.e. sda
"""
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
+ lsblk_cmd = [
+ LSBLK_CMD,
+ "--pairs",
+ "--output",
+ "NAME,TYPE,FSTYPE,LABEL",
+ device,
+ ]
if nodeps:
- lsblk_cmd.append('--nodeps')
+ lsblk_cmd.append("--nodeps")
info = None
try:
@@ -259,10 +254,10 @@ def enumerate_disk(device, nodeps=False):
for part in parts:
d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
+ "name": None,
+ "type": None,
+ "fstype": None,
+ "label": None,
}
for key, value in value_splitter(part):
@@ -293,9 +288,9 @@ def is_device_valid(name, partition=False):
LOG.warning("Query against device %s failed", name)
return False
- if partition and d_type == 'part':
+ if partition and d_type == "part":
return True
- elif not partition and d_type == 'disk':
+ elif not partition and d_type == "disk":
return True
return False
@@ -311,7 +306,7 @@ def check_fs(device):
"""
out, label, fs_type, uuid = None, None, None, None
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
+ blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device]
try:
out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
@@ -322,11 +317,11 @@ def check_fs(device):
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
+ if key.lower() == "label":
label = value
- elif key.lower() == 'type':
+ elif key.lower() == "type":
fs_type = value
- elif key.lower() == 'uuid':
+ elif key.lower() == "uuid":
uuid = value
return label, fs_type, uuid
@@ -340,8 +335,14 @@ def is_filesystem(device):
return fs_type
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
+def find_device_node(
+ device,
+ fs_type=None,
+ label=None,
+ valid_targets=None,
+ label_match=True,
+ replace_fs=None,
+):
"""
Find a device that either matches the spec, or the first
@@ -356,31 +357,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label = ""
if not valid_targets:
- valid_targets = ['disk', 'part']
+ valid_targets = ["disk", "part"]
raw_device_used = False
for d in enumerate_disk(device):
- if d['fstype'] == replace_fs and label_match is False:
+ if d["fstype"] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
+ return ("/dev/%s" % d["name"], False)
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
+ if d["fstype"] == fs_type and (
+ (label_match and d["label"] == label) or not label_match
+ ):
# If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
+ return ("/dev/%s" % d["name"], True)
- if d['type'] in valid_targets:
+ if d["type"] in valid_targets:
- if d['type'] != 'disk' or d['fstype']:
+ if d["type"] != "disk" or d["fstype"]:
raw_device_used = True
- if d['type'] == 'disk':
+ if d["type"] == "disk":
# Skip the raw disk, it's the default
pass
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
+ elif not d["fstype"]:
+ return ("/dev/%s" % d["name"], False)
if not raw_device_used:
return (device, False)
@@ -423,7 +425,7 @@ def get_dyn_func(*args):
if len(args) < 2:
raise Exception("Unable to determine dynamic funcation name")
- func_name = (args[0] % args[1])
+ func_name = args[0] % args[1]
func_args = args[2:]
try:
@@ -438,8 +440,8 @@ def get_dyn_func(*args):
def get_hdd_size(device):
try:
- size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device])
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e)) from e
@@ -471,13 +473,13 @@ def check_partition_mbr_layout(device, layout):
if device in _line[0]:
# We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
+ if _line[-1].lower() in ["extended", "empty"]:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
+ if _line[x].isdigit() and _line[x] != "/":
type_label = _line[x]
break
@@ -486,7 +488,7 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
+ prt_cmd = [SGDISK_CMD, "-p", device]
try:
out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
@@ -512,7 +514,7 @@ def check_partition_gpt_layout(device, layout):
# Number Start (sector) End (sector) Size Code Name
# 1 2048 206847 100.0 MiB 0700 Microsoft basic data
for line in out_lines:
- if line.strip().startswith('Number'):
+ if line.strip().startswith("Number"):
break
codes = [line.strip().split()[5] for line in out_lines]
@@ -535,10 +537,16 @@ def check_partition_layout(table_type, device, layout):
function called check_partition_%s_layout
"""
found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s",
- table_type, device, layout, found_layout)
+ "check_partition_%s_layout", table_type, device, layout
+ )
+
+ LOG.debug(
+ "called check_partition_%s_layout(%s, %s), returned: %s",
+ table_type,
+ device,
+ layout,
+ found_layout,
+ )
if isinstance(layout, bool):
# if we are using auto partitioning, or "True" be happy
# if a single partition exists.
@@ -549,10 +557,12 @@ def check_partition_layout(table_type, device, layout):
elif len(found_layout) == len(layout):
# This just makes sure that the number of requested
# partitions and the type labels are right
- layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None
- for x in layout]
- LOG.debug("Layout types=%s. Found types=%s",
- layout_types, found_layout)
+ layout_types = [
+ str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout
+ ]
+ LOG.debug(
+ "Layout types=%s. Found types=%s", layout_types, found_layout
+ )
for itype, ftype in zip(layout_types, found_layout):
if itype is not None and str(ftype) != str(itype):
return False
@@ -578,8 +588,9 @@ def get_partition_mbr_layout(size, layout):
# Create a single partition
return "0,"
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
+ if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
+ layout, list
+ ):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
@@ -607,8 +618,10 @@ def get_partition_mbr_layout(size, layout):
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
+ raise Exception(
+ "Calculated partition definition is too big\n%s"
+ % sfdisk_definition
+ )
return sfdisk_definition
@@ -622,14 +635,15 @@ def get_partition_gpt_layout(size, layout):
if isinstance(partition, list):
if len(partition) != 2:
raise Exception(
- "Partition was incorrectly defined: %s" % partition)
+ "Partition was incorrectly defined: %s" % partition
+ )
percent, partition_type = partition
else:
percent = partition
partition_type = None
part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
+ partition_specs.append((partition_type, [0, "+{}".format(part_size)]))
# The last partition should use up all remaining space
partition_specs[-1][-1][-1] = 0
@@ -639,7 +653,7 @@ def get_partition_gpt_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = '\0'
+ null = "\0"
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
@@ -658,14 +672,14 @@ def purge_disk(device):
# wipe any file systems first
for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
+ if d["type"] not in ["disk", "crypt"]:
+ wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]]
try:
- LOG.info("Purging filesystem on /dev/%s", d['name'])
+ LOG.info("Purging filesystem on /dev/%s", d["name"])
subp.subp(wipefs_cmd)
except Exception as e:
raise Exception(
- "Failed FS purge of /dev/%s" % d['name']
+ "Failed FS purge of /dev/%s" % d["name"]
) from e
purge_disk_ptable(device)
@@ -685,13 +699,16 @@ def get_partition_layout(table_type, size, layout):
def read_parttbl(device):
"""
- Use partprobe instead of 'udevadm'. Partprobe is the only
- reliable way to probe the partition table.
+ `Partprobe` is preferred over `blkdev` since it is more reliably
+ able to probe the partition table.
"""
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ if PARTPROBE_CMD is not None:
+ probe_cmd = [PARTPROBE_CMD, device]
+ else:
+ probe_cmd = [BLKDEV_CMD, "--rereadpt", device]
util.udevadm_settle()
try:
- subp.subp(blkdev_cmd)
+ subp.subp(probe_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -717,17 +734,24 @@ def exec_mkpart_mbr(device, layout):
def exec_mkpart_gpt(device, layout):
try:
- subp.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, "-Z", device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- subp.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
+ subp.subp(
+ [
+ SGDISK_CMD,
+ "-n",
+ "{}:{}:{}".format(index, start, end),
+ device,
+ ]
+ )
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
subp.subp(
- [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
+ [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device]
+ )
except Exception:
LOG.warning("Failed to partition device %s", device)
raise
@@ -753,8 +777,10 @@ def assert_and_settle_device(device):
if not os.path.exists(device):
util.udevadm_settle()
if not os.path.exists(device):
- raise RuntimeError("Device %s did not exist and was not created "
- "with a udevadm settle." % device)
+ raise RuntimeError(
+ "Device %s did not exist and was not created "
+ "with a udevadm settle." % device
+ )
# Whether or not the device existed above, it is possible that udev
# events that would populate udev database (for reading by lsdname) have
@@ -781,9 +807,9 @@ def mkpart(device, definition):
device = os.path.realpath(device)
LOG.debug("Checking values for %s definition", device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
+ overwrite = definition.get("overwrite", False)
+ layout = definition.get("layout", False)
+ table_type = definition.get("table_type", "mbr")
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
@@ -796,7 +822,8 @@ def mkpart(device, definition):
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception(
- 'Device {device} is not a disk device!'.format(device=device))
+ "Device {device} is not a disk device!".format(device=device)
+ )
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
@@ -832,21 +859,21 @@ def lookup_force_flag(fs):
A force flag might be -F or -f; this looks it up
"""
flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- 'swap': '-f',
+ "ext": "-F",
+ "btrfs": "-f",
+ "xfs": "-f",
+ "reiserfs": "-f",
+ "swap": "-f",
}
- if 'ext' in fs.lower():
- fs = 'ext'
+ if "ext" in fs.lower():
+ fs = "ext"
if fs.lower() in flags:
return flags[fs]
LOG.warning("Force flag for %s is unknown.", fs)
- return ''
+ return ""
def mkfs(fs_cfg):
@@ -870,14 +897,14 @@ def mkfs(fs_cfg):
When 'cmd' is provided then no other parameter is required.
"""
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
+ label = fs_cfg.get("label")
+ device = fs_cfg.get("device")
+ partition = str(fs_cfg.get("partition", "any"))
+ fs_type = fs_cfg.get("filesystem")
+ fs_cmd = fs_cfg.get("cmd", [])
+ fs_opts = fs_cfg.get("extra_opts", [])
+ fs_replace = fs_cfg.get("replace_fs", False)
+ overwrite = fs_cfg.get("overwrite", False)
# ensure that we get a real device rather than a symbolic link
assert_and_settle_device(device)
@@ -890,14 +917,19 @@ def mkfs(fs_cfg):
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
+ LOG.debug(
+ "Manual request of partition %s for %s", partition, device
+ )
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device '%s' has check_label='%s' check_fstype=%s",
- device, check_label, check_fstype)
+ LOG.debug(
+ "Device '%s' has check_label='%s' check_fstype=%s",
+ device,
+ check_label,
+ check_fstype,
+ )
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
@@ -911,19 +943,23 @@ def mkfs(fs_cfg):
else:
LOG.debug("Device %s is cleared for formating", device)
- elif partition and str(partition).lower() in ('auto', 'any'):
+ elif partition and str(partition).lower() in ("auto", "any"):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
# 'any' means pick the first match on the device with matching fs_type
label_match = True
- if partition.lower() == 'any':
+ if partition.lower() == "any":
label_match = False
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
+ device, reuse = find_device_node(
+ device,
+ fs_type=fs_type,
+ label=label,
+ label_match=label_match,
+ replace_fs=fs_replace,
+ )
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
@@ -934,18 +970,25 @@ def mkfs(fs_cfg):
LOG.debug("Replacing file system on %s as instructed.", device)
if not device:
- LOG.debug("No device aviable that matches request. "
- "Skipping fs creation for %s", fs_cfg)
+ LOG.debug(
+ "No device available that matches request. "
+ "Skipping fs creation for %s",
+ fs_cfg,
+ )
return
- elif not partition or str(partition).lower() == 'none':
+ elif not partition or str(partition).lower() == "none":
LOG.debug("Using the raw device to place filesystem %s on", label)
else:
LOG.debug("Error in device identification handling.")
return
- LOG.debug("File system type '%s' with label '%s' will be created on %s",
- fs_type, label, device)
+ LOG.debug(
+ "File system type '%s' with label '%s' will be created on %s",
+ fs_type,
+ label,
+ device,
+ )
# Make sure the device is defined
if not device:
@@ -956,26 +999,29 @@ def mkfs(fs_cfg):
if not (fs_type or fs_cmd):
raise Exception(
"No way to create filesystem '{label}'. fs_type or fs_cmd "
- "must be set.".format(label=label))
+ "must be set.".format(label=label)
+ )
# Create the commands
shell = False
if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
+ fs_cmd = fs_cfg["cmd"] % {
+ "label": label,
+ "filesystem": fs_type,
+ "device": device,
}
shell = True
if overwrite:
LOG.warning(
"fs_setup:overwrite ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
if fs_opts:
LOG.warning(
"fs_setup:extra_opts ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
else:
# Find the mkfs command
mkfs_cmd = subp.which("mkfs.%s" % fs_type)
@@ -983,8 +1029,11 @@ def mkfs(fs_cfg):
mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
- LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
- fs_type, fs_type)
+ LOG.warning(
+ "Cannot create fstype '%s'. No mkfs.%s command",
+ fs_type,
+ fs_type,
+ )
return
fs_cmd = [mkfs_cmd, device]
@@ -1009,4 +1058,5 @@ def mkfs(fs_cfg):
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
+
# vi: ts=4 expandtab
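
To make the percentage-based layout handling above concrete, here is a hedged standalone sketch of the GPT size arithmetic; the helper name gpt_layout and the 100 GiB example disk are illustrative only, and the real module hands the result on to sgdisk:

def gpt_layout(size, layout):
    """Turn a list of percentages (or [percent, type] pairs) into specs."""
    specs = []
    for part in layout:
        if isinstance(part, list):
            if len(part) != 2:
                raise ValueError("Partition was incorrectly defined: %s" % part)
            percent, part_type = part
        else:
            percent, part_type = part, None
        part_size = int(float(size) * (float(percent) / 100))
        specs.append((part_type, [0, "+{}".format(part_size)]))
    # the last partition takes whatever space remains (end sector 0)
    specs[-1][-1][-1] = 0
    return specs


# e.g. a 100 GiB disk split 33% (partition type 82) / 66%:
print(gpt_layout(100 * 1024 ** 3, [[33, 82], 66]))
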
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b1d99f97..a928082b 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -16,7 +16,7 @@ user configuration should be required.
**Internal name:** ``cc_emit_upstart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** ubuntu, debian
"""
@@ -24,12 +24,12 @@ user configuration should be required.
import os
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import subp
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
LOG = logging.getLogger(__name__)
@@ -39,15 +39,18 @@ def is_upstart_system():
return False
myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
+ if "UPSTART_SESSION" in myenv:
+ del myenv["UPSTART_SESSION"]
+ check_cmd = ["initctl", "version"]
try:
(out, _err) = subp.subp(check_cmd, env=myenv)
- return 'upstart' in out
+ return "upstart" in out
except subp.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
+ LOG.debug(
+ "'%s' returned '%s', not using upstart",
+ " ".join(check_cmd),
+ e.exit_code,
+ )
return False
@@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args):
if not event_names:
# Default to the 'cloud-config'
# event for backwards compat.
- event_names = ['cloud-config']
+ event_names = ["cloud-config"]
if not is_upstart_system():
log.debug("not upstart system, '%s' disabled", name)
@@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args):
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
try:
subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
+
# vi: ts=4 expandtab
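
A rough standalone version of the upstart detection above, using the standard subprocess module instead of cloudinit.subp; it assumes initctl may be missing entirely:

import os
import subprocess


def is_upstart_system():
    env = os.environ.copy()
    env.pop("UPSTART_SESSION", None)
    try:
        result = subprocess.run(
            ["initctl", "version"], env=env, capture_output=True, text=True
        )
    except FileNotFoundError:
        return False
    return "upstart" in result.stdout


print(is_upstart_system())
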
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 77984bca..50a81744 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -38,68 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
"""
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
+ "config": None,
+ "config_path": "/etc/network/fan",
}
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
- cmds = {'stop': ['service', 'stop'],
- 'start': ['service', 'start']}
-
- def run(cmd, msg):
- try:
- return subp.subp(cmd, capture=True)
- except subp.ProcessExecutionError as e:
- LOG.warning("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
- if not content.endswith('\n'):
- content += '\n'
- util.write_file(config_file, content, omode="w")
+def stop_update_start(distro, service, config_file, content):
+ try:
+ distro.manage_service("stop", service)
+ stop_failed = False
+ except subp.ProcessExecutionError as e:
+ stop_failed = True
+ LOG.warning("failed to stop %s: %s", service, e)
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warning("success: %s started", service)
+ if not content.endswith("\n"):
+ content += "\n"
+ util.write_file(config_file, content, omode="w")
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
+ try:
+ distro.manage_service("start", service)
+ if stop_failed:
+ LOG.warning("success: %s started", service)
+ except subp.ProcessExecutionError as e:
+ LOG.warning("failed to start %s: %s", service, e)
- return ret
+ distro.manage_service("enable", service)
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
+ cfgin = cfg.get("fan")
if not cfgin:
cfgin = {}
mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
- if not mycfg.get('config'):
+ if not mycfg.get("config"):
LOG.debug("%s: no 'fan' config entry. disabling", name)
return
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
+ util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w")
distro = cloud.distro
- if not subp.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
+ if not subp.which("fanctl"):
+ distro.install_packages(["ubuntu-fan"])
stop_update_start(
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
+ distro,
+ service="ubuntu-fan",
+ config_file=mycfg.get("config_path"),
+ content=mycfg.get("config"),
+ )
+
# vi: ts=4 expandtab
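
The stop/write/start/enable ordering used above can be sketched without cloud-init as follows; manage_service() here is a print-only stand-in for distro.manage_service(), and the config path is a throwaway temporary file:

import os
import tempfile


def manage_service(action, service):
    print(action, service)  # stand-in for distro.manage_service()


def stop_update_start(service, config_file, content):
    manage_service("stop", service)
    if not content.endswith("\n"):
        content += "\n"
    with open(config_file, "w") as fh:
        fh.write(content)
    manage_service("start", service)
    manage_service("enable", service)


fd, cfg_path = tempfile.mkstemp(suffix="-fan.conf")
os.close(fd)
stop_update_start("ubuntu-fan", cfg_path, "# fan config")
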
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 3441f7a9..f443ccd8 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -21,7 +21,7 @@ specified as a jinja template with the following variables set:
**Internal name:** ``cc_final_message``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -31,10 +31,7 @@ specified as a jinja template with the following variables set:
"""
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
+from cloudinit import templater, util, version
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = (
def handle(_name, cfg, cloud, log, args):
- msg_in = ''
+ msg_in = ""
if len(args) != 0:
msg_in = str(args[0])
else:
@@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args):
cver = version.version_string()
try:
subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
+ "uptime": uptime,
+ "timestamp": ts,
+ "version": cver,
+ "datasource": str(cloud.datasource),
}
subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
+ util.multi_log(
+ "%s\n" % (templater.render_string(msg_in, subs)),
+ console=False,
+ stderr=True,
+ log=log,
+ )
except Exception:
util.logexc(log, "Failed to render final message template")
@@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args):
if cloud.datasource.is_disconnected:
log.warning("Used fallback datasource")
+
# vi: ts=4 expandtab
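
The substitution dictionary built above exposes each key twice, lower- and upper-cased, so templates may reference either form; a small sketch with placeholder values:

subs = {
    "uptime": "42.10",
    "timestamp": "Mon, 01 Jan 2024 00:00:00 +0000",
    "version": "22.1",
    "datasource": "DataSourceNone",
}
subs.update(dict((k.upper(), v) for k, v in subs.items()))
print(subs["version"], subs["VERSION"])
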
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
index 924b967c..3c307153 100644
--- a/cloudinit/config/cc_foo.py
+++ b/cloudinit/config/cc_foo.py
@@ -53,4 +53,5 @@ frequency = PER_INSTANCE
def handle(name, _cfg, _cloud, log, _args):
log.debug("Hi from module %s", name)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 9f338ad1..43334caa 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -50,7 +50,7 @@ growpart is::
**Internal name:** ``cc_growpart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -70,16 +70,15 @@ import re
import stat
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
frequency = PER_ALWAYS
DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
+ "mode": "auto",
+ "devices": ["/"],
+ "ignore_growroot_disabled": False,
}
@@ -130,7 +129,7 @@ class ResizeFailedException(Exception):
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(out, _err) = subp.subp(["growpart", "--help"], env=myenv)
@@ -142,21 +141,37 @@ class ResizeGrowPart(object):
return False
def resize(self, diskdev, partnum, partdev):
+ myenv = os.environ.copy()
+ myenv["LANG"] = "C"
before = get_size(partdev)
- try:
- subp.subp(["growpart", '--dry-run', diskdev, partnum])
- except subp.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e) from e
- return (before, before)
- try:
- subp.subp(["growpart", diskdev, partnum])
- except subp.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e) from e
+ # growpart uses tmp dir to store intermediate states
+ # and may conflict with systemd-tmpfiles-clean
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ growpart_tmp = os.path.join(tmpd, "growpart")
+ if not os.path.exists(growpart_tmp):
+ os.mkdir(growpart_tmp, 0o700)
+ myenv["TMPDIR"] = growpart_tmp
+ try:
+ subp.subp(
+ ["growpart", "--dry-run", diskdev, partnum], env=myenv
+ )
+ except subp.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ util.logexc(
+ LOG,
+ "Failed growpart --dry-run for (%s, %s)",
+ diskdev,
+ partnum,
+ )
+ raise ResizeFailedException(e) from e
+ return (before, before)
+
+ try:
+ subp.subp(["growpart", diskdev, partnum], env=myenv)
+ except subp.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
@@ -164,7 +179,7 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
@@ -222,7 +237,11 @@ def device_part_info(devpath):
# the device, like /dev/vtbd0p2.
if util.is_FreeBSD():
freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
- m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
+ m = re.search("^(/dev/.+)p([0-9])$", freebsd_part)
+ return (m.group(1), m.group(2))
+ elif util.is_DragonFlyBSD():
+ dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath)
+ m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
@@ -259,7 +278,7 @@ def devent2dev(devent):
container = util.is_container()
# Ensure the path is a block device.
- if (dev == "/dev/root" and not container):
+ if dev == "/dev/root" and not container:
dev = util.rootdev_from_cmdline(util.get_cmdline())
if dev is None:
if os.path.exists(dev):
@@ -277,65 +296,102 @@ def resize_devices(resizer, devices):
try:
blockdev = devent2dev(devent)
except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "unable to convert to device: %s" % e,
+ )
+ )
continue
try:
statret = os.stat(blockdev)
except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "stat of '%s' failed: %s" % (blockdev, e),
+ )
+ )
continue
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(
+ statret.st_mode
+ ):
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device '%s' not a block device" % blockdev,
+ )
+ )
continue
try:
(disk, ptnum) = device_part_info(blockdev)
except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device_part_info(%s) failed: %s" % (blockdev, e),
+ )
+ )
continue
try:
(old, new) = resizer.resize(disk, ptnum, blockdev)
if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
+ info.append(
+ (
+ devent,
+ RESIZE.NOCHANGE,
+ "no change necessary (%s, %s)" % (disk, ptnum),
+ )
+ )
else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
+ info.append(
+ (
+ devent,
+ RESIZE.CHANGED,
+ "changed (%s, %s) from %s to %s"
+ % (disk, ptnum, old, new),
+ )
+ )
except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.FAILED,
+ "failed to resize: disk=%s, ptnum=%s: %s"
+ % (disk, ptnum, e),
+ )
+ )
return info
def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
+ if "growpart" not in cfg:
+ log.debug(
+ "No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG
+ )
+ cfg["growpart"] = DEFAULT_CONFIG
- mycfg = cfg.get('growpart')
+ mycfg = cfg.get("growpart")
if not isinstance(mycfg, dict):
log.warning("'growpart' in config was not a dict")
return
- mode = mycfg.get('mode', "auto")
+ mode = mycfg.get("mode", "auto")
if util.is_false(mode):
log.debug("growpart disabled: mode=%s" % mode)
return
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
+ if util.is_false(mycfg.get("ignore_growroot_disabled", False)):
if os.path.isfile("/etc/growroot-disabled"):
log.debug("growpart disabled: /etc/growroot-disabled exists")
log.debug("use ignore_growroot_disabled to ignore")
@@ -354,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args):
raise e
return
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
+ resized = util.log_time(
+ logfunc=log.debug,
+ msg="resize_devices",
+ func=resize_devices,
+ args=(resizer, devices),
+ )
for (entry, action, msg) in resized:
if action == RESIZE.CHANGED:
log.info("'%s' resized: %s" % (entry, msg))
@@ -363,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
+RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart))
# vi: ts=4 expandtab
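
The resize flow above can be summarised in a standalone sketch: growpart is first run with --dry-run (exit code 1 meaning nothing to do), and a private TMPDIR is set so systemd-tmpfiles-clean cannot remove its intermediate state. Commands are only printed here, never executed:

import os
import tempfile


def resize(diskdev, partnum):
    env = os.environ.copy()
    env["LANG"] = "C"
    with tempfile.TemporaryDirectory() as tmpd:
        growpart_tmp = os.path.join(tmpd, "growpart")
        os.mkdir(growpart_tmp, 0o700)
        env["TMPDIR"] = growpart_tmp
        print(["growpart", "--dry-run", diskdev, partnum])
        print(["growpart", diskdev, partnum])


resize("/dev/sda", "1")
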
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index eb03c664..ad7243d9 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true.
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.subp import ProcessExecutionError
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def fetch_idevs(log):
@@ -60,8 +59,9 @@ def fetch_idevs(log):
try:
# get the root disk where the /boot directory resides.
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
- capture=True)[0].strip()
+ disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[
+ 0
+ ].strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
# FileNotFoundError is a nested exception of ProcessExecutionError
@@ -81,26 +81,30 @@ def fetch_idevs(log):
if not disk or not os.path.exists(disk):
# If we failed to detect a disk, we can return early
- return ''
+ return ""
try:
# check if disk exists and use udevadm to fetch symlinks
- devices = subp.subp(
- ['udevadm', 'info', '--root', '--query=symlink', disk],
- capture=True
- )[0].strip().split()
+ devices = (
+ subp.subp(
+ ["udevadm", "info", "--root", "--query=symlink", disk],
+ capture=True,
+ )[0]
+ .strip()
+ .split()
+ )
except Exception:
util.logexc(
log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
)
- log.debug('considering these device symlinks: %s', ','.join(devices))
+ log.debug("considering these device symlinks: %s", ",".join(devices))
# filter symlinks for /dev/disk/by-id entries
- devices = [dev for dev in devices if 'disk/by-id' in dev]
- log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ devices = [dev for dev in devices if "disk/by-id" in dev]
+ log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices))
# select first device if there is one, else fall back to plain name
idevs = sorted(devices)[0] if devices else disk
- log.debug('selected %s', idevs)
+ log.debug("selected %s", idevs)
return idevs
@@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args):
if not mycfg:
mycfg = {}
- enabled = mycfg.get('enabled', True)
+ enabled = mycfg.get("enabled", True)
if util.is_false(enabled):
log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
+ mycfg, "grub-pc/install_devices_empty", None
+ )
if idevs is None:
idevs = fetch_idevs(log)
@@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args):
# now idevs and idevs_empty are set to determined values
# or, those set by user
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
+ dconf_sel = (
+ "grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n"
+ % (idevs, idevs_empty)
+ )
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
+ log.debug(
+ "Setting grub debconf-set-selections with '%s','%s'"
+ % (idevs, idevs_empty)
+ )
try:
- subp.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(["debconf-set-selections"], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
+
# vi: ts=4 expandtab
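
The debconf payload assembled above looks like the following; the device value is a placeholder here, whereas the real module derives it from grub-probe and udevadm symlinks:

idevs = "/dev/disk/by-id/example-disk"  # hypothetical value
idevs_empty = "false"
dconf_sel = (
    "grub-pc grub-pc/install_devices string %s\n"
    "grub-pc grub-pc/install_devices_empty boolean %s\n" % (idevs, idevs_empty)
)
print(dconf_sel)  # piped to `debconf-set-selections` by the module
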
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
new file mode 100644
index 00000000..34c4557e
--- /dev/null
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -0,0 +1,151 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Install hotplug udev rules if supported and enabled"""
+import os
+from textwrap import dedent
+
+from cloudinit import stages, subp, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.event import EventScope, EventType
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+distros = [ALL_DISTROS]
+
+meta: MetaSchema = {
+ "id": "cc_install_hotplug",
+ "name": "Install Hotplug",
+ "title": "Install hotplug if supported and enabled",
+ "description": dedent(
+ """\
+ This module will install the udev rules to enable hotplug if
+ supported by the datasource and enabled in the userdata. The udev
+ rules will be installed as
+ ``/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules``.
+
+ When hotplug is enabled, newly added network devices will be added
+ to the system by cloud-init. After udev detects the event,
+ cloud-init will refresh the instance metadata from the datasource,
+ detect the device in the updated metadata, then apply the updated
+ network configuration.
+
+ Currently supported datasources: Openstack, EC2
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Enable hotplug of network devices
+ updates:
+ network:
+ when: ["hotplug"]
+ """
+ ),
+ dedent(
+ """\
+ # Enable network hotplug alongside boot event
+ updates:
+ network:
+ when: ["boot", "hotplug"]
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "updates": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "network": {
+ "type": "object",
+ "required": ["when"],
+ "additionalProperties": False,
+ "properties": {
+ "when": {
+ "type": "array",
+ "additionalProperties": False,
+ "items": {
+ "type": "string",
+ "additionalProperties": False,
+ "enum": [
+ "boot-new-instance",
+ "boot-legacy",
+ "boot",
+ "hotplug",
+ ],
+ },
+ }
+ },
+ }
+ },
+ }
+ },
+}
+
+__doc__ = get_meta_doc(meta, schema)
+
+
+HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+HOTPLUG_UDEV_RULES_TEMPLATE = """\
+# Installed by cloud-init due to network hotplug userdata
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net", RUN+="{libexecdir}/hook-hotplug"
+LABEL="cloudinit_end"
+"""
+
+
+def handle(_name, cfg, cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ network_hotplug_enabled = (
+ "updates" in cfg
+ and "network" in cfg["updates"]
+ and "when" in cfg["updates"]["network"]
+ and "hotplug" in cfg["updates"]["network"]["when"]
+ )
+ hotplug_supported = EventType.HOTPLUG in (
+ cloud.datasource.get_supported_events([EventType.HOTPLUG]).get(
+ EventScope.NETWORK, set()
+ )
+ )
+ hotplug_enabled = stages.update_event_enabled(
+ datasource=cloud.datasource,
+ cfg=cfg,
+ event_source_type=EventType.HOTPLUG,
+ scope=EventScope.NETWORK,
+ )
+ if not (hotplug_supported and hotplug_enabled):
+ if os.path.exists(HOTPLUG_UDEV_PATH):
+ log.debug("Uninstalling hotplug, not enabled")
+ util.del_file(HOTPLUG_UDEV_PATH)
+ subp.subp(["udevadm", "control", "--reload-rules"])
+ elif network_hotplug_enabled:
+ log.warning(
+ "Hotplug is unsupported by current datasource. "
+ "Udev rules will NOT be installed."
+ )
+ else:
+ log.debug("Skipping hotplug install, not enabled")
+ return
+ if not subp.which("udevadm"):
+ log.debug("Skipping hotplug install, udevadm not found")
+ return
+
+ # This may need to turn into a distro property at some point
+ libexecdir = "/usr/libexec/cloud-init"
+ if not os.path.exists(libexecdir):
+ libexecdir = "/usr/lib/cloud-init"
+ util.write_file(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(libexecdir=libexecdir),
+ )
+ subp.subp(["udevadm", "control", "--reload-rules"])
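
The "is network hotplug requested" test above reduces to a nested-key check on the user config; a standalone sketch over plain dicts:

def network_hotplug_enabled(cfg):
    return (
        "updates" in cfg
        and "network" in cfg["updates"]
        and "when" in cfg["updates"]["network"]
        and "hotplug" in cfg["updates"]["network"]["when"]
    )


print(network_hotplug_enabled({"updates": {"network": {"when": ["boot", "hotplug"]}}}))
print(network_hotplug_enabled({"updates": {"network": {"when": ["boot"]}}}))
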
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
new file mode 100644
index 00000000..98ef326a
--- /dev/null
+++ b/cloudinit/config/cc_keyboard.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2022 Floris Bos
+#
+# Author: Floris Bos <bos@je-eigen-domein.nl>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""keyboard: set keyboard layout"""
+
+from textwrap import dedent
+
+from cloudinit import distros
+from cloudinit import log as logging
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+# FIXME: setting keyboard layout should be supported by all OSes.
+# But currently only implemented for Linux distributions that use systemd.
+osfamilies = ["arch", "debian", "redhat", "suse"]
+distros = distros.Distro.expand_osfamily(osfamilies)
+
+DEFAULT_KEYBOARD_MODEL = "pc105"
+
+meta: MetaSchema = {
+ "id": "cc_keyboard",
+ "name": "Keyboard",
+ "title": "Set keyboard layout",
+ "description": dedent(
+ """\
+ Handle keyboard configuration.
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Set keyboard layout to "us"
+ keyboard:
+ layout: us
+ """
+ ),
+ dedent(
+ """\
+ # Set specific keyboard layout, model, variant, options
+ keyboard:
+ layout: de
+ model: pc105
+ variant: nodeadkeys
+ options: compose:rwin
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+
+schema = {
+ "type": "object",
+ "properties": {
+ "keyboard": {
+ "type": "object",
+ "properties": {
+ "layout": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Required. Keyboard layout. Corresponds to XKBLAYOUT.
+ """
+ ),
+ },
+ "model": {
+ "type": "string",
+ "default": DEFAULT_KEYBOARD_MODEL,
+ "description": dedent(
+ """\
+ Optional. Keyboard model. Corresponds to XKBMODEL.
+ """
+ ),
+ },
+ "variant": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Optional. Keyboard variant. Corresponds to XKBVARIANT.
+ """
+ ),
+ },
+ "options": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Optional. Keyboard options. Corresponds to XKBOPTIONS.
+ """
+ ),
+ },
+ },
+ "required": ["layout"],
+ "additionalProperties": False,
+ }
+ },
+}
+
+__doc__ = get_meta_doc(meta, schema)
+
+LOG = logging.getLogger(__name__)
+
+
+def handle(name, cfg, cloud, log, args):
+ if "keyboard" not in cfg:
+ LOG.debug(
+ "Skipping module named %s, no 'keyboard' section found", name
+ )
+ return
+ validate_cloudconfig_schema(cfg, schema)
+ kb_cfg = cfg["keyboard"]
+ layout = kb_cfg["layout"]
+ model = kb_cfg.get("model", DEFAULT_KEYBOARD_MODEL)
+ variant = kb_cfg.get("variant", "")
+ options = kb_cfg.get("options", "")
+ LOG.debug("Setting keyboard layout to '%s'", layout)
+ cloud.distro.set_keymap(layout, model, variant, options)
+
+
+# vi: ts=4 expandtab
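
The handler above boils down to reading four keys with a default model; here is a sketch where set_keymap() is replaced by a print so it can run outside cloud-init:

DEFAULT_KEYBOARD_MODEL = "pc105"


def apply_keyboard(cfg):
    kb_cfg = cfg.get("keyboard")
    if not kb_cfg:
        return
    layout = kb_cfg["layout"]  # required by the schema
    model = kb_cfg.get("model", DEFAULT_KEYBOARD_MODEL)
    variant = kb_cfg.get("variant", "")
    options = kb_cfg.get("options", "")
    print("set_keymap(%r, %r, %r, %r)" % (layout, model, variant, options))


apply_keyboard({"keyboard": {"layout": "de", "variant": "nodeadkeys"}})
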
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 0f2be52b..ab35e136 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,14 +9,17 @@
"""
Keys to Console
---------------
-**Summary:** control which SSH keys may be written to console
-
-For security reasons it may be desirable not to write SSH fingerprints and keys
-to the console. To avoid the fingerprint of types of SSH keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default all
-types of keys will have their fingerprints written to console. To avoid keys
-of a key type being written to console the ``ssh_key_console_blacklist`` config
-key can be used. By default ``ssh-dss`` keys are not written to console.
+**Summary:** control which SSH host keys may be written to console
+
+For security reasons it may be desirable not to write SSH host keys and their
+fingerprints to the console. To avoid either being written to the console the
+``emit_keys_to_console`` config key under the main ``ssh`` config key can be
+used. To avoid the fingerprint of types of SSH host keys being written to
+console the ``ssh_fp_console_blacklist`` config key can be used. By default
+all types of keys will have their fingerprints written to console. To avoid
+host keys of a key type being written to console the
+``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
+host keys are not written to console.
**Internal name:** ``cc_keys_to_console``
@@ -26,50 +29,62 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
**Config keys**::
+ ssh:
+ emit_keys_to_console: false
+
ssh_fp_console_blacklist: <list of key types>
ssh_key_console_blacklist: <list of key types>
"""
import os
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
# This is a tool that cloud init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
+HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints"
def _get_helper_tool_path(distro):
try:
base_lib = distro.usr_lib_exec
except AttributeError:
- base_lib = '/usr/lib'
+ base_lib = "/usr/lib"
return HELPER_TOOL_TPL % base_lib
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
+ log.debug(
+ "Skipping module named %s, logging of SSH host keys disabled", name
+ )
+ return
+
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warning(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
+ log.warning(
+ "Unable to activate module %s, helper tool not found at %s",
+ name,
+ helper_path,
+ )
return
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
+ fp_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_fp_console_blacklist", []
+ )
+ key_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_key_console_blacklist", ["ssh-dss"]
+ )
try:
- cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
+ cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)]
(stdout, _stderr) = subp.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
+ util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True)
except Exception:
log.warning("Writing keys to the system console failed!")
raise
+
# vi: ts=4 expandtab
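
The helper invocation above is just the tool path plus the two comma-joined blacklists; a sketch with the default values (the /usr/lib path is the common case, while the real module asks the distro for usr_lib_exec first):

helper_path = "/usr/lib/cloud-init/write-ssh-key-fingerprints"
fp_blacklist = []  # ssh_fp_console_blacklist (default: empty)
key_blacklist = ["ssh-dss"]  # ssh_key_console_blacklist default
cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)]
print(cmd)
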
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 299c4d01..03ebf411 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -60,10 +60,7 @@ from io import BytesIO
from configobj import ConfigObj
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, type_utils, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -71,15 +68,15 @@ frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
LS_DEFAULT_FILE = "/etc/default/landscape-client"
-distros = ['ubuntu']
+distros = ["ubuntu"]
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
}
}
@@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args):
raise RuntimeError(
"'landscape' key existed in config, but not a dictionary type,"
" is a {_type} instead".format(
- _type=type_utils.obj_name(ls_cloudcfg)))
+ _type=type_utils.obj_name(ls_cloudcfg)
+ )
+ )
if not ls_cloudcfg:
return
- cloud.distro.install_packages(('landscape-client',))
+ cloud.distro.install_packages(("landscape-client",))
merge_data = [
LSC_BUILTIN_CFG,
@@ -135,4 +134,5 @@ def merge_together(objs):
cfg.merge(ConfigObj(obj))
return cfg
+
# vi: ts=4 expandtab
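
The merge above layers the built-in client defaults under the user-supplied 'landscape' config; the real module does this with configobj.ConfigObj, while this sketch uses plain dicts so it runs without that dependency:

LSC_BUILTIN_CFG = {
    "client": {
        "log_level": "info",
        "url": "https://landscape.canonical.com/message-system",
        "ping_url": "http://landscape.canonical.com/ping",
        "data_path": "/var/lib/landscape/client",
    }
}


def merge_together(objs):
    merged = {"client": {}}
    for obj in objs:
        merged["client"].update(obj.get("client", {}))
    return merged


user_cfg = {"client": {"url": "https://landscape.example.internal/message-system"}}
print(merge_together([LSC_BUILTIN_CFG, user_cfg]))
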
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 4f8b7bf6..29f6a9b6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -11,45 +11,55 @@
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-
frequency = PER_INSTANCE
-distros = ['all']
-schema = {
- 'id': 'cc_locale',
- 'name': 'Locale',
- 'title': 'Set system locale',
- 'description': dedent(
+distros = ["all"]
+meta: MetaSchema = {
+ "id": "cc_locale",
+ "name": "Locale",
+ "title": "Set system locale",
+ "description": dedent(
"""\
Configure the system locale and apply it system wide. By default use
the locale specified by the datasource."""
),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Set the locale to ar_AE
locale: ar_AE
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Set the locale to fr_CA in /etc/alternate_path/locale
locale: fr_CA
locale_configfile: /etc/alternate_path/locale
- """),
+ """
+ ),
],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'locale': {
- 'type': 'string',
- 'description': (
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "locale": {
+ "type": "string",
+ "description": (
"The locale to set as the system's locale (e.g. ar_PS)"
),
},
- 'locale_configfile': {
- 'type': 'string',
- 'description': (
+ "locale_configfile": {
+ "type": "string",
+ "description": (
"The file in which to write the locale configuration (defaults"
" to the distro's default location)"
),
@@ -57,7 +67,7 @@ schema = {
},
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -67,8 +77,9 @@ def handle(name, cfg, cloud, log, args):
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
+ log.debug(
+ "Skipping module named %s, disabled by config: %s", name, locale
+ )
return
validate_cloudconfig_schema(cfg, schema)
@@ -77,4 +88,5 @@ def handle(name, cfg, cloud, log, args):
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
+
# vi: ts=4 expandtab
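
As a quick illustration of what the relocated cc_locale schema accepts, the sketch below validates sample configs directly with jsonschema (the library cloud-init's validate_cloudconfig_schema relies on when available); the values are illustrative only:

    import jsonschema

    schema = {
        "type": "object",
        "properties": {
            "locale": {"type": "string"},
            "locale_configfile": {"type": "string"},
        },
    }

    jsonschema.validate(
        {"locale": "fr_CA", "locale_configfile": "/etc/alternate_path/locale"}, schema
    )  # passes
    try:
        jsonschema.validate({"locale": 42}, schema)
    except jsonschema.ValidationError as e:
        print(e.message)  # 42 is not of type 'string'
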
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 486037d9..13ddcbe9 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly.
domain: <domain>
"""
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import os
-distros = ['ubuntu']
+from cloudinit import log as logging
+from cloudinit import subp, util
+
+distros = ["ubuntu"]
LOG = logging.getLogger(__name__)
@@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0"
def handle(name, cfg, cloud, log, args):
# Get config
- lxd_cfg = cfg.get('lxd')
+ lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
+ log.debug(
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
if not isinstance(lxd_cfg, dict):
- log.warning("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
+ log.warning(
+ "lxd config must be a dictionary. found a '%s'", type(lxd_cfg)
+ )
return
# Grab the configuration
- init_cfg = lxd_cfg.get('init')
+ init_cfg = lxd_cfg.get("init")
if not isinstance(init_cfg, dict):
- log.warning("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
+ log.warning(
+ "lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg),
+ )
init_cfg = {}
- bridge_cfg = lxd_cfg.get('bridge', {})
+ bridge_cfg = lxd_cfg.get("bridge", {})
if not isinstance(bridge_cfg, dict):
- log.warning("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
+ log.warning(
+ "lxd/bridge config must be a dictionary. found a '%s'",
+ type(bridge_cfg),
+ )
bridge_cfg = {}
# Install the needed packages
packages = []
if not subp.which("lxd"):
- packages.append('lxd')
+ packages.append("lxd")
- if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
- packages.append('zfsutils-linux')
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"):
+ packages.append("zfsutils-linux")
if len(packages):
try:
@@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd if init config is given
if init_cfg:
init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- subp.subp(['lxd', 'waitready', '--timeout=300'])
- cmd = ['lxd', 'init', '--auto']
+ "network_address",
+ "network_port",
+ "storage_backend",
+ "storage_create_device",
+ "storage_create_loop",
+ "storage_pool",
+ "trust_password",
+ )
+ subp.subp(["lxd", "waitready", "--timeout=300"])
+ cmd = ["lxd", "init", "--auto"]
for k in init_keys:
if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
+ cmd.extend(
+ ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))]
+ )
subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
- if os.path.exists("/etc/default/lxd-bridge") \
- and subp.which(dconf_comm):
+ if os.path.exists("/etc/default/lxd-bridge") and subp.which(
+ dconf_comm
+ ):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args):
# Update debconf database
try:
log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- subp.subp(['debconf-communicate'], data)
+ data = (
+ "\n".join(
+ ["set %s %s" % (k, v) for k, v in debconf.items()]
+ )
+ + "\n"
+ )
+ subp.subp(["debconf-communicate"], data)
except Exception:
- util.logexc(log, "Failed to run '%s' for lxd with" %
- dconf_comm)
+ util.logexc(
+ log, "Failed to run '%s' for lxd with" % dconf_comm
+ )
# Remove the existing configuration file (forces re-generation)
util.del_file("/etc/default/lxd-bridge")
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- subp.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
+ subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"])
else:
# Built-in LXD bridge support
cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
maybe_cleanup_default(
- net_name=net_name, did_init=bool(init_cfg),
- create=bool(cmd_create), attach=bool(cmd_attach))
+ net_name=net_name,
+ did_init=bool(init_cfg),
+ create=bool(cmd_create),
+ attach=bool(cmd_attach),
+ )
if cmd_create:
- log.debug("Creating lxd bridge: %s" %
- " ".join(cmd_create))
+ log.debug("Creating lxd bridge: %s" % " ".join(cmd_create))
_lxc(cmd_create)
if cmd_attach:
- log.debug("Setting up default lxd bridge: %s" %
- " ".join(cmd_attach))
+ log.debug(
+ "Setting up default lxd bridge: %s" % " ".join(cmd_attach)
+ )
_lxc(cmd_attach)
elif bridge_cfg:
raise RuntimeError(
- "Unable to configure lxd bridge without %s." + dconf_comm)
+ "Unable to configure lxd bridge without %s." + dconf_comm
+ )
def bridge_to_debconf(bridge_cfg):
@@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg):
if bridge_cfg.get("ipv4_address"):
debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
+ debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address")
+ debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask")
+ debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get(
+ "ipv4_dhcp_first"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get(
+ "ipv4_dhcp_last"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get(
+ "ipv4_dhcp_leases"
+ )
+ debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true")
if bridge_cfg.get("ipv6_address"):
debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
+ debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address")
+ debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask")
+ debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get(
+ "ipv6_nat", "false"
+ )
if bridge_cfg.get("domain"):
debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
return debconf
@@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg):
bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
cmd_create = []
- cmd_attach = ["network", "attach-profile", bridge_name,
- "default", "eth0"]
+ cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"]
if bridge_cfg.get("mode") == "existing":
return None, cmd_attach
if bridge_cfg.get("mode") != "new":
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
cmd_create = ["network", "create", bridge_name]
if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
- cmd_create.append("ipv4.address=%s/%s" %
- (bridge_cfg.get("ipv4_address"),
- bridge_cfg.get("ipv4_netmask")))
+ cmd_create.append(
+ "ipv4.address=%s/%s"
+ % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask"))
+ )
if bridge_cfg.get("ipv4_nat", "true") == "true":
cmd_create.append("ipv4.nat=true")
- if bridge_cfg.get("ipv4_dhcp_first") and \
- bridge_cfg.get("ipv4_dhcp_last"):
- dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"),
- bridge_cfg.get("ipv4_dhcp_last"))
+ if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get(
+ "ipv4_dhcp_last"
+ ):
+ dhcp_range = "%s-%s" % (
+ bridge_cfg.get("ipv4_dhcp_first"),
+ bridge_cfg.get("ipv4_dhcp_last"),
+ )
cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range)
else:
cmd_create.append("ipv4.address=none")
if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"):
- cmd_create.append("ipv6.address=%s/%s" %
- (bridge_cfg.get("ipv6_address"),
- bridge_cfg.get("ipv6_netmask")))
+ cmd_create.append(
+ "ipv6.address=%s/%s"
+ % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask"))
+ )
if bridge_cfg.get("ipv6_nat", "false") == "true":
cmd_create.append("ipv6.nat=true")
@@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg):
def _lxc(cmd):
- env = {'LC_ALL': 'C',
- 'HOME': os.environ.get('HOME', '/root'),
- 'USER': os.environ.get('USER', 'root')}
- subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ env = {
+ "LC_ALL": "C",
+ "HOME": os.environ.get("HOME", "/root"),
+ "USER": os.environ.get("USER", "root"),
+ }
+ subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env)
-def maybe_cleanup_default(net_name, did_init, create, attach,
- profile="default", nic_name="eth0"):
+def maybe_cleanup_default(
+ net_name, did_init, create, attach, profile="default", nic_name="eth0"
+):
"""Newer versions of lxc (3.0.1+) create a lxdbr0 network when
'lxd init --auto' is run. Older versions did not.
@@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
+
# vi: ts=4 expandtab
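
A worked example (hypothetical addresses) of the command lists bridge_to_cmd() builds for a new bridge, which _lxc() then runs through the lxc client:

    bridge_cfg = {
        "mode": "new",
        "name": "lxdbr0",
        "ipv4_address": "10.0.8.1",
        "ipv4_netmask": "24",
        "ipv4_dhcp_first": "10.0.8.2",
        "ipv4_dhcp_last": "10.0.8.254",
        "ipv4_nat": "true",
    }
    # bridge_to_cmd(bridge_cfg) returns roughly:
    #   cmd_create = ["network", "create", "lxdbr0",
    #                 "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
    #                 "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", ...]
    #   cmd_attach = ["network", "attach-profile", "lxdbr0", "default", "eth0"]
    # _lxc() prefixes each with "lxc", appends "--force-local", and runs it with a
    # minimal LC_ALL=C environment.
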
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 41ea4fc9..1b0158ec 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,18 +56,21 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
+SERVER_CFG = "/etc/mcollective/server.cfg"
LOG = logging.getLogger(__name__)
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
+def configure(
+ config,
+ server_cfg=SERVER_CFG,
+ pubcert_file=PUBCERT_FILE,
+ pricert_file=PRICERT_FILE,
+):
# Read server.cfg (if it exists) values from the
# original file in order to be able to mix the rest up.
try:
@@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG,
if e.errno != errno.ENOENT:
raise
else:
- LOG.debug("Did not find file %s (starting with an empty"
- " config)", server_cfg)
+ LOG.debug(
+ "Did not find file %s (starting with an empty config)",
+ server_cfg,
+ )
mcollective_config = ConfigObj()
for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
+ if cfg_name == "public-cert":
util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
+ mcollective_config["plugin.ssl_server_public"] = pubcert_file
+ mcollective_config["securityprovider"] = "ssl"
+ elif cfg_name == "private-cert":
util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
+ mcollective_config["plugin.ssl_server_private"] = pricert_file
+ mcollective_config["securityprovider"] = "ssl"
else:
if isinstance(cfg, str):
# Just set it in the 'main' section
@@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG,
def handle(name, cfg, cloud, log, _args):
# If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
+ if "mcollective" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'mcollective' key in configuration",
+ name,
+ )
return
- mcollective_cfg = cfg['mcollective']
+ mcollective_cfg = cfg["mcollective"]
# Start by installing the mcollective package ...
cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
+ if "conf" in mcollective_cfg:
+ configure(config=mcollective_cfg["conf"])
# restart mcollective to handle updated config
- subp.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(["service", "mcollective", "restart"], capture=False)
+
# vi: ts=4 expandtab
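
For context, a sketch of the 'mcollective' config shape that configure() consumes above; certificate bodies and the extra setting are placeholders, not values from this patch:

    mcollective_cfg = {
        "conf": {
            "public-cert": "-----BEGIN CERTIFICATE-----\n(placeholder)\n-----END CERTIFICATE-----",
            "private-cert": "-----BEGIN CERTIFICATE-----\n(placeholder)\n-----END CERTIFICATE-----",
            "loglevel": "debug",  # any other key is copied into server.cfg's main section
        }
    }
    # configure(config=mcollective_cfg["conf"]) writes the certs to
    # /etc/mcollective/ssl/server-public.pem (0644) and server-private.pem (0600),
    # points plugin.ssl_server_public/private at them, sets securityprovider=ssl,
    # and serializes the result back to /etc/mcollective/server.cfg.
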
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 3995704a..4fafb4af 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -17,7 +17,7 @@ false`` in config.
**Internal name:** ``cc_migrator``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -29,16 +29,14 @@ false`` in config.
import os
import shutil
-from cloudinit import helpers
-from cloudinit import util
-
+from cloudinit import helpers, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
am_adjusted = 0
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
@@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud):
def _migrate_legacy_sems(cloud, log):
legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
+ "apt-update-upgrade": [
+ "apt-configure",
+ "package-update-upgrade-install",
],
}
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
continue
@@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log):
util.del_file(os.path.join(sem_path, p))
(_name, freq) = os.path.splitext(p)
for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
+ log.debug(
+ "Migrating %s => %s with the same frequency", p, m
+ )
with sem_helper.lock(m, freq):
pass
@@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Skipping module named %s, migration disabled", name)
return
sems_moved = _migrate_canon_sems(cloud)
- log.debug("Migrated %s semaphore files to there canonicalized names",
- sems_moved)
+ log.debug(
+ "Migrated %s semaphore files to there canonicalized names", sems_moved
+ )
_migrate_legacy_sems(cloud, log)
+
# vi: ts=4 expandtab
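
In short, the migration above retires legacy semaphores in favour of their successor modules; the layout below is an approximation for illustration, not copied from a real system:

    # An old semaphore for 'apt-update-upgrade' found under
    # /var/lib/cloud/instance/sem/ or /var/lib/cloud/sem/ is deleted, and new
    # semaphores with the same frequency are created for 'apt-configure' and
    # 'package-update-upgrade-install', so the replacement modules are treated
    # as already run instead of re-running on the next boot.
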
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c22d1698..83eb5b1b 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -62,15 +62,12 @@ swap file is created.
maxsize: <size in bytes>
"""
-from string import whitespace
-
import logging
import os
import re
+from string import whitespace
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, type_utils, util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
@@ -105,25 +102,29 @@ def is_network_device(name):
def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
+ potential_suffixes = [
+ str(partition_number),
+ "p%s" % (partition_number,),
+ "-part%s" % (partition_number,),
+ ]
for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
+ potential_partition_device = "%s%s" % (device_path, suffix)
if os.path.exists(potential_partition_device):
return potential_partition_device
return None
def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
+ device_name = os.path.realpath(device_path).split("/")[-1]
+ sys_path = os.path.join("/sys/block/", device_name)
if partition_path is not None:
sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
+ sys_path, os.path.realpath(partition_path).split("/")[-1]
+ )
return os.path.exists(sys_path)
-def sanitize_devname(startname, transformer, log):
+def sanitize_devname(startname, transformer, log, aliases=None):
log.debug("Attempting to determine the real name of %s", startname)
# workaround, allow user to specify 'ephemeral'
@@ -137,9 +138,14 @@ def sanitize_devname(startname, transformer, log):
return startname
device_path, partition_number = util.expand_dotted_devname(devname)
+ orig = device_path
+
+ if aliases:
+ device_path = aliases.get(device_path, device_path)
+ if orig != device_path:
+ log.debug("Mapped device alias %s to %s", orig, device_path)
if is_meta_device_name(device_path):
- orig = device_path
device_path = transformer(device_path)
if not device_path:
return None
@@ -154,8 +160,9 @@ def sanitize_devname(startname, transformer, log):
if partition_number is None:
partition_path = _get_nth_partition_for_device(device_path, 1)
else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
+ partition_path = _get_nth_partition_for_device(
+ device_path, partition_number
+ )
if partition_path is None:
return None
@@ -169,12 +176,12 @@ def sanitize_devname(startname, transformer, log):
def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
# make a suggestion on the size of swap for this system.
if memsize is None:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
GB = 2 ** 30
sugg_max = 8 * GB
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
+ info = {"avail": "na", "max_in": maxsize, "mem": memsize}
if fsys is None and maxsize is None:
# set max to 8GB default if no filesystem given
@@ -182,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
elif fsys:
statvfs = os.statvfs(fsys)
avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
+ info["avail"] = avail
if maxsize is None:
# set to 25% of filesystem space
maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
+ elif maxsize > ((avail * 0.9)):
# set to 90% of available disk space
- maxsize = int(avail * .9)
+ maxsize = int(avail * 0.9)
elif maxsize is None:
maxsize = sugg_max
- info['max'] = maxsize
+ info["max"] = maxsize
formulas = [
# < 1G: swap = double memory
@@ -221,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if size is not None:
size = maxsize
- info['size'] = size
+ info["size"] = size
MB = 2 ** 20
pinfo = {}
@@ -231,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
else:
pinfo[k] = v
- LOG.debug("suggest %s swap for %s memory with '%s'"
- " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
- pinfo['avail'], pinfo['max_in'], pinfo['max'])
+ LOG.debug(
+ "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'",
+ pinfo["size"],
+ pinfo["mem"],
+ pinfo["avail"],
+ pinfo["max_in"],
+ pinfo["max"],
+ )
return size
@@ -243,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None:
errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
- LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
- fname, fstype, method)
+ LOG.debug(
+ "Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname,
+ fstype,
+ method,
+ )
if method == "fallocate":
- cmd = ['fallocate', '-l', '%sM' % size, fname]
+ cmd = ["fallocate", "-l", "%sM" % size, fname]
elif method == "dd":
- cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%s' % size]
+ cmd = [
+ "dd",
+ "if=/dev/zero",
+ "of=%s" % fname,
+ "bs=1M",
+ "count=%s" % size,
+ ]
try:
subp.subp(cmd, capture=True)
@@ -264,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None:
fstype = util.get_mount_info(swap_dir)[1]
- if (fstype == "xfs" and
- util.kernel_version() < (4, 18)) or fstype == "btrfs":
+ if (
+ fstype == "xfs" and util.kernel_version() < (4, 18)
+ ) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
@@ -277,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None:
if os.path.exists(fname):
util.chmod(fname, 0o600)
try:
- subp.subp(['mkswap', fname])
+ subp.subp(["mkswap", fname])
except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -292,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None):
swap_dir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
except IOError:
LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(swap_dir)
- size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
- memsize=memsize)
+ size = suggested_swapsize(
+ fsys=swap_dir, maxsize=maxsize, memsize=memsize
+ )
mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
- util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
- args=[fname, mibsize])
+ util.log_time(
+ LOG.debug,
+ msg="Setting up swap file",
+ func=create_swapfile,
+ args=[fname, mibsize],
+ )
return fname
def handle_swapcfg(swapcfg):
"""handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
+ return None or (filename, size)
"""
if not isinstance(swapcfg, dict):
LOG.warning("input for swap config was not a dict.")
return None
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
+ fname = swapcfg.get("filename", "/swap.img")
+ size = swapcfg.get("size", 0)
+ maxsize = swapcfg.get("maxsize", None)
if not (size and fname):
LOG.debug("no need to setup swap")
@@ -330,8 +357,10 @@ def handle_swapcfg(swapcfg):
if os.path.exists(fname):
if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s exists, but no /proc/swaps exists, "
- "being safe", fname)
+ LOG.debug(
+ "swap file %s exists, but no /proc/swaps exists, being safe",
+ fname,
+ )
return fname
try:
for line in util.load_file("/proc/swaps").splitlines():
@@ -340,8 +369,9 @@ def handle_swapcfg(swapcfg):
return fname
LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
except Exception:
- LOG.warning("swap file %s exists. Error reading /proc/swaps",
- fname)
+ LOG.warning(
+ "swap file %s exists. Error reading /proc/swaps", fname
+ )
return fname
try:
@@ -362,14 +392,18 @@ def handle(_name, cfg, cloud, log, _args):
def_mnt_opts = "defaults,nobootwait"
uses_systemd = cloud.distro.uses_systemd()
if uses_systemd:
- def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"
+ def_mnt_opts = (
+ "defaults,nofail,x-systemd.requires=cloud-init.service,_netdev"
+ )
defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
+ defmnts = [
+ ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
+ ["swap", "none", "swap", "sw", "0", "0"],
+ ]
cfgmnt = []
if "mounts" in cfg:
@@ -394,15 +428,22 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs[toks[0]] = line
fstab_lines.append(line)
+ device_aliases = cfg.get("device_aliases", {})
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warning("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
+ log.warning(
+ "Mount option %s not a list, got a %s instead",
+ (i + 1),
+ type_utils.obj_name(cfgmnt[i]),
+ )
continue
start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed %s => %s" % (start, sanitized))
@@ -410,8 +451,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.info("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.info(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
cfgmnt[i][0] = sanitized
@@ -444,7 +488,9 @@ def handle(_name, cfg, cloud, log, _args):
# entry has the same device name
for defmnt in defmnts:
start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
@@ -452,8 +498,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent default named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.debug("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.debug(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
defmnt[0] = sanitized
@@ -465,8 +514,7 @@ def handle(_name, cfg, cloud, log, _args):
break
if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
+ log.debug("Not including %s, already previously included", start)
continue
cfgmnt.append(defmnt)
@@ -479,7 +527,7 @@ def handle(_name, cfg, cloud, log, _args):
else:
actlist.append(x)
- swapret = handle_swapcfg(cfg.get('swap', {}))
+ swapret = handle_swapcfg(cfg.get("swap", {}))
if swapret:
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
@@ -498,10 +546,11 @@ def handle(_name, cfg, cloud, log, _args):
needswap = True
if line[1].startswith("/"):
dirs.append(line[1])
- cc_lines.append('\t'.join(line))
+ cc_lines.append("\t".join(line))
- mount_points = [v['mountpoint'] for k, v in util.mounts().items()
- if 'mountpoint' in v]
+ mount_points = [
+ v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v
+ ]
for d in dirs:
try:
util.ensure_dir(d)
@@ -516,11 +565,12 @@ def handle(_name, cfg, cloud, log, _args):
sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
- sops = (["- " + drop for drop in sdrops if drop not in sadds] +
- ["+ " + add for add in sadds if add not in sdrops])
+ sops = ["- " + drop for drop in sdrops if drop not in sadds] + [
+ "+ " + add for add in sadds if add not in sdrops
+ ]
fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
+ contents = "%s\n" % "\n".join(fstab_lines)
util.write_file(FSTAB_PATH, contents)
activate_cmds = []
@@ -540,7 +590,7 @@ def handle(_name, cfg, cloud, log, _args):
fmt = "Activating swap and mounts with: %s"
for cmd in activate_cmds:
- fmt = "Activate mounts: %s:" + ' '.join(cmd)
+ fmt = "Activate mounts: %s:" + " ".join(cmd)
try:
subp.subp(cmd)
log.debug(fmt, "PASS")
@@ -548,4 +598,5 @@ def handle(_name, cfg, cloud, log, _args):
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
+
# vi: ts=4 expandtab
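
An illustrative sketch (hypothetical device names) of the new device_aliases handling added to sanitize_devname() above:

    cfg = {
        "device_aliases": {"my_data": "/dev/sdb"},
        "mounts": [["my_data", "/data", "auto", "defaults,nofail", "0", "2"]],
    }
    # handle() passes cfg["device_aliases"] into sanitize_devname(), so the alias
    # "my_data" is resolved to /dev/sdb first, and the result then goes through the
    # usual meta-name translation and partition probing
    # (/dev/sdb1, /dev/sdbp1, /dev/sdb-part1).
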
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index e183993f..25bba764 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -11,110 +11,136 @@ import os
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit import subp, temp_utils, templater, type_utils, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-NTP_CONF = '/etc/ntp.conf'
+NTP_CONF = "/etc/ntp.conf"
NR_POOL_SERVERS = 4
-distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
- 'sles', 'ubuntu']
+distros = [
+ "almalinux",
+ "alpine",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "opensuse",
+ "photon",
+ "rhel",
+ "rocky",
+ "sles",
+ "ubuntu",
+ "virtuozzo",
+]
NTP_CLIENT_CONFIG = {
- 'chrony': {
- 'check_exe': 'chronyd',
- 'confpath': '/etc/chrony.conf',
- 'packages': ['chrony'],
- 'service_name': 'chrony',
- 'template_name': 'chrony.conf.{distro}',
- 'template': None,
+ "chrony": {
+ "check_exe": "chronyd",
+ "confpath": "/etc/chrony.conf",
+ "packages": ["chrony"],
+ "service_name": "chrony",
+ "template_name": "chrony.conf.{distro}",
+ "template": None,
},
- 'ntp': {
- 'check_exe': 'ntpd',
- 'confpath': NTP_CONF,
- 'packages': ['ntp'],
- 'service_name': 'ntp',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntp": {
+ "check_exe": "ntpd",
+ "confpath": NTP_CONF,
+ "packages": ["ntp"],
+ "service_name": "ntp",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'ntpdate': {
- 'check_exe': 'ntpdate',
- 'confpath': NTP_CONF,
- 'packages': ['ntpdate'],
- 'service_name': 'ntpdate',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntpdate": {
+ "check_exe": "ntpdate",
+ "confpath": NTP_CONF,
+ "packages": ["ntpdate"],
+ "service_name": "ntpdate",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'systemd-timesyncd': {
- 'check_exe': '/lib/systemd/systemd-timesyncd',
- 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
- 'packages': [],
- 'service_name': 'systemd-timesyncd',
- 'template_name': 'timesyncd.conf',
- 'template': None,
+ "systemd-timesyncd": {
+ "check_exe": "/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf",
+ "packages": [],
+ "service_name": "systemd-timesyncd",
+ "template_name": "timesyncd.conf",
+ "template": None,
},
}
# This is Distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
- 'alpine': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
- 'service_name': 'chronyd',
+ "alpine": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'packages': [],
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "packages": [],
+ "service_name": "ntpd",
},
},
- 'debian': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "debian": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
- 'rhel': {
- 'ntp': {
- 'service_name': 'ntpd',
+ "opensuse": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'chrony': {
- 'service_name': 'chronyd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
+ },
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'opensuse': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "photon": {
+ "chrony": {
+ "service_name": "chronyd",
+ },
+ "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"},
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ },
+ "rhel": {
+ "ntp": {
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "chrony": {
+ "service_name": "chronyd",
},
},
- 'sles': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "sles": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'ubuntu': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "ubuntu": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
}
@@ -126,11 +152,12 @@ DISTRO_CLIENT_CONFIG = {
# configuration options before actually attempting to deploy with said
# configuration.
-schema = {
- 'id': 'cc_ntp',
- 'name': 'NTP',
- 'title': 'enable and configure ntp',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_ntp",
+ "name": "NTP",
+ "title": "enable and configure ntp",
+ "description": dedent(
+ """\
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
@@ -138,16 +165,20 @@ schema = {
appended to the filename before any changes are made. A list of ntp
pools and ntp servers can be provided under the ``ntp`` config key.
If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
- in the format ``{0-3}.{distro}.pool.ntp.org``."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Override ntp with chrony configuration on Ubuntu
ntp:
enabled: true
ntp_client: chrony # Uses cloud-init default chrony configuration
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Provide a custom ntp client configuration
ntp:
enabled: true
@@ -174,122 +205,140 @@ schema = {
servers:
- ntp.server.local
- ntp.ubuntu.com
- - 192.168.23.2""")],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'ntp': {
- 'type': ['object', 'null'],
- 'properties': {
- 'pools': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ - 192.168.23.2"""
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "ntp": {
+ "type": ["object", "null"],
+ "properties": {
+ "pools": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp pools. If both pools and servers are
empty, 4 default pool servers will be provided of
the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
for Alpine Linux when using the Busybox NTP client
this setting will be ignored due to the limited
- functionality of Busybox's ntpd.""")
+ functionality of Busybox's ntpd."""
+ ),
},
- 'servers': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "servers": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp servers. If both pools and servers are
empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
},
- 'ntp_client': {
- 'type': 'string',
- 'default': 'auto',
- 'description': dedent("""\
+ "ntp_client": {
+ "type": "string",
+ "default": "auto",
+ "description": dedent(
+ """\
Name of an NTP client to use to configure system NTP.
When unprovided or 'auto' the default client preferred
by the distribution will be used. The following
built-in client names can be used to override existing
configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ systemd-timesyncd."""
+ ),
},
- 'enabled': {
- 'type': 'boolean',
- 'default': True,
- 'description': dedent("""\
+ "enabled": {
+ "type": "boolean",
+ "default": True,
+ "description": dedent(
+ """\
Attempt to enable ntp clients if set to True. If set
to False, ntp client will not be configured or
- installed"""),
+ installed"""
+ ),
},
- 'config': {
- 'description': dedent("""\
+ "config": {
+ "description": dedent(
+ """\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
- 'type': ['object'],
- 'properties': {
- 'confpath': {
- 'type': 'string',
- 'description': dedent("""\
+ ``ntp_client`` specified."""
+ ),
+ "type": ["object"],
+ "properties": {
+ "confpath": {
+ "type": "string",
+ "description": dedent(
+ """\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""
+ ),
},
- 'check_exe': {
- 'type': 'string',
- 'description': dedent("""\
+ "check_exe": {
+ "type": "string",
+ "description": dedent(
+ """\
The executable name for the ``ntp_client``.
For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ 'ntpd' because it runs the ntpd binary."""
+ ),
},
- 'packages': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
+ "packages": {
+ "type": "array",
+ "items": {
+ "type": "string",
},
- 'uniqueItems': True,
- 'description': dedent("""\
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""
+ ),
},
- 'service_name': {
- 'type': 'string',
- 'description': dedent("""\
+ "service_name": {
+ "type": "string",
+ "description": dedent(
+ """\
The systemd or sysvinit service name used to
start and stop the ``ntp_client``
- service."""),
+ service."""
+ ),
},
- 'template': {
- 'type': 'string',
- 'description': dedent("""\
+ "template": {
+ "type": "string",
+ "description": dedent(
+ """\
Inline template allowing users to define their
own ``ntp_client`` configuration template.
The value must start with '## template:jinja'
to enable use of templating support.
- """),
+ """
+ ),
},
},
# Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
# of builtin client values.
- 'required': [],
- 'minProperties': 1, # If we have config, define something
- 'additionalProperties': False
+ "minProperties": 1, # If we have config, define something
+ "additionalProperties": False,
},
},
- 'required': [],
- 'additionalProperties': False
+ "additionalProperties": False,
}
- }
+ },
}
-REQUIRED_NTP_CONFIG_KEYS = frozenset([
- 'check_exe', 'confpath', 'packages', 'service_name'])
+REQUIRED_NTP_CONFIG_KEYS = frozenset(
+ ["check_exe", "confpath", "packages", "service_name"]
+)
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def distro_ntp_client_configs(distro):
@@ -319,21 +368,23 @@ def select_ntp_client(ntp_client, distro):
distro_cfg = distro_ntp_client_configs(distro.name)
# user specified client, return its config
- if ntp_client and ntp_client != 'auto':
- LOG.debug('Selected NTP client "%s" via user-data configuration',
- ntp_client)
+ if ntp_client and ntp_client != "auto":
+ LOG.debug(
+ 'Selected NTP client "%s" via user-data configuration', ntp_client
+ )
return distro_cfg.get(ntp_client, {})
# default to auto if unset in distro
- distro_ntp_client = distro.get_option('ntp_client', 'auto')
+ distro_ntp_client = distro.get_option("ntp_client", "auto")
clientcfg = {}
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if subp.which(cfg.get('check_exe')):
- LOG.debug('Selected NTP client "%s", already installed',
- client)
+ if subp.which(cfg.get("check_exe")):
+ LOG.debug(
+ 'Selected NTP client "%s", already installed', client
+ )
clientcfg = cfg
break
@@ -341,11 +392,14 @@ def select_ntp_client(ntp_client, distro):
client = distro.preferred_ntp_clients[0]
LOG.debug(
'Selected distro preferred NTP client "%s", not yet installed',
- client)
+ client,
+ )
clientcfg = distro_cfg.get(client)
else:
- LOG.debug('Selected NTP client "%s" via distro system config',
- distro_ntp_client)
+ LOG.debug(
+ 'Selected NTP client "%s" via distro system config',
+ distro_ntp_client,
+ )
clientcfg = distro_cfg.get(distro_ntp_client, {})
return clientcfg
@@ -363,7 +417,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
if subp.which(check_exe):
return
if packages is None:
- packages = ['ntp']
+ packages = ["ntp"]
install_func(packages)
@@ -388,25 +442,34 @@ def generate_server_names(distro):
names = []
pool_distro = distro
- if distro == 'sles':
+ if distro == "sles":
# For legal reasons x.pool.sles.ntp.org does not exist,
# use the opensuse pool
- pool_distro = 'opensuse'
- elif distro == 'alpine':
+ pool_distro = "opensuse"
+ elif distro == "alpine" or distro == "eurolinux":
# Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
- # so use general x.pool.ntp.org instead.
- pool_distro = ''
+ # so use general x.pool.ntp.org instead. The same applies to EuroLinux
+ pool_distro = ""
for x in range(0, NR_POOL_SERVERS):
- names.append(".".join(
- [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+ names.append(
+ ".".join(
+ [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n]
+ )
+ )
return names
-def write_ntp_config_template(distro_name, service_name=None, servers=None,
- pools=None, path=None, template_fn=None,
- template=None):
+def write_ntp_config_template(
+ distro_name,
+ service_name=None,
+ servers=None,
+ pools=None,
+ path=None,
+ template_fn=None,
+ template=None,
+):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
@@ -429,27 +492,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
if not pools:
pools = []
- if (len(servers) == 0 and distro_name == 'alpine' and
- service_name == 'ntpd'):
+ if (
+ len(servers) == 0
+ and distro_name == "alpine"
+ and service_name == "ntpd"
+ ):
# Alpine's Busybox ntpd only understands "servers" configuration
# and not "pool" configuration.
servers = generate_server_names(distro_name)
- LOG.debug(
- 'Adding distro default ntp servers: %s', ','.join(servers))
+ LOG.debug("Adding distro default ntp servers: %s", ",".join(servers))
elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
- 'Adding distro default ntp pool servers: %s', ','.join(pools))
+ "Adding distro default ntp pool servers: %s", ",".join(pools)
+ )
if not path:
- raise ValueError('Invalid value for path parameter')
+ raise ValueError("Invalid value for path parameter")
if not template_fn and not template:
- raise ValueError('Not template_fn or template provided')
+ raise ValueError("Not template_fn or template provided")
- params = {'servers': servers, 'pools': pools}
+ params = {"servers": servers, "pools": pools}
if template:
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # filepath is second item in tuple
util.write_file(template_fn, content=template)
@@ -459,21 +525,6 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
util.del_file(template_fn)
-def reload_ntp(service, systemd=False):
- """Restart or reload an ntp system service.
-
- @param service: A string specifying the name of the service to be affected.
- @param systemd: A boolean indicating if the distro uses systemd, defaults
- to False.
- @returns: A tuple of stdout, stderr results from executing the action.
- """
- if systemd:
- cmd = ['systemctl', 'reload-or-restart', service]
- else:
- cmd = ['service', service, 'restart']
- subp.subp(cmd, capture=True)
-
-
def supplemental_schema_validation(ntp_config):
"""Validate user-provided ntp:config option values.
@@ -487,50 +538,62 @@ def supplemental_schema_validation(ntp_config):
errors = []
missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
if missing:
- keys = ', '.join(sorted(missing))
+ keys = ", ".join(sorted(missing))
errors.append(
- 'Missing required ntp:config keys: {keys}'.format(keys=keys))
- elif not any([ntp_config.get('template'),
- ntp_config.get('template_name')]):
+ "Missing required ntp:config keys: {keys}".format(keys=keys)
+ )
+ elif not any(
+ [ntp_config.get("template"), ntp_config.get("template_name")]
+ ):
errors.append(
- 'Either ntp:config:template or ntp:config:template_name values'
- ' are required')
+ "Either ntp:config:template or ntp:config:template_name values"
+ " are required"
+ )
for key, value in sorted(ntp_config.items()):
- keypath = 'ntp:config:' + key
- if key == 'confpath':
+ keypath = "ntp:config:" + key
+ if key == "confpath":
if not all([value, isinstance(value, str)]):
errors.append(
- 'Expected a config file path {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key == 'packages':
+ "Expected a config file path {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key == "packages":
if not isinstance(value, list):
errors.append(
- 'Expected a list of required package names for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key in ('template', 'template_name'):
+ "Expected a list of required package names for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key in ("template", "template_name"):
if value is None: # Either template or template_name can be none
continue
if not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
elif not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}. Found ({value})".format(
+ keypath=keypath, value=value
+ )
+ )
if errors:
- raise ValueError(r'Invalid ntp configuration:\n{errors}'.format(
- errors='\n'.join(errors)))
+ raise ValueError(
+ r"Invalid ntp configuration:\n{errors}".format(
+ errors="\n".join(errors)
+ )
+ )
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
- if 'ntp' not in cfg:
+ if "ntp" not in cfg:
LOG.debug(
- "Skipping module named %s, not present or disabled by cfg", name)
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
- ntp_cfg = cfg['ntp']
+ ntp_cfg = cfg["ntp"]
if ntp_cfg is None:
ntp_cfg = {} # Allow empty config which will install the package
@@ -538,55 +601,64 @@ def handle(name, cfg, cloud, log, _args):
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(
"'ntp' key existed in config, but not a dictionary type,"
- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))
+ )
validate_cloudconfig_schema(cfg, schema)
# Allow users to explicitly enable/disable
- enabled = ntp_cfg.get('enabled', True)
+ enabled = ntp_cfg.get("enabled", True)
if util.is_false(enabled):
LOG.debug("Skipping module named %s, disabled by cfg", name)
return
# Select which client is going to be used and get the configuration
- ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
- cloud.distro)
-
+ ntp_client_config = select_ntp_client(
+ ntp_cfg.get("ntp_client"), cloud.distro
+ )
# Allow user ntp config to override distro configurations
ntp_client_config = util.mergemanydict(
- [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+ [ntp_client_config, ntp_cfg.get("config", {})], reverse=True
+ )
supplemental_schema_validation(ntp_client_config)
- rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+ rename_ntp_conf(confpath=ntp_client_config.get("confpath"))
template_fn = None
- if not ntp_client_config.get('template'):
- template_name = (
- ntp_client_config.get('template_name').replace('{distro}',
- cloud.distro.name))
+ if not ntp_client_config.get("template"):
+ template_name = ntp_client_config.get("template_name").replace(
+ "{distro}", cloud.distro.name
+ )
template_fn = cloud.get_template_filename(template_name)
if not template_fn:
- msg = ('No template found, not rendering %s' %
- ntp_client_config.get('template_name'))
+ msg = (
+ "No template found, not rendering %s"
+ % ntp_client_config.get("template_name")
+ )
raise RuntimeError(msg)
- write_ntp_config_template(cloud.distro.name,
- service_name=ntp_client_config.get(
- 'service_name'),
- servers=ntp_cfg.get('servers', []),
- pools=ntp_cfg.get('pools', []),
- path=ntp_client_config.get('confpath'),
- template_fn=template_fn,
- template=ntp_client_config.get('template'))
-
- install_ntp_client(cloud.distro.install_packages,
- packages=ntp_client_config['packages'],
- check_exe=ntp_client_config['check_exe'])
+ write_ntp_config_template(
+ cloud.distro.name,
+ service_name=ntp_client_config.get("service_name"),
+ servers=ntp_cfg.get("servers", []),
+ pools=ntp_cfg.get("pools", []),
+ path=ntp_client_config.get("confpath"),
+ template_fn=template_fn,
+ template=ntp_client_config.get("template"),
+ )
+
+ install_ntp_client(
+ cloud.distro.install_packages,
+ packages=ntp_client_config["packages"],
+ check_exe=ntp_client_config["check_exe"],
+ )
try:
- reload_ntp(ntp_client_config['service_name'],
- systemd=cloud.distro.uses_systemd())
+ cloud.distro.manage_service(
+ "reload", ntp_client_config.get("service_name")
+ )
except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
+
# vi: ts=4 expandtab
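
A quick worked example of the pool-name fallbacks handled by generate_server_names() above (distro arguments shown for illustration):

    # generate_server_names("debian")    -> ["0.debian.pool.ntp.org", "1.debian.pool.ntp.org",
    #                                        "2.debian.pool.ntp.org", "3.debian.pool.ntp.org"]
    # generate_server_names("sles")      -> ["0.opensuse.pool.ntp.org", ...]  # no SLES pool exists
    # generate_server_names("alpine")    -> ["0.pool.ntp.org", ...]           # generic pool
    # generate_server_names("eurolinux") -> ["0.pool.ntp.org", ...]           # same fallback as Alpine
    # The service restart formerly done by reload_ntp() is now delegated to
    # cloud.distro.manage_service("reload", service_name), as seen in handle().
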
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 036baf85..14cdfab8 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,8 +43,7 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
REBOOT_FILE = "/var/run/reboot-required"
REBOOT_CMD = ["/sbin/reboot"]
@@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
log.debug("Rebooted, but still running after %s seconds", int(elapsed))
# If we got here, not good
elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
+ raise RuntimeError(
+ "Reboot did not happen after %s seconds!" % (int(elapsed))
+ )
def handle(_name, cfg, cloud, log, _args):
# Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+ update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
+ upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
+ reboot_if_required = _multi_cfg_bool_get(
+ cfg, "apt_reboot_if_required", "package_reboot_if_required"
+ )
+ pkglist = util.get_cfg_option_list(cfg, "packages", [])
errors = []
if update or len(pkglist) or upgrade:
@@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args):
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warning("Rebooting after upgrade or install per "
- "%s", REBOOT_FILE)
+ log.warning(
+ "Rebooting after upgrade or install per %s", REBOOT_FILE
+ )
# Flush the above warning + anything else out...
logging.flushLoggers(log)
_fire_reboot(log)
@@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args):
errors.append(e)
if len(errors):
- log.warning("%s failed with exceptions, re-raising the last one",
- len(errors))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one", len(errors)
+ )
raise errors[-1]
+
# vi: ts=4 expandtab
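
A sketch of the old/new key aliasing resolved above; either spelling enables the same behaviour (values are illustrative):

    cfg = {
        "package_update": True,              # legacy alias: apt_update
        "package_upgrade": True,             # legacy alias: apt_upgrade
        "packages": ["htop", "tree"],
        "package_reboot_if_required": True,  # legacy alias: apt_reboot_if_required
    }
    # _multi_cfg_bool_get(cfg, "apt_update", "package_update") is truthy if either
    # key is set, so old user-data keeps working after the rename.
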
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 733c3910..a0e1da78 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -24,6 +24,19 @@ keys to post. Available keys are:
- ``hostname``
- ``fdqn``
+Data is sent as ``x-www-form-urlencoded`` arguments.
+
+**Example HTTP POST**::
+
+ POST / HTTP/1.1
+ Content-Length: 1337
+ User-Agent: Cloud-Init/21.4
+ Accept-Encoding: gzip, deflate
+ Accept: */*
+ Content-Type: application/x-www-form-urlencoded
+
+ pub_key_dsa=dsa_contents&pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal
+
**Internal name:** ``cc_phone_home``
**Module frequency:** per instance
@@ -41,22 +54,19 @@ keys to post. Available keys are:
tries: 10
"""
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
+from cloudinit import templater, url_helper, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'pub_key_ed25519',
- 'instance_id',
- 'hostname',
- 'fqdn'
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn",
]
@@ -74,48 +84,58 @@ def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
+ if "phone_home" not in cfg:
+ log.debug(
+ "Skipping module named %s, "
+ "no 'phone_home' configuration found",
+ name,
+ )
return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warning(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
+ ph_cfg = cfg["phone_home"]
+
+ if "url" not in ph_cfg:
+ log.warning(
+ "Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration",
+ name,
+ )
return
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries')
+ url = ph_cfg["url"]
+ post_list = ph_cfg.get("post", "all")
+ tries = ph_cfg.get("tries")
try:
tries = int(tries)
except Exception:
tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
+ util.logexc(
+ log,
+ "Configuration entry 'tries' is not an integer, using %s instead",
+ tries,
+ )
if post_list == "all":
post_list = POST_LIST_ALL
all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
+ all_keys["instance_id"] = cloud.get_instance_id()
+ all_keys["hostname"] = cloud.get_hostname()
+ all_keys["fqdn"] = cloud.get_hostname(fqdn=True)
pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
+ "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
+ "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub",
+ "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
}
for (n, path) in pubkeys.items():
try:
all_keys[n] = util.load_file(path)
except Exception:
- util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
+ util.logexc(
+ log, "%s: failed to open, can not phone home that data!", path
+ )
submit_keys = {}
for k in post_list:
@@ -123,28 +143,37 @@ def handle(name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warning(("Requested key %s from 'post'"
- " configuration list not available"), k)
+ log.warning(
+ "Requested key %s from 'post'"
+ " configuration list not available",
+ k,
+ )
# Get them ready to be posted
real_submit_keys = {}
for (k, v) in submit_keys.items():
if v is None:
- real_submit_keys[k] = 'N/A'
+ real_submit_keys[k] = "N/A"
else:
real_submit_keys[k] = str(v)
# In case the url is parameterized
url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
+ "INSTANCE_ID": all_keys["instance_id"],
}
url = templater.render_string(url, url_params)
try:
url_helper.read_file_or_url(
- url, data=real_submit_keys, retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
+ url,
+ data=real_submit_keys,
+ retries=tries,
+ sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths),
+ )
except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
+ util.logexc(
+ log, "Failed to post phone home data to %s in %s tries", url, tries
+ )
+
# vi: ts=4 expandtab
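The cc_phone_home hunk above renders a parameterized URL and POSTs the collected instance data, substituting "N/A" for any key it could not read. Below is a minimal, self-contained sketch of that flow, not the cloud-init implementation: the URL, instance values, and the $INSTANCE_ID placeholder style are illustrative assumptions only (the real module renders the URL with cloud-init's templater and retries via url_helper).

from string import Template
from urllib import parse, request

def build_phone_home_request(url_template, instance_id, hostname, fqdn):
    # Missing values are posted as "N/A", mirroring the module's behaviour.
    payload = {
        "instance_id": instance_id or "N/A",
        "hostname": hostname or "N/A",
        "fqdn": fqdn or "N/A",
    }
    # Placeholder substitution for illustration only.
    url = Template(url_template).safe_substitute(INSTANCE_ID=payload["instance_id"])
    data = parse.urlencode(payload).encode()
    return request.Request(url, data=data, method="POST")

req = build_phone_home_request(
    "http://phone.example.com/$INSTANCE_ID/", "i-0123456789", "node1", "node1.example.com"
)
print(req.full_url)  # http://phone.example.com/i-0123456789/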
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 5780a7e9..d4eb68c0 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -58,9 +58,8 @@ import re
import subprocess
import time
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
@@ -75,9 +74,9 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = subp.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(["procstat", "-c", str(pid)])
line = output.splitlines()[1]
- m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
@@ -106,8 +105,9 @@ def check_condition(cond, log=None):
return False
else:
if log:
- log.warning(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
+ log.warning(
+ pre + "unexpected exit %s. " % ret + "do not apply change."
+ )
return False
except Exception as e:
if log:
@@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args):
devnull_fp = open(os.devnull, "w")
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
+ log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args)))
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
+ util.fork_cb(
+ run_after_pid_gone,
+ mypid,
+ cmdline,
+ timeout,
+ log,
+ condition,
+ execmd,
+ [args, devnull_fp],
+ )
def load_power_state(cfg, distro):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
- pstate = cfg.get('power_state')
+ pstate = cfg.get("power_state")
if pstate is None:
return (None, None, None)
@@ -155,22 +163,25 @@ def load_power_state(cfg, distro):
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
- modes_ok = ['halt', 'poweroff', 'reboot']
+ modes_ok = ["halt", "poweroff", "reboot"]
mode = pstate.get("mode")
if mode not in distro.shutdown_options_map:
raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(modes_ok), mode))
+ "power_state[mode] required, must be one of: %s. found: '%s'."
+ % (",".join(modes_ok), mode)
+ )
- args = distro.shutdown_command(mode=mode,
- delay=pstate.get("delay", "now"),
- message=pstate.get("message"))
+ args = distro.shutdown_command(
+ mode=mode,
+ delay=pstate.get("delay", "now"),
+ message=pstate.get("message"),
+ )
try:
- timeout = float(pstate.get('timeout', 30.0))
+ timeout = float(pstate.get("timeout", 30.0))
except ValueError as e:
raise ValueError(
- "failed to convert timeout '%s' to float." % pstate['timeout']
+ "failed to convert timeout '%s' to float." % pstate["timeout"]
) from e
condition = pstate.get("condition", True)
@@ -186,8 +197,12 @@ def doexit(sysexit):
def execmd(exe_args, output=None, data_in=None):
ret = 1
try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
+ proc = subprocess.Popen(
+ exe_args,
+ stdin=subprocess.PIPE,
+ stdout=output,
+ stderr=subprocess.STDOUT,
+ )
proc.communicate(data_in)
ret = proc.returncode
except Exception:
@@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
except Exception as e:
fatal("Unexpected Exception: %s" % e)
- time.sleep(.25)
+ time.sleep(0.25)
if not msg:
fatal("Unexpected error in run_after_pid_gone")
@@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
func(*args)
+
# vi: ts=4 expandtab
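The cc_power_state_change hunk keeps the same condition semantics while reformatting the code: a boolean condition is taken as-is, anything else is run as a command, and only exit status 0 allows the power state change (1 skips quietly, other statuses skip with a warning). A rough standalone sketch of that decision, under the assumption that the POSIX true/false commands are available:

import subprocess

def check_condition_sketch(cond):
    # Booleans short-circuit; anything else is treated as a command whose
    # exit status decides whether the power state change is applied.
    if isinstance(cond, bool):
        return cond
    ret = subprocess.call(cond, shell=isinstance(cond, str))
    if ret == 0:
        return True
    if ret != 1:
        print("unexpected exit %s, not applying change" % ret)
    return False

print(check_condition_sketch(True))       # True
print(check_condition_sketch(["true"]))   # True where /usr/bin/true exists
print(check_condition_sketch(["false"]))  # False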
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index bc981cf4..f51f49bc 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -29,22 +29,44 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
+Agent packages from the puppetlabs repositories can be installed by setting
+``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
+paths will be adjusted accordingly. To maintain backwards compatibility this
+setting defaults to ``packages`` which will install puppet from the distro
+packages.
+
+If installing ``aio`` packages, ``collection`` can also be set to one of
+``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
+counterparts) in order to install specific release streams. By default, the
+puppetlabs repository will be purged after installation finishes; set
+``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
+shell script which is downloaded on the machine and then executed; the path to
+this script can be overridden using the ``aio_install_url`` key.
+
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
keys and lists of ``<key>=<value>`` pairs within each section. Each section
name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``master``, ``agent`` or
+such, section names should be one of: ``main``, ``server``, ``agent`` or
``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppermaster certificate. It should be specified
+instead will be used as the puppetserver certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
-Additionally it's possible to create a csr_attributes.yaml for
-CSR attributes and certificate extension requests.
+Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
+attributes and certificate extension requests.
See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+By default, the puppet service will be automatically enabled after installation
+and set to automatically start on boot. To override this in favor of manual
+puppet execution set ``start_service`` to ``false``.
+
+A single manual run can be triggered by setting ``exec`` to ``true``, and
+additional arguments can be passed to ``puppet agent`` via the ``exec_args``
+key (by default the agent will execute with the ``--test`` flag).
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -56,13 +78,20 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
puppet:
install: <true/false>
version: <version>
+ collection: <aio collection>
+ install_type: <packages/aio>
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: <true/false>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
+ exec: <true/false>
+ exec_args: ['--test']
+ start_service: <true/false>
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
certname: "%i.%f"
ca_cert: |
-------BEGIN CERTIFICATE-------
@@ -79,23 +108,20 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
import os
import socket
-import yaml
from io import StringIO
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
+import yaml
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
-PUPPET_PACKAGE_NAME = 'puppet'
+from cloudinit import helpers, subp, temp_utils, url_helper, util
+AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
-class PuppetConstants(object):
- def __init__(self, puppet_conf_file, puppet_ssl_dir,
- csr_attributes_path, log):
+class PuppetConstants(object):
+ def __init__(
+ self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log
+ ):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs")
@@ -105,51 +131,140 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- subp.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ if os.path.exists("/etc/default/puppet"):
+ subp.subp(
+ [
+ "sed",
+ "-i",
+ "-e",
+ "s/^START=.*/START=yes/",
+ "/etc/default/puppet",
+ ],
+ capture=False,
+ )
+ elif os.path.exists("/bin/systemctl"):
+ subp.subp(
+ ["/bin/systemctl", "enable", "puppet.service"], capture=False
+ )
+ elif os.path.exists("/sbin/chkconfig"):
+ subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
else:
- log.warning(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ log.warning(
+ "Sorry we do not know how to enable puppet services on this system"
+ )
+
+
+def get_config_value(puppet_bin, setting):
+ """Get the config value for a given setting using `puppet config print`
+ :param puppet_bin: path to puppet binary
+ :param setting: setting to query
+ """
+ out, _ = subp.subp([puppet_bin, "config", "print", setting])
+ return out.rstrip()
+
+
+def install_puppet_aio(
+ url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True
+):
+ """Install puppet-agent from the puppetlabs repositories using the one-shot
+ shell script
+
+ :param url: URL from where to download the install script
+ :param version: version to install, blank defaults to latest
+ :param collection: collection to install, blank defaults to latest
+ :param cleanup: whether to purge the puppetlabs repo after installation
+ """
+ args = []
+ if version is not None:
+ args = ["-v", version]
+ if collection is not None:
+ args += ["-c", collection]
+
+ # Purge puppetlabs repos after installation
+ if cleanup:
+ args += ["--cleanup"]
+ content = url_helper.readurl(url=url, retries=5).contents
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, "puppet-install")
+ util.write_file(tmpf, content, mode=0o700)
+ return subp.subp([tmpf] + args, capture=False)
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
+ if "puppet" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'puppet' configuration found", name
+ )
return
- puppet_cfg = cfg['puppet']
+ puppet_cfg = cfg["puppet"]
# Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
+ install = util.get_cfg_option_bool(puppet_cfg, "install", True)
+ version = util.get_cfg_option_str(puppet_cfg, "version", None)
+ collection = util.get_cfg_option_str(puppet_cfg, "collection", None)
+ install_type = util.get_cfg_option_str(
+ puppet_cfg, "install_type", "packages"
+ )
+ cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True)
+ run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False)
+ start_puppetd = util.get_cfg_option_bool(
+ puppet_cfg, "start_service", default=True
+ )
+ aio_install_url = util.get_cfg_option_str(
+ puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL
+ )
+
+ # AIO and distro packages use different paths
+ if install_type == "aio":
+ puppet_user = "root"
+ puppet_bin = "/opt/puppetlabs/bin/puppet"
+ puppet_package = "puppet-agent"
+ else: # default to 'packages'
+ puppet_user = "puppet"
+ puppet_bin = "puppet"
+ puppet_package = "puppet"
+
package_name = util.get_cfg_option_str(
- puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
+ puppet_cfg, "package_name", puppet_package
+ )
+ if not install and version:
+ log.warning(
+ "Puppet install set to false but version supplied, doing nothing."
+ )
+ elif install:
+ log.debug(
+ "Attempting to install puppet %s from %s",
+ version if version else "latest",
+ install_type,
+ )
+
+ if install_type == "packages":
+ cloud.distro.install_packages((package_name, version))
+ elif install_type == "aio":
+ install_puppet_aio(aio_install_url, version, collection, cleanup)
+ else:
+ log.warning("Unknown puppet install type '%s'", install_type)
+ run = False
+
conf_file = util.get_cfg_option_str(
- puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
- ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
+ puppet_cfg, "conf_file", get_config_value(puppet_bin, "config")
+ )
+ ssl_dir = util.get_cfg_option_str(
+ puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir")
+ )
csr_attributes_path = util.get_cfg_option_str(
- puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
+ puppet_cfg,
+ "csr_attributes_path",
+ get_config_value(puppet_bin, "csr_attributes"),
+ )
p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
- if not install and version:
- log.warning(("Puppet install set false but version supplied,"
- " doing nothing."))
- elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
-
- cloud.distro.install_packages((package_name, version))
# ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
+ if "conf" in puppet_cfg:
# Add all sections from the conf object to puppet.conf
contents = util.load_file(p_constants.conf_path)
# Create object for reading puppet.conf values
@@ -158,29 +273,31 @@ def handle(name, cfg, cloud, log, _args):
# mix the rest up. First clean them up
# (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
+ cleaned_contents = "\n".join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
puppet_config.read_file(
- StringIO(cleaned_contents),
- source=p_constants.conf_path)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
+ StringIO(cleaned_contents), source=p_constants.conf_path
+ )
+ for (cfg_name, cfg) in puppet_cfg["conf"].items():
# Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
- if cfg_name == 'ca_cert':
+ # Dump the puppetserver ca certificate in the correct place
+ if cfg_name == "ca_cert":
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(p_constants.ssl_dir, 0o771)
- util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_dir, puppet_user, "root")
util.ensure_dir(p_constants.ssl_cert_dir)
- util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root")
util.write_file(p_constants.ssl_cert_path, cfg)
- util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
+ util.chownbyname(
+ p_constants.ssl_cert_path, puppet_user, "root"
+ )
else:
# Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.items():
- if o == 'certname':
+ if o == "certname":
# Expand %f as the fqdn
# TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
@@ -191,19 +308,46 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- util.rename(p_constants.conf_path, "%s.old"
- % (p_constants.conf_path))
+ util.rename(
+ p_constants.conf_path, "%s.old" % (p_constants.conf_path)
+ )
util.write_file(p_constants.conf_path, puppet_config.stringify())
- if 'csr_attributes' in puppet_cfg:
- util.write_file(p_constants.csr_attributes_path,
- yaml.dump(puppet_cfg['csr_attributes'],
- default_flow_style=False))
+ if "csr_attributes" in puppet_cfg:
+ util.write_file(
+ p_constants.csr_attributes_path,
+ yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False),
+ )
# Set it up so it autostarts
- _autostart_puppet(log)
+ if start_puppetd:
+ _autostart_puppet(log)
+
+ # Run the agent if needed
+ if run:
+ log.debug("Running puppet-agent")
+ cmd = [puppet_bin, "agent"]
+ if "exec_args" in puppet_cfg:
+ cmd_args = puppet_cfg["exec_args"]
+ if isinstance(cmd_args, (list, tuple)):
+ cmd.extend(cmd_args)
+ elif isinstance(cmd_args, str):
+ cmd.extend(cmd_args.split())
+ else:
+ log.warning(
+ "Unknown type %s provided for puppet"
+ " 'exec_args' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ else:
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ subp.subp(cmd, capture=False)
+
+ if start_puppetd:
+ # Start puppetd
+ subp.subp(["service", "puppet", "start"], capture=False)
- # Start puppetd
- subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
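The new exec/exec_args handling in cc_puppet.py normalizes the configured value before invoking "puppet agent": a list or tuple is used verbatim, a string is split on whitespace, and anything else falls back to the default --test arguments. A small illustrative sketch of that normalization (the AIO binary path mirrors the hunk above; the --onetime/--no-daemonize example arguments are just sample input, not a recommendation):

PUPPET_AGENT_DEFAULT_ARGS = ["--test"]

def build_agent_cmd(puppet_bin, exec_args=None):
    # List/tuple used as-is, string split on whitespace, otherwise defaults.
    cmd = [puppet_bin, "agent"]
    if isinstance(exec_args, (list, tuple)):
        cmd.extend(exec_args)
    elif isinstance(exec_args, str):
        cmd.extend(exec_args.split())
    else:
        cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
    return cmd

print(build_agent_cmd("puppet"))
# ['puppet', 'agent', '--test']
print(build_agent_cmd("/opt/puppetlabs/bin/puppet", "--onetime --no-daemonize"))
# ['/opt/puppetlabs/bin/puppet', 'agent', '--onetime', '--no-daemonize']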
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 146758ad..87be5348 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -28,26 +28,24 @@ This module handles
**Internal name:** ``cc_refresh_rmc_and_interface``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** RHEL
"""
+import errno
+
from cloudinit import log as logging
+from cloudinit import netinfo, subp, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import netinfo
-
-import errno
frequency = PER_ALWAYS
LOG = logging.getLogger(__name__)
# Ensure that /opt/rsct/bin has been added to standard PATH of the
# distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl .
-RMCCTRL = 'rmcctrl'
+RMCCTRL = "rmcctrl"
def handle(name, _cfg, _cloud, _log, _args):
@@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args):
return
LOG.debug(
- 'Making the IPv6 up explicitly. '
- 'Ensuring IPv6 interface is not being handled by NetworkManager '
- 'and it is restarted to re-establish the communication with '
- 'the hypervisor')
+ "Making the IPv6 up explicitly. "
+ "Ensuring IPv6 interface is not being handled by NetworkManager "
+ "and it is restarted to re-establish the communication with "
+ "the hypervisor"
+ )
ifaces = find_ipv6_ifaces()
@@ -80,7 +79,7 @@ def find_ipv6_ifaces():
ifaces = []
for iface, data in info.items():
if iface == "lo":
- LOG.debug('Skipping localhost interface')
+ LOG.debug("Skipping localhost interface")
if len(data.get("ipv4", [])) != 0:
# skip this interface, as it has ipv4 addrs
continue
@@ -92,16 +91,16 @@ def refresh_ipv6(interface):
# IPv6 interface is explicitly brought up, subsequent to which the
# RMC services are restarted to re-establish the communication with
# the hypervisor.
- subp.subp(['ip', 'link', 'set', interface, 'down'])
- subp.subp(['ip', 'link', 'set', interface, 'up'])
+ subp.subp(["ip", "link", "set", interface, "down"])
+ subp.subp(["ip", "link", "set", interface, "up"])
def sysconfig_path(iface):
- return '/etc/sysconfig/network-scripts/ifcfg-' + iface
+ return "/etc/sysconfig/network-scripts/ifcfg-" + iface
def restart_network_manager():
- subp.subp(['systemctl', 'restart', 'NetworkManager'])
+ subp.subp(["systemctl", "restart", "NetworkManager"])
def disable_ipv6(iface_file):
@@ -113,12 +112,11 @@ def disable_ipv6(iface_file):
contents = util.load_file(iface_file)
except IOError as e:
if e.errno == errno.ENOENT:
- LOG.debug("IPv6 interface file %s does not exist\n",
- iface_file)
+ LOG.debug("IPv6 interface file %s does not exist\n", iface_file)
else:
raise e
- if 'IPV6INIT' not in contents:
+ if "IPV6INIT" not in contents:
LOG.debug("Interface file %s did not have IPV6INIT", iface_file)
return
@@ -135,11 +133,12 @@ def disable_ipv6(iface_file):
def search(contents):
# Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file.
- return(
- contents.startswith("IPV6ADDR") or
- contents.startswith("IPADDR6") or
- contents.startswith("IPV6INIT") or
- contents.startswith("NM_CONTROLLED"))
+ return (
+ contents.startswith("IPV6ADDR")
+ or contents.startswith("IPADDR6")
+ or contents.startswith("IPV6INIT")
+ or contents.startswith("NM_CONTROLLED")
+ )
def refresh_rmc():
@@ -152,8 +151,8 @@ def refresh_rmc():
# until the subsystem and all resource managers are stopped.
# -s : start Resource Monitoring & Control subsystem.
try:
- subp.subp([RMCCTRL, '-z'])
- subp.subp([RMCCTRL, '-s'])
+ subp.subp([RMCCTRL, "-z"])
+ subp.subp([RMCCTRL, "-s"])
except Exception:
- util.logexc(LOG, 'Failed to refresh the RMC subsystem.')
+ util.logexc(LOG, "Failed to refresh the RMC subsystem.")
raise
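The search() predicate reformatted above decides which lines of a sysconfig ifcfg file are IPv6- or NetworkManager-related and therefore filtered out. A short illustrative sketch of that filter; the interface name and addresses below are made up:

def is_ipv6_or_nm_line(line):
    # Mirrors the search() predicate from cc_refresh_rmc_and_interface.
    return (
        line.startswith("IPV6ADDR")
        or line.startswith("IPADDR6")
        or line.startswith("IPV6INIT")
        or line.startswith("NM_CONTROLLED")
    )

sample = """DEVICE=env2
IPV6INIT=yes
IPV6ADDR=2001:db8::10/64
ONBOOT=yes
NM_CONTROLLED=yes"""

kept = [line for line in sample.splitlines() if not is_ipv6_or_nm_line(line)]
print("\n".join(kept))  # only DEVICE=env2 and ONBOOT=yes remain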
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 1cd72774..3b929903 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages.
import os
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
frequency = PER_INSTANCE
@@ -49,34 +48,34 @@ frequency = PER_INSTANCE
# The symlink for RMCCTRL and RECFGCT are
# /usr/sbin/rsct/bin/rmcctrl and
# /usr/sbin/rsct/install/bin/recfgct respectively.
-RSCT_PATH = '/opt/rsct/install/bin'
-RMCCTRL = 'rmcctrl'
-RECFGCT = 'recfgct'
+RSCT_PATH = "/opt/rsct/install/bin"
+RMCCTRL = "rmcctrl"
+RECFGCT = "recfgct"
LOG = logging.getLogger(__name__)
-NODE_ID_FILE = '/etc/ct_node_id'
+NODE_ID_FILE = "/etc/ct_node_id"
def handle(name, _cfg, cloud, _log, _args):
# Ensuring node id has to be generated only once during first boot
- if cloud.datasource.platform_type == 'none':
- LOG.debug('Skipping creation of new ct_node_id node')
+ if cloud.datasource.platform_type == "none":
+ LOG.debug("Skipping creation of new ct_node_id node")
return
if not os.path.isdir(RSCT_PATH):
LOG.debug("module disabled, RSCT_PATH not present")
return
- orig_path = os.environ.get('PATH')
+ orig_path = os.environ.get("PATH")
try:
add_path(orig_path)
reset_rmc()
finally:
if orig_path:
- os.environ['PATH'] = orig_path
+ os.environ["PATH"] = orig_path
else:
- del os.environ['PATH']
+ del os.environ["PATH"]
def reconfigure_rsct_subsystems():
@@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems():
LOG.debug(out.strip())
return out
except subp.ProcessExecutionError:
- util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+ util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.")
raise
def get_node_id():
try:
fp = util.load_file(NODE_ID_FILE)
- node_id = fp.split('\n')[0]
+ node_id = fp.split("\n")[0]
return node_id
except Exception:
- util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+ util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE)
raise
@@ -107,25 +106,25 @@ def add_path(orig_path):
    # So that cloud-init can automatically find and
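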
# run RECFGCT to create new node_id.
suff = ":" + orig_path if orig_path else ""
- os.environ['PATH'] = RSCT_PATH + suff
- return os.environ['PATH']
+ os.environ["PATH"] = RSCT_PATH + suff
+ return os.environ["PATH"]
def rmcctrl():
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
try:
- return subp.subp([RMCCTRL, '-z'])
+ return subp.subp([RMCCTRL, "-z"])
except Exception:
- util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+ util.logexc(LOG, "Failed to stop the RMC subsystem.")
raise
def reset_rmc():
- LOG.debug('Attempting to reset RMC.')
+ LOG.debug("Attempting to reset RMC.")
node_id_before = get_node_id()
- LOG.debug('Node ID at beginning of module: %s', node_id_before)
+ LOG.debug("Node ID at beginning of module: %s", node_id_before)
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
@@ -133,11 +132,11 @@ def reset_rmc():
reconfigure_rsct_subsystems()
node_id_after = get_node_id()
- LOG.debug('Node ID at end of module: %s', node_id_after)
+ LOG.debug("Node ID at end of module: %s", node_id_after)
# Check if new node ID is generated or not
# by comparing old and new node ID
if node_id_after == node_id_before:
- msg = 'New node ID did not get generated.'
+ msg = "New node ID did not get generated."
LOG.error(msg)
raise Exception(msg)
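cc_reset_rmc temporarily prepends the RSCT tools directory to PATH so rmcctrl and recfgct can be found, then restores (or removes) the original value in a finally block. A compact illustrative sketch of that pattern, not the module itself:

import os

RSCT_PATH = "/opt/rsct/install/bin"  # where the rmcctrl/recfgct symlinks live

def with_rsct_on_path(func):
    # Prepend RSCT_PATH for the duration of the call, then restore PATH.
    orig_path = os.environ.get("PATH")
    os.environ["PATH"] = RSCT_PATH + (":" + orig_path if orig_path else "")
    try:
        return func()
    finally:
        if orig_path:
            os.environ["PATH"] = orig_path
        else:
            del os.environ["PATH"]

print(with_rsct_on_path(lambda: os.environ["PATH"].split(":")[0]))
# -> /opt/rsct/install/bin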
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 9afbb847..19b923a8 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,22 +13,25 @@ import os
import stat
from textwrap import dedent
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
NOBLOCK = "noblock"
frequency = PER_ALWAYS
-distros = ['all']
-
-schema = {
- 'id': 'cc_resizefs',
- 'name': 'Resizefs',
- 'title': 'Resize filesystem',
- 'description': dedent("""\
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_resizefs",
+ "name": "Resizefs",
+ "title": "Resize filesystem",
+ "description": dedent(
+ """\
        Resize a filesystem to use all available space on the partition. This
module is useful along with ``cc_growpart`` and will ensure that if the
root partition has been resized the root filesystem will be resized
@@ -37,22 +40,29 @@ schema = {
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
enabled by setting ``resize_rootfs`` to ``true``. This module can be
- disabled altogether by setting ``resize_rootfs`` to ``false``."""),
- 'distros': distros,
- 'examples': [
- 'resize_rootfs: false # disable root filesystem resize operation'],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'resize_rootfs': {
- 'enum': [True, False, NOBLOCK],
- 'description': dedent("""\
- Whether to resize the root partition. Default: 'true'""")
+ disabled altogether by setting ``resize_rootfs`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resize_rootfs: false # disable root filesystem resize operation"
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "resize_rootfs": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the root partition. Default: 'true'"""
+ ),
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -61,28 +71,38 @@ def _resize_btrfs(mount_point, devpth):
# Use a subvolume that is not ro to trick the resize operation to do the
# "right" thing. The use of ".snapshot" is specific to "snapper" a generic
# solution would be walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and
- os.path.isdir("%s/.snapshots" % mount_point)):
- return ('btrfs', 'filesystem', 'resize', 'max',
- '%s/.snapshots' % mount_point)
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
else:
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
+ return ("resize2fs", devpth)
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', mount_point)
+ return ("xfs_growfs", mount_point)
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', mount_point)
+ return ("growfs", "-y", mount_point)
def _resize_zfs(mount_point, devpth):
- return ('zpool', 'online', '-e', mount_point, devpth)
+ return ("zpool", "online", "-e", mount_point, devpth)
+
+
+def _resize_hammer2(mount_point, devpth):
+ return ("hammer2", "growfs", mount_point)
def _can_skip_resize_ufs(mount_point, devpth):
@@ -94,7 +114,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
# growfs exits with 1 for almost all cases up to this one.
# This means we can't just use rcs=[0, 1] as subp parameter:
try:
- subp.subp(['growfs', '-N', devpth])
+ subp.subp(["growfs", "-N", devpth])
except subp.ProcessExecutionError as e:
if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
# This FS is already at the desired size
@@ -108,16 +128,15 @@ def _can_skip_resize_ufs(mount_point, devpth):
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
- ('zfs', _resize_zfs),
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
]
-RESIZE_FS_PRECHECK_CMDS = {
- 'ufs': _can_skip_resize_ufs
-}
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
def can_skip_resize(fs_type, resize_what, devpth):
@@ -141,52 +160,66 @@ def maybe_get_writable_device_path(devpath, info, log):
container = util.is_container()
# Ensure the path is a block device.
- if (devpath == "/dev/root" and not os.path.exists(devpath) and
- not container):
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
- if devpath == 'overlayroot':
+ if devpath == "overlayroot":
log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
- if devpath.startswith('gpt/'):
- log.debug('We have a gpt label - just go ahead')
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
- if not devpath.startswith('/dev/') and not os.path.exists(devpath):
- fulldevpath = '/dev/' + devpath.lstrip('/')
- log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
- devpath, fulldevpath)
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
devpath = fulldevpath
try:
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpath, info)
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
elif exc.errno == errno.ENOENT:
- log.warning("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
else:
raise exc
return None
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info))
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
else:
- log.warning("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
return None
return devpath # The writable block devpath
@@ -215,8 +248,8 @@ def handle(name, cfg, _cloud, log, args):
# we will have to get the zpool name out of this
# and set the resize_what variable to the zpool
# so the _resize_zfs function gets the right attribute.
- if fs_type == 'zfs':
- zpool = devpth.split('/')[0]
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
devpth = util.get_device_info_from_zpool(zpool)
if not devpth:
return # could not find device from zpool
@@ -231,8 +264,9 @@ def handle(name, cfg, _cloud, log, args):
resizer = None
if can_skip_resize(fs_type, resize_what, devpth):
- log.debug("Skip resize filesystem type %s for %s",
- fs_type, resize_what)
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
return
fstype_lc = fs_type.lower()
@@ -242,29 +276,42 @@ def handle(name, cfg, _cloud, log, args):
break
if not resizer:
- log.warning("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
return
resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
if resize_root == NOBLOCK:
# Fork to a child that will run
# the resize command
util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s root filesystem (type=%s, val=%s)", action, fs_type, resize_root
+ )
def do_resize(resize_cmd, log):
@@ -276,4 +323,5 @@ def do_resize(resize_cmd, log):
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
+
# vi: ts=4 expandtab
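The reformatted RESIZE_FS_PREFIXES_CMDS table in cc_resizefs.py (now including hammer2) dispatches on a prefix of the lower-cased filesystem type, so ext2/ext3/ext4 all hit the "ext" entry and the first match supplies the resize command. An illustrative, simplified sketch of that dispatch (the btrfs entry here omits the read-only/.snapshots special case shown in the hunk):

RESIZE_FS_PREFIXES_CMDS = [
    ("btrfs", lambda mnt, dev: ("btrfs", "filesystem", "resize", "max", mnt)),
    ("ext", lambda mnt, dev: ("resize2fs", dev)),
    ("xfs", lambda mnt, dev: ("xfs_growfs", mnt)),
    ("ufs", lambda mnt, dev: ("growfs", "-y", mnt)),
    ("zfs", lambda mnt, dev: ("zpool", "online", "-e", mnt, dev)),
    ("hammer2", lambda mnt, dev: ("hammer2", "growfs", mnt)),
]

def resize_command(fs_type, mount_point, devpth):
    # ext2/ext3/ext4 all match the "ext" prefix; the first hit wins.
    for prefix, builder in RESIZE_FS_PREFIXES_CMDS:
        if fs_type.lower().startswith(prefix):
            return builder(mount_point, devpth)
    return None  # unknown type: the module logs a warning and skips

print(resize_command("ext4", "/", "/dev/vda1"))  # ('resize2fs', '/dev/vda1')
print(resize_command("XFS", "/", "/dev/vda1"))   # ('xfs_growfs', '/')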
diff --git a/cloudinit/config/cc_resizefs_vyos.py b/cloudinit/config/cc_resizefs_vyos.py
index f8eb84fe..b54f2e27 100644
--- a/cloudinit/config/cc_resizefs_vyos.py
+++ b/cloudinit/config/cc_resizefs_vyos.py
@@ -6,31 +6,33 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Resizefs: cloud-config module which resizes the filesystem"""
+"""Resizefs_vyos: cloud-config module which resizes filesystems"""
import errno
import os
import stat
from textwrap import dedent
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
NOBLOCK = "noblock"
RESIZEFS_LIST_DEFAULT = ['/']
frequency = PER_ALWAYS
-distros = ['all']
-
-# Renamed to schema_vyos to pass build tests without modifying upstream sources
-schema_vyos = {
- 'id': 'cc_resizefs_vyos',
- 'name': 'Resizefs',
- 'title': 'Resize filesystem',
- 'description': dedent("""\
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_resizefs_vyos",
+ "name": "Resizefs_vyos",
+ "title": "Resize filesystems",
+ "description": dedent(
+ """\
        Resize filesystems to use all available space on their partitions. This
module is useful along with ``cc_growpart`` and will ensure that if a
partition has been resized the filesystem will be resized
@@ -38,34 +40,40 @@ schema_vyos = {
partition and will block the boot process while the resize command is
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
- enabled by setting ``resizefs_enabled`` to ``noblock``. This module can
- be disabled altogether by setting ``resizefs_enabled`` to ``false``.
- """),
- 'distros': distros,
- 'examples': [
- 'resizefs_enabled: false # disable filesystems resize operation'
- 'resize_fs: ["/", "/dev/vda1"]'],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'resizefs_enabled': {
- 'enum': [True, False, NOBLOCK],
- 'description': dedent("""\
- Whether to resize the partitions. Default: 'true'""")
+      enabled by setting ``resizefs_enabled`` to ``noblock``. This module can
+ be disabled altogether by setting ``resizefs_enabled`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resizefs_enabled: false # disable filesystems resize operation",
+ "resizefs_list: [\"/\", \"/dev/vda1\"]"],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "resizefs_enabled": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the partitions. Default: 'true'"""
+ ),
},
- 'resizefs_list': {
- 'type': 'array',
- 'items': {'type': 'string'},
- 'additionalItems': False, # Reject items non-string
- 'description': dedent("""\
+ "resizefs_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "description": dedent(
+ """\
                List of partitions/filesystems which should be resized.
- Default: '/'""")
+ Default: '/'"""
+ )
}
- }
+ },
}
-# Renamed to schema_vyos to pass build tests without modifying upstream sources
-__doc__ = get_schema_doc(schema_vyos) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -74,28 +82,38 @@ def _resize_btrfs(mount_point, devpth):
# Use a subvolume that is not ro to trick the resize operation to do the
# "right" thing. The use of ".snapshot" is specific to "snapper" a generic
# solution would be walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and
- os.path.isdir("%s/.snapshots" % mount_point)):
- return ('btrfs', 'filesystem', 'resize', 'max',
- '%s/.snapshots' % mount_point)
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
else:
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
+ return ("resize2fs", devpth)
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', mount_point)
+ return ("xfs_growfs", mount_point)
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', mount_point)
+ return ("growfs", "-y", mount_point)
def _resize_zfs(mount_point, devpth):
- return ('zpool', 'online', '-e', mount_point, devpth)
+ return ("zpool", "online", "-e", mount_point, devpth)
+
+
+def _resize_hammer2(mount_point, devpth):
+ return ("hammer2", "growfs", mount_point)
def _can_skip_resize_ufs(mount_point, devpth):
@@ -107,7 +125,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
# growfs exits with 1 for almost all cases up to this one.
# This means we can't just use rcs=[0, 1] as subp parameter:
try:
- subp.subp(['growfs', '-N', devpth])
+ subp.subp(["growfs", "-N", devpth])
except subp.ProcessExecutionError as e:
if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
# This FS is already at the desired size
@@ -121,23 +139,22 @@ def _can_skip_resize_ufs(mount_point, devpth):
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
- ('zfs', _resize_zfs),
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
]
-RESIZE_FS_PRECHECK_CMDS = {
- 'ufs': _can_skip_resize_ufs
-}
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
-def can_skip_resize(fs_type, resize_item, devpth):
+def can_skip_resize(fs_type, resize_what, devpth):
fstype_lc = fs_type.lower()
for i, func in RESIZE_FS_PRECHECK_CMDS.items():
if fstype_lc.startswith(i):
- return func(resize_item, devpth)
+ return func(resize_what, devpth)
return False
@@ -154,56 +171,150 @@ def maybe_get_writable_device_path(devpath, info, log):
container = util.is_container()
# Ensure the path is a block device.
- if (devpath == "/dev/root" and not os.path.exists(devpath) and
- not container):
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
- if devpath == 'overlayroot':
+ if devpath == "overlayroot":
log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
- if devpath.startswith('gpt/'):
- log.debug('We have a gpt label - just go ahead')
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
- if not devpath.startswith('/dev/') and not os.path.exists(devpath):
- fulldevpath = '/dev/' + devpath.lstrip('/')
- log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
- devpath, fulldevpath)
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
devpath = fulldevpath
try:
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpath, info)
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
elif exc.errno == errno.ENOENT:
- log.warning("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
else:
raise exc
return None
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info))
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
else:
- log.warning("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
return None
return devpath # The writable block devpath
+def resize_fs(resize_what, log, resize_enabled):
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warning("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ # if we have a zfs then our device path at this point
+ # is the zfs label. For example: vmzroot/ROOT/freebsd
+ # we will have to get the zpool name out of this
+ # and set the resize_what variable to the zpool
+ # so the _resize_zfs function gets the right attribute.
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
+ devpth = util.get_device_info_from_zpool(zpool)
+ if not devpth:
+ return # could not find device from zpool
+ resize_what = zpool
+
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ devpth = maybe_get_writable_device_path(devpth, info, log)
+ if not devpth:
+ return # devpath was not a writable block device
+
+ resizer = None
+ if can_skip_resize(fs_type, resize_what, devpth):
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
+ return
+
+ fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
+
+ if not resizer:
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
+ return
+
+ resize_cmd = resizer(resize_what, devpth)
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
+
+ if resize_enabled == NOBLOCK:
+ # Fork to a child that will run
+ # the resize command
+ util.fork_cb(
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+ else:
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
+ if resize_enabled == NOBLOCK:
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s filesystem on %s (type=%s, val=%s)", action, resize_what,
+ fs_type, resize_enabled
+ )
+
+
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_enabled = args[0]
@@ -217,84 +328,19 @@ def handle(name, cfg, _cloud, log, args):
resizefs_enabled instead!""")
resize_enabled = resize_rootfs_option
- # Renamed to schema_vyos to pass build tests without modifying upstream
- validate_cloudconfig_schema(cfg, schema_vyos)
+ validate_cloudconfig_schema(cfg, schema)
if not util.translate_bool(resize_enabled, addons=[NOBLOCK]):
log.debug("Skipping module named %s, resizing disabled", name)
return
# Get list of partitions to resize
- resize_what = util.get_cfg_option_list(cfg, "resizefs_list",
+ resize_list = util.get_cfg_option_list(cfg, "resizefs_list",
RESIZEFS_LIST_DEFAULT)
- log.debug("Filesystems to resize: %s", resize_what)
-
- # Resize all filesystems from resize_what
- for resize_item in resize_what:
-
- result = util.get_mount_info(resize_item, log)
- if not result:
- log.warning("Could not determine filesystem type of %s",
- resize_item)
- return
-
- (devpth, fs_type, mount_point) = result
-
- # if we have a zfs then our device path at this point
- # is the zfs label. For example: vmzroot/ROOT/freebsd
- # we will have to get the zpool name out of this
- # and set the resize_item variable to the zpool
- # so the _resize_zfs function gets the right attribute.
- if fs_type == 'zfs':
- zpool = devpth.split('/')[0]
- devpth = util.get_device_info_from_zpool(zpool)
- if not devpth:
- return # could not find device from zpool
- resize_item = zpool
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point,
- resize_item)
- log.debug("resize_info: %s" % info)
-
- devpth = maybe_get_writable_device_path(devpth, info, log)
- if not devpth:
- return # devpath was not a writable block device
-
- resizer = None
- if can_skip_resize(fs_type, resize_item, devpth):
- log.debug("Skip resize filesystem type %s for %s",
- fs_type, resize_item)
- return
-
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warning("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_item)
- return
-
- resize_cmd = resizer(resize_item, devpth)
- log.debug("Resizing %s (%s) using %s", resize_item, fs_type,
- ' '.join(resize_cmd))
-
- if resize_enabled == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
- else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
+ log.debug("Filesystems to resize: %s", resize_list)
- action = 'Resized'
- if resize_enabled == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s filesystem on %s (type=%s, val=%s)", action, resize_item,
- fs_type, resize_enabled)
+ # Resize all filesystems from resize_list
+ for resize_what in resize_list:
+ resize_fs(resize_what, log, resize_enabled)
def do_resize(resize_cmd, log):
@@ -306,4 +352,5 @@ def do_resize(resize_cmd, log):
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
+
# vi: ts=4 expandtab
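The VyOS-specific refactor above moves the per-filesystem work into resize_fs() and has handle() simply loop over the configured resizefs_list (defaulting to "/"). A small illustrative sketch of how that list is derived from cloud-config; it only loosely mirrors util.get_cfg_option_list and the scalar-to-list shorthand is an assumption for illustration:

RESIZEFS_LIST_DEFAULT = ["/"]

def filesystems_to_resize(cfg):
    # Accept a single string as shorthand for a one-element list.
    value = cfg.get("resizefs_list", RESIZEFS_LIST_DEFAULT)
    return [value] if isinstance(value, str) else list(value)

print(filesystems_to_resize({}))                                     # ['/']
print(filesystems_to_resize({"resizefs_list": ["/", "/dev/vda1"]}))  # ['/', '/dev/vda1']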
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 7beb11ca..b2970d51 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -14,12 +14,12 @@ Resolv Conf
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef own dns configuration.
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat
+As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
.. note::
- For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
+ For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
.. note::
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** alpine, fedora, rhel, sles
+**Supported distros:** alpine, fedora, photon, rhel, sles
**Config keys**::
@@ -47,23 +47,27 @@ are configured correctly.
"""
from cloudinit import log as logging
+from cloudinit import templater, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import templater
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
+distros = ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"]
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ "/etc/resolv.conf": "resolv.conf",
+ "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+}
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
+
+def generate_resolv_conf(template_fn, params, target_fname):
flags = []
false_flags = []
- if 'options' in params:
- for key, val in params['options'].items():
+ if "options" in params:
+ for key, val in params["options"].items():
if isinstance(val, bool):
if val:
flags.append(key)
@@ -71,12 +75,12 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
false_flags.append(key)
for flag in flags + false_flags:
- del params['options'][flag]
+ del params["options"][flag]
- if not params.get('options'):
- params['options'] = {}
+ if not params.get("options"):
+ params["options"] = {}
- params['flags'] = flags
+ params["flags"] = flags
LOG.debug("Writing resolv.conf from template %s", template_fn)
templater.render_to_file(template_fn, target_fname, params)
@@ -92,24 +96,39 @@ def handle(name, cfg, cloud, log, _args):
@param args: Any module arguments from cloud.cfg
"""
if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " no 'manage_resolv_conf' key in configuration",
+ name,
+ )
return
if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " 'manage_resolv_conf' present but set to False",
+ name,
+ )
return
if "resolv_conf" not in cfg:
log.warning("manage_resolv_conf True but no parameters provided!")
+ return
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warning("No template found, not rendering /etc/resolv.conf")
+ try:
+ template_fn = cloud.get_template_filename(
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn]
+ )
+ except KeyError:
+ log.warning("No template found, not rendering resolve configs")
return
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
+ generate_resolv_conf(
+ template_fn=template_fn,
+ params=cfg["resolv_conf"],
+ target_fname=cloud.distro.resolve_conf_fn,
+ )
return
+
# vi: ts=4 expandtab
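With the new RESOLVE_CONFIG_TEMPLATE_MAP in cc_resolv_conf.py, the template is chosen from the distro's resolver configuration path, and an unknown path skips rendering instead of guessing. A short illustrative sketch of that lookup; the unknown path in the example is hypothetical:

RESOLVE_CONFIG_TEMPLATE_MAP = {
    "/etc/resolv.conf": "resolv.conf",
    "/etc/systemd/resolved.conf": "systemd.resolved.conf",
}

def template_for(resolve_conf_fn):
    # An unrecognized resolver path means no template is rendered at all.
    try:
        return RESOLVE_CONFIG_TEMPLATE_MAP[resolve_conf_fn]
    except KeyError:
        return None

print(template_for("/etc/systemd/resolved.conf"))  # systemd.resolved.conf
print(template_for("/etc/unknown-resolver.conf"))  # None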
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28d62e9d..b81a7a9b 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -5,15 +5,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""
-RedHat Subscription
--------------------
+Red Hat Subscription
+--------------------
**Summary:** register red hat enterprise linux based system
-Register a RedHat system either by username and password *or* activation and
+Register a Red Hat system either by username and password *or* activation and
org. Following a successful registration, you can auto-attach subscriptions, set
the service level, add subscriptions based on pool id, enable/disable yum
repositories based on repo id, and alter the rhsm_baseurl and server-hostname
-in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register RedHat
+in ``/etc/rhsm/rhsm.conf``. For more details, see the ``Register Red Hat
Subscription`` example config.
**Internal name:** ``cc_rh_subscription``
@@ -39,12 +39,11 @@ Subscription`` example config.
"""
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
-distros = ['fedora', 'rhel']
+distros = ["fedora", "rhel"]
def handle(name, cfg, _cloud, log, _args):
@@ -60,8 +59,9 @@ def handle(name, cfg, _cloud, log, _args):
raise SubscriptionError(verify_msg)
cont = sm.rhn_register()
if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
+ raise SubscriptionError(
+ "Registration failed or did not run completely"
+ )
# Splitting up the registration, auto-attach, and servicelevel
# commands because the error codes, messages from subman are not
@@ -70,8 +70,7 @@ def handle(name, cfg, _cloud, log, _args):
# Attempt to change the service level
if sm.auto_attach and sm.servicelevel is not None:
if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
+ raise SubscriptionError("Setting of service-level failed")
else:
sm.log.debug("Completed auto-attach with service level")
elif sm.auto_attach:
@@ -87,8 +86,9 @@ def handle(name, cfg, _cloud, log, _args):
return_stat = sm.addPool(sm.pools)
if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
+ raise SubscriptionError(
+ "Unable to attach pools {0}".format(sm.pools)
+ )
return_stat = sm.update_repos()
if not return_stat:
raise SubscriptionError("Unable to add or remove repos")
@@ -105,72 +105,87 @@ class SubscriptionError(Exception):
class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
+ valid_rh_keys = [
+ "org",
+ "activation-key",
+ "username",
+ "password",
+ "disable-repo",
+ "enable-repo",
+ "add-pool",
+ "rhsm-baseurl",
+ "server-hostname",
+ "auto-attach",
+ "service-level",
+ ]
def __init__(self, cfg, log=None):
if log is None:
log = LOG
self.log = log
self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
+ self.rhel_cfg = self.cfg.get("rh_subscription", {})
+ self.rhsm_baseurl = self.rhel_cfg.get("rhsm-baseurl")
+ self.server_hostname = self.rhel_cfg.get("server-hostname")
+ self.pools = self.rhel_cfg.get("add-pool")
+ self.activation_key = self.rhel_cfg.get("activation-key")
+ self.org = self.rhel_cfg.get("org")
+ self.userid = self.rhel_cfg.get("username")
+ self.password = self.rhel_cfg.get("password")
+ self.auto_attach = self.rhel_cfg.get("auto-attach")
+ self.enable_repo = self.rhel_cfg.get("enable-repo")
+ self.disable_repo = self.rhel_cfg.get("disable-repo")
+ self.servicelevel = self.rhel_cfg.get("service-level")
def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
+ """Simple wrapper for logging info messages. Useful for unittests"""
self.log.info(msg)
def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
+ """Simple wrapper for logging warning messages. Useful for unittests"""
self.log.warning(msg)
def _verify_keys(self):
- '''
+ """
Checks that the keys in the rh_subscription dict from the user-data
are what we expect.
- '''
+ """
for k in self.rhel_cfg:
if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
+ bad_key = (
+ "{0} is not a valid key for rh_subscription. "
+ "Valid keys are: "
+ "{1}".format(k, ", ".join(self.valid_rh_keys))
+ )
return False, bad_key
# Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
- not_bool = "The key auto-attach must be a boolean value "\
- "(True/False "
+ if (self.auto_attach is not None) and not (
+ util.is_true(self.auto_attach) or util.is_false(self.auto_attach)
+ ):
+ not_bool = (
+                "The key auto-attach must be a boolean value (True/False)"
+ )
return False, not_bool
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
+ if (self.servicelevel is not None) and (
+ (not self.auto_attach) or (util.is_false(str(self.auto_attach)))
+ ):
+ no_auto = (
+ "The service-level key must be used in conjunction "
+ "with the auto-attach key. Please re-run with "
+ "auto-attach: True"
+ )
return False, no_auto
return True, None
def is_registered(self):
- '''
+ """
Checks if the system is already registered and returns
True if so, else False
- '''
- cmd = ['identity']
+ """
+ cmd = ["identity"]
try:
_sub_man_cli(cmd)
@@ -180,15 +195,18 @@ class SubscriptionManager(object):
return True
def rhn_register(self):
- '''
+ """
Registers the system by userid and password or activation key
    and org. Returns True when successful, False when not.
- '''
+ """
if (self.activation_key is not None) and (self.org is not None):
# register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
+ cmd = [
+ "register",
+ "--activationkey={0}".format(self.activation_key),
+ "--org={0}".format(self.org),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -203,14 +221,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
elif (self.userid is not None) and (self.password is not None):
# register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
+ cmd = [
+ "register",
+ "--username={0}".format(self.userid),
+ "--password={0}".format(self.password),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -226,15 +248,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
- self.log_warn("Use either activationkey and org *or* userid "
- "and password")
+ self.log_warn(
+ "Unable to register system due to incomplete information."
+ )
+ self.log_warn(
+ "Use either activationkey and org *or* userid and password"
+ )
return False
reg_id = return_out.split("ID: ")[1].rstrip()
@@ -242,19 +267,25 @@ class SubscriptionManager(object):
return True
def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
+ cmd = [
+ "attach",
+ "--auto",
+ "--servicelevel={0}".format(self.servicelevel),
+ ]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
+ if e.stdout.rstrip() != "":
for line in e.stdout.split("\n"):
- if line != '':
+ if line != "":
self.log_warn(line)
else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
+ self.log_warn(
+ "Setting the service level failed with: {0}".format(
+ e.stderr.strip()
+ )
+ )
return False
for line in return_out.split("\n"):
if line != "":
@@ -262,7 +293,7 @@ class SubscriptionManager(object):
return True
def _set_auto_attach(self):
- cmd = ['attach', '--auto']
+ cmd = ["attach", "--auto"]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
@@ -274,52 +305,52 @@ class SubscriptionManager(object):
return True
def _getPools(self):
- '''
+ """
    Gets the list of pools for the active subscription and returns them
in list form.
- '''
+ """
available = []
consumed = []
# Get all available pools
- cmd = ['list', '--available', '--pool-only']
+ cmd = ["list", "--available", "--pool-only"]
results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
+ cmd = ["list", "--consumed", "--pool-only"]
results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
def _getRepos(self):
- '''
+ """
Obtains the current list of active yum repositories and returns
them in list form.
- '''
+ """
- cmd = ['repos', '--list-enabled']
+ cmd = ["repos", "--list-enabled"]
return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
+ active_repos.append((repo.split(":")[1]).strip())
- cmd = ['repos', '--list-disabled']
+ cmd = ["repos", "--list-disabled"]
return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
+ inactive_repos.append((repo.split(":")[1]).strip())
return active_repos, inactive_repos
def addPool(self, pools):
- '''
+ """
Takes a list of subscription pools and "attaches" them to the
current subscription
- '''
+ """
# An empty list was passed
if len(pools) == 0:
@@ -328,31 +359,33 @@ class SubscriptionManager(object):
pool_available, pool_consumed = self._getPools()
pool_list = []
- cmd = ['attach']
+ cmd = ["attach"]
for pool in pools:
if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
+ pool_list.append("--pool={0}".format(pool))
else:
self.log_warn("Pool {0} is not available".format(pool))
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
_sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s", (", ".join(pool_list))
- .replace('--pool=', ''))
+ self.log.debug(
+ "Attached the following pools to your system: %s",
+ (", ".join(pool_list)).replace("--pool=", ""),
+ )
return True
except subp.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
+ self.log_warn(
+ "Unable to attach pool {0} due to {1}".format(pool, e)
+ )
return False
def update_repos(self):
- '''
+ """
Takes a list of yum repo ids that need to be disabled or enabled; then
it verifies if they are already enabled or disabled and finally
executes the action to disable or enable
- '''
+ """
erepos = self.enable_repo
drepos = self.disable_repo
@@ -378,7 +411,7 @@ class SubscriptionManager(object):
enable_list = []
enable_list_fail = []
for repoid in erepos:
- if (repoid in inactive_repos):
+ if repoid in inactive_repos:
enable_list.append("--enable={0}".format(repoid))
else:
enable_list_fail.append(repoid)
@@ -399,14 +432,16 @@ class SubscriptionManager(object):
if fail in active_repos:
self.log.debug("Repo %s is already enabled", fail)
else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
+ self.log_warn(
+ "Repo {0} does not appear to exist".format(fail)
+ )
if len(disable_list_fail) > 0:
for fail in disable_list_fail:
- self.log.debug("Repo %s not disabled "
- "because it is not enabled", fail)
+ self.log.debug(
+ "Repo %s not disabled because it is not enabled", fail
+ )
- cmd = ['repos']
+ cmd = ["repos"]
if len(disable_list) > 0:
cmd.extend(disable_list)
@@ -420,11 +455,15 @@ class SubscriptionManager(object):
return False
if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s",
- (", ".join(enable_list)).replace('--enable=', ''))
+ self.log.debug(
+ "Enabled the following repos: %s",
+ (", ".join(enable_list)).replace("--enable=", ""),
+ )
if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s",
- (", ".join(disable_list)).replace('--disable=', ''))
+ self.log.debug(
+ "Disabled the following repos: %s",
+ (", ".join(disable_list)).replace("--disable=", ""),
+ )
return True
def is_configured(self):
@@ -432,13 +471,12 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
- '''
+ """
    Uses the preferred cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
- '''
- return subp.subp(['subscription-manager'] + cmd,
- logstring=logstring_val)
+ """
+ return subp.subp(["subscription-manager"] + cmd, logstring=logstring_val)
# vi: ts=4 expandtab
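
For reference, a minimal sketch of the registration path this module takes when ``activation-key`` and ``org`` are supplied. The helper name and values are illustrative, and the optional ``rhsm-baseurl``/``server-hostname`` handling is omitted::

    from cloudinit import subp

    def register_by_activation_key(activation_key, org):
        # mirrors SubscriptionManager.rhn_register() for the activation-key case
        cmd = [
            "register",
            "--activationkey={0}".format(activation_key),
            "--org={0}".format(org),
        ]
        # logstring=True (the module's logstring_val) keeps the key out of
        # cloud-init's command logging
        return subp.subp(["subscription-manager"] + cmd, logstring=True)
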
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index a5aca038..36a009a2 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -44,7 +44,7 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
# - read the blob of data from raw user data, and parse it as key/value
# - for each key that is found, download the content to
# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module
+# - the files in that directory will be run by the scripts-user module
# Therefore, this must run before that.
#
#
@@ -52,14 +52,14 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
import os
from urllib.parse import parse_qs
-from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
+MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
def handle(name, _cfg, cloud, log, _args):
@@ -72,13 +72,16 @@ def handle(name, _cfg, cloud, log, _args):
try:
mdict = parse_qs(ud)
if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
+ log.debug(
+ "Skipping module %s, did not find %s in parsed raw userdata",
+ name,
+ MY_HOOKNAME,
+ )
return
except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
+ util.logexc(
+ log, "Failed to parse query string %s into a dictionary", ud
+ )
raise
wrote_fns = []
@@ -87,7 +90,7 @@ def handle(name, _cfg, cloud, log, _args):
# These will eventually be then ran by the cc_scripts_user
# TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
+ scripts_d = cloud.get_ipath_cur("scripts")
urls = mdict[MY_HOOKNAME]
for (i, url) in enumerate(urls):
fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
@@ -99,8 +102,9 @@ def handle(name, _cfg, cloud, log, _args):
wrote_fns.append(fname)
except Exception as e:
captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
+ util.logexc(
+ log, "%s failed to read %s and write %s", MY_NAME, url, fname
+ )
if wrote_fns:
log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
@@ -110,8 +114,11 @@ def handle(name, _cfg, cloud, log, _args):
log.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warning("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one",
+ len(captured_excps),
+ )
raise captured_excps[-1]
+
# vi: ts=4 expandtab
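
For illustration, the parsing step described above treats the raw user data as a query string and turns every ``CLOUD_INIT_REMOTE_HOOK`` value into a script URL (the URLs here are made up)::

    from urllib.parse import parse_qs

    ud = ("CLOUD_INIT_REMOTE_HOOK=https://example.invalid/one"
          "&CLOUD_INIT_REMOTE_HOOK=https://example.invalid/two")
    mdict = parse_qs(ud)
    for i, url in enumerate(mdict.get("CLOUD_INIT_REMOTE_HOOK", [])):
        # the handler downloads each URL to instance/scripts/rightscale-%02i
        print("rightscale-%02i <- %s" % (i, url))
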
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 2a2bc931..db2a3c79 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -11,7 +11,7 @@
Rsyslog
-------
-**Summary:** configure system loggig via rsyslog
+**Summary:** configure system logging via rsyslog
This module configures remote system logging using rsyslog.
@@ -182,50 +182,45 @@ import os
import re
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
DEF_RELOAD = "auto"
DEF_REMOTES = {}
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
+KEYNAME_CONFIGS = "configs"
+KEYNAME_FILENAME = "config_filename"
+KEYNAME_DIR = "config_dir"
+KEYNAME_RELOAD = "service_reload_command"
+KEYNAME_LEGACY_FILENAME = "rsyslog_filename"
+KEYNAME_LEGACY_DIR = "rsyslog_dir"
+KEYNAME_REMOTES = "remotes"
LOG = logging.getLogger(__name__)
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+COMMENT_RE = re.compile(r"[ ]*[#]+[ ]*")
HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- r'([:](?P<port>[0-9]+))?$')
+ r"^(?P<proto>[@]{0,2})"
+ r"(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))"
+ r"([:](?P<port>[0-9]+))?$"
+)
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
+def reload_syslog(distro, command=DEF_RELOAD):
if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- subp.subp(cmd, capture=True)
+ service = distro.get_option("rsyslog_svcname", "rsyslog")
+ return distro.manage_service("try-reload", service)
+ return subp.subp(command, capture=True)
def load_config(cfg):
# return an updated config with entries of the correct type
# support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
+ mycfg = cfg.get("rsyslog", {})
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
+ if isinstance(cfg.get("rsyslog"), list):
+ mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
if KEYNAME_LEGACY_DIR in cfg:
@@ -236,7 +231,8 @@ def load_config(cfg):
(KEYNAME_DIR, DEF_DIR, str),
(KEYNAME_FILENAME, DEF_FILENAME, str),
(KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
+ (KEYNAME_REMOTES, DEF_REMOTES, dict),
+ )
for key, default, vtypes in fillup:
if key not in mycfg or not isinstance(mycfg[key], vtypes):
@@ -252,10 +248,11 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
for cur_pos, ent in enumerate(configs):
if isinstance(ent, dict):
if "content" not in ent:
- LOG.warning("No 'content' entry in config entry %s",
- cur_pos + 1)
+ LOG.warning(
+ "No 'content' entry in config entry %s", cur_pos + 1
+ )
continue
- content = ent['content']
+ content = ent["content"]
filename = ent.get("filename", def_fname)
else:
content = ent
@@ -306,9 +303,9 @@ def parse_remotes_line(line, name=None):
if not toks:
raise ValueError("Invalid host specification '%s'" % host_port)
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
+ proto = toks.group("proto")
+ addr = toks.group("addr") or toks.group("bracket_addr")
+ port = toks.group("port")
if addr.startswith("[") and not addr.endswith("]"):
raise ValueError("host spec had invalid brackets: %s" % addr)
@@ -316,15 +313,17 @@ def parse_remotes_line(line, name=None):
if comment and not name:
name = comment
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
+ t = SyslogRemotesLine(
+ name=name, match=match, proto=proto, addr=addr, port=port
+ )
t.validate()
return t
class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
+ def __init__(
+ self, name=None, match=None, proto=None, addr=None, port=None
+ ):
if not match:
match = "*.*"
self.name = name
@@ -357,7 +356,11 @@ class SyslogRemotesLine(object):
def __repr__(self):
return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
+ self.name,
+ self.match,
+ self.proto,
+ self.addr,
+ self.port,
)
def __str__(self):
@@ -395,13 +398,14 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
if footer is not None:
lines.append(footer)
- return '\n'.join(lines) + "\n"
+ return "\n".join(lines) + "\n"
def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
+ if "rsyslog" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'rsyslog' key in configuration", name
+ )
return
mycfg = load_config(cfg)
@@ -413,25 +417,25 @@ def handle(name, cfg, cloud, log, _args):
mycfg[KEYNAME_REMOTES],
header="# begin remotes",
footer="# end remotes",
- ))
+ )
+ )
- if not mycfg['configs']:
+ if not mycfg["configs"]:
log.debug("Empty config rsyslog['configs'], nothing to do")
return
changes = apply_rsyslog_changes(
configs=mycfg[KEYNAME_CONFIGS],
def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
+ cfg_dir=mycfg[KEYNAME_DIR],
+ )
if not changes:
log.debug("restart of syslog not necessary, no changes made")
return
try:
- restarted = reload_syslog(
- command=mycfg[KEYNAME_RELOAD],
- systemd=cloud.distro.uses_systemd()),
+ restarted = reload_syslog(cloud.distro, command=mycfg[KEYNAME_RELOAD])
except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
@@ -444,4 +448,5 @@ def handle(name, cfg, cloud, log, _args):
# the logging was setup to use it...
log.debug("%s configured %s files", name, changes)
+
# vi: ts=4 expandtab syntax=python
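
A worked example of the ``HOST_PORT_RE`` pattern used by ``parse_remotes_line`` above, applied to a hypothetical ``remotes`` value::

    import re

    HOST_PORT_RE = re.compile(
        r"^(?P<proto>[@]{0,2})"
        r"(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))"
        r"([:](?P<port>[0-9]+))?$"
    )

    toks = HOST_PORT_RE.match("@@[2001:db8::1]:514")
    proto = toks.group("proto")                              # "@@" (TCP in rsyslog)
    addr = toks.group("addr") or toks.group("bracket_addr")  # "2001:db8::1"
    port = toks.group("port")                                # "514"
    print(proto, addr, port)
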
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 1f75d6c5..c5206003 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -8,15 +8,17 @@
"""Runcmd: run arbitrary commands at rc.local with output to the console"""
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
-from cloudinit.distros import ALL_DISTROS
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import os
from textwrap import dedent
+from cloudinit import util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -26,17 +28,21 @@ from textwrap import dedent
distros = [ALL_DISTROS]
-schema = {
- 'id': 'cc_runcmd',
- 'name': 'Runcmd',
- 'title': 'Run arbitrary commands',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_runcmd",
+ "name": "Runcmd",
+ "title": "Run arbitrary commands",
+ "description": dedent(
+ """\
Run arbitrary commands at a rc.local like level with output to the
console. Each item can be either a list or a string. If the item is a
- list, it will be properly executed as if passed to ``execve()`` (with
- the first arg as the command). If the item is a string, it will be
- written to a file and interpreted
- using ``sh``.
+ list, it will be properly quoted. Each item is written to
+ ``/var/lib/cloud/instance/runcmd`` to be later interpreted using
+ ``sh``.
+
+ Note that the ``runcmd`` module only writes the script to be run
+ later. The module that actually runs the script is ``scripts-user``
+ in the :ref:`Final` boot stage.
.. note::
@@ -47,50 +53,61 @@ schema = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
runcmd:
- [ ls, -l, / ]
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- [ wget, "http://example.org", -O, /tmp/index.html ]
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'runcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ """
+ )
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "runcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ {"type": "null"},
+ ]
},
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
- 'required': [],
+ "additionalItems": False, # Reject items of non-string non-list
+ "additionalProperties": False,
+ "minItems": 1,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'runcmd' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
+ out_fn = os.path.join(cloud.get_ipath("scripts"), "runcmd")
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
+ except Exception as e:
+ raise type(e)("Failed to shellify {} into file {}".format(cmd, out_fn))
+
# vi: ts=4 expandtab
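
A rough sketch, not ``util.shellify`` itself, of the transformation the updated description refers to: the ``runcmd`` entries become one shell script under the instance's scripts directory, which the ``scripts-user`` module later runs::

    import shlex

    runcmd = [
        ["ls", "-l", "/"],
        "ls -l /root",
    ]

    lines = ["#!/bin/sh"]
    for entry in runcmd:
        if isinstance(entry, list):
            lines.append(" ".join(shlex.quote(tok) for tok in entry))
        else:
            lines.append(entry)
    print("\n".join(lines) + "\n")  # roughly what is written to .../scripts/runcmd
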
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index b61876aa..0eb46664 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -46,8 +46,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
from cloudinit import safeyaml, subp, util
-from cloudinit.distros import rhel_util
-
+from cloudinit.distros import bsd_utils
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -57,36 +56,40 @@ class SaltConstants(object):
"""
defines default distribution specific salt variables
"""
+
def __init__(self, cfg):
# constants tailored for FreeBSD
if util.is_FreeBSD():
- self.pkg_name = 'py36-salt'
- self.srv_name = 'salt_minion'
- self.conf_dir = '/usr/local/etc/salt'
+ self.pkg_name = "py-salt"
+ self.srv_name = "salt_minion"
+ self.conf_dir = "/usr/local/etc/salt"
# constants for any other OS
else:
- self.pkg_name = 'salt-minion'
- self.srv_name = 'salt-minion'
- self.conf_dir = '/etc/salt'
+ self.pkg_name = "salt-minion"
+ self.srv_name = "salt-minion"
+ self.conf_dir = "/etc/salt"
# if there are constants given in cloud config use those
- self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name',
- self.pkg_name)
- self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir',
- self.conf_dir)
- self.srv_name = util.get_cfg_option_str(cfg, 'service_name',
- self.srv_name)
+ self.pkg_name = util.get_cfg_option_str(cfg, "pkg_name", self.pkg_name)
+ self.conf_dir = util.get_cfg_option_str(
+ cfg, "config_dir", self.conf_dir
+ )
+ self.srv_name = util.get_cfg_option_str(
+ cfg, "service_name", self.srv_name
+ )
def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
+ if "salt_minion" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'salt_minion' key in configuration",
+ name,
+ )
return
- s_cfg = cfg['salt_minion']
+ s_cfg = cfg["salt_minion"]
const = SaltConstants(cfg=s_cfg)
# Start by installing the salt package ...
@@ -96,40 +99,40 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dir(const.conf_dir)
# ... and then update the salt configuration
- if 'conf' in s_cfg:
+ if "conf" in s_cfg:
# Add all sections from the conf object to minion config file
- minion_config = os.path.join(const.conf_dir, 'minion')
- minion_data = safeyaml.dumps(s_cfg.get('conf'))
+ minion_config = os.path.join(const.conf_dir, "minion")
+ minion_data = safeyaml.dumps(s_cfg.get("conf"))
util.write_file(minion_config, minion_data)
- if 'grains' in s_cfg:
+ if "grains" in s_cfg:
# add grains to /etc/salt/grains
- grains_config = os.path.join(const.conf_dir, 'grains')
- grains_data = safeyaml.dumps(s_cfg.get('grains'))
+ grains_config = os.path.join(const.conf_dir, "grains")
+ grains_data = safeyaml.dumps(s_cfg.get("grains"))
util.write_file(grains_config, grains_data)
# ... copy the key pair if specified
- if 'public_key' in s_cfg and 'private_key' in s_cfg:
+ if "public_key" in s_cfg and "private_key" in s_cfg:
pki_dir_default = os.path.join(const.conf_dir, "pki/minion")
if not os.path.isdir(pki_dir_default):
pki_dir_default = os.path.join(const.conf_dir, "pki")
- pki_dir = s_cfg.get('pki_dir', pki_dir_default)
+ pki_dir = s_cfg.get("pki_dir", pki_dir_default)
with util.umask(0o77):
util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, s_cfg['public_key'])
- util.write_file(pem_name, s_cfg['private_key'])
+ pub_name = os.path.join(pki_dir, "minion.pub")
+ pem_name = os.path.join(pki_dir, "minion.pem")
+ util.write_file(pub_name, s_cfg["public_key"])
+ util.write_file(pem_name, s_cfg["private_key"])
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
- if cloud.distro.osfamily == 'freebsd':
- rhel_util.update_sysconfig_file(
- '/etc/rc.conf', {'salt_minion_enable': 'YES'})
+ if cloud.distro.osfamily == "freebsd":
+ bsd_utils.set_rc_config_value("salt_minion_enable", "YES")
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- subp.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(["service", const.srv_name, "restart"], capture=False)
+
# vi: ts=4 expandtab
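
For context, the configuration writes shown above amount to the following; the master value is a placeholder and the default Linux ``conf_dir`` is assumed::

    import os
    from cloudinit import safeyaml

    s_cfg = {
        "conf": {"master": "salt.example.invalid"},
        "grains": {"role": "web"},
    }
    conf_dir = "/etc/salt"  # /usr/local/etc/salt on FreeBSD

    minion_config = os.path.join(conf_dir, "minion")
    grains_config = os.path.join(conf_dir, "grains")
    # the handler writes these with util.write_file(); just show the data here
    print(minion_config, "<-", safeyaml.dumps(s_cfg["conf"]))
    print(grains_config, "<-", safeyaml.dumps(s_cfg["grains"]))
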
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 1e3f419e..b7bfb7aa 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -17,7 +17,7 @@ module does not accept any config keys.
**Internal name:** ``cc_scripts_per_boot``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
"""
@@ -25,23 +25,27 @@ module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-SCRIPT_SUBDIR = 'per-boot'
+SCRIPT_SUBDIR = "per-boot"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
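
This module and the other ``cc_scripts_*`` modules below share the same shape; a minimal sketch of that pattern, with the cloud path assumed for illustration::

    import os
    from cloudinit import subp

    cloud_path = "/var/lib/cloud"  # placeholder for cloud.get_cpath()
    runparts_path = os.path.join(cloud_path, "scripts", "per-boot")
    if os.path.isdir(runparts_path):
        # run every executable file in the directory, in order
        subp.runparts(runparts_path)
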
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 5966fb9a..ef102b1c 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -28,23 +28,27 @@ the system. As a result per-instance scripts will run again.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'per-instance'
+SCRIPT_SUBDIR = "per-instance"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index bcca859e..bf4231e7 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -26,23 +26,27 @@ be run in alphabetical order. This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ONCE
frequency = PER_ONCE
-SCRIPT_SUBDIR = 'per-once'
+SCRIPT_SUBDIR = "per-once"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 215703ef..e0d6c560 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -28,12 +28,11 @@ This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'scripts'
+SCRIPT_SUBDIR = "scripts"
def handle(name, _cfg, cloud, log, _args):
@@ -44,8 +43,13 @@ def handle(name, _cfg, cloud, log, _args):
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index e0a4bfff..1b30fa1b 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,29 +28,33 @@ entry under the ``vendor_data`` config key.
import os
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'vendor'
+SCRIPT_SUBDIR = "vendor"
def handle(name, cfg, cloud, log, _args):
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
+ runparts_path = os.path.join(
+ cloud.get_ipath_cur(), "scripts", SCRIPT_SUBDIR
+ )
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+ prefix = util.get_cfg_by_path(cfg, ("vendor_data", "prefix"), [])
try:
subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
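
The vendor variant differs only in honouring an optional ``prefix`` from ``vendor_data``; a hedged sketch with a made-up prefix::

    import os
    from cloudinit import subp, util

    cfg = {"vendor_data": {"prefix": ["timeout", "60"]}}  # hypothetical prefix
    instance_path = "/var/lib/cloud/instance"  # placeholder for get_ipath_cur()

    runparts_path = os.path.join(instance_path, "scripts", "vendor")
    prefix = util.get_cfg_by_path(cfg, ("vendor_data", "prefix"), [])
    if os.path.isdir(runparts_path):
        subp.runparts(runparts_path, exe_prefix=prefix)
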
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 4fb9b44e..67ba8ef5 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The
optionally be specified in encoded form, with the encoding specified in
``encoding``.
+If the cloud provides its own random seed data, it will be appended to ``data``
+before it is written to ``file``.
+
.. note::
when using a multiline value for ``data`` or specifying binary data, be
sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
specifiers when appropriate
-Instead of specifying a data string, a command can be run to generate/collect
-the data to be written. The command should be specified as a list of args in
-the ``command`` key. If a command is specified that cannot be run, no error
-will be reported unless ``command_required`` is set to true.
+If the ``command`` key is specified, the given command will be executed. This
+will happen after ``file`` has been populated. That command's environment will
+contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is
+specified that cannot be run, no error will be reported unless
+``command_required`` is set to true.
For example, to use ``pollinate`` to gather data from a
remote entropy server and write it to ``/dev/urandom``, the following could be
@@ -64,9 +68,8 @@ import os
from io import BytesIO
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -74,12 +77,12 @@ LOG = logging.getLogger(__name__)
def _decode(data, encoding=None):
if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
+ return b""
+ if not encoding or encoding.lower() in ["raw"]:
return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
+ elif encoding.lower() in ["base64", "b64"]:
return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
+ elif encoding.lower() in ["gzip", "gz"]:
return util.decomp_gzip(data, quiet=False, decode=None)
else:
raise IOError("Unknown random_seed encoding: %s" % (encoding))
@@ -96,7 +99,8 @@ def handle_random_seed_command(command, required, env=None):
if not subp.which(cmd):
if required:
raise ValueError(
- "command '{cmd}' not found but required=true".format(cmd=cmd))
+ "command '{cmd}' not found but required=true".format(cmd=cmd)
+ )
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
@@ -104,34 +108,39 @@ def handle_random_seed_command(command, required, env=None):
def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
+ mycfg = cfg.get("random_seed", {})
+ seed_path = mycfg.get("file", "/dev/urandom")
+ seed_data = mycfg.get("data", b"")
seed_buf = BytesIO()
if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
+ seed_buf.write(_decode(seed_data, encoding=mycfg.get("encoding")))
# 'random_seed' is set up by Azure datasource, and comes already in
# openstack meta_data.json
metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
+ if metadata and "random_seed" in metadata:
+ seed_buf.write(util.encode_text(metadata["random_seed"]))
seed_data = seed_buf.getvalue()
if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
+ log.debug(
+ "%s: adding %s bytes of random seed entropy to %s",
+ name,
+ len(seed_data),
+ seed_path,
+ )
util.append_file(seed_path, seed_data)
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
+ command = mycfg.get("command", None)
+ req = mycfg.get("command_required", False)
try:
env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
+ env["RANDOM_SEED_FILE"] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
log.warning("handling random command [%s] failed: %s", command, e)
raise e
+
# vi: ts=4 expandtab
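
A small worked example of the decode-and-buffer step described above, using base64-encoded ``data`` (the bytes here are obviously not random)::

    import base64
    from io import BytesIO

    mycfg = {
        "file": "/dev/urandom",
        "data": base64.b64encode(b"not really random").decode(),
        "encoding": "b64",
    }

    seed_buf = BytesIO()
    # equivalent to _decode(mycfg["data"], encoding="b64")
    seed_buf.write(base64.b64decode(mycfg["data"]))
    # a datasource-provided metadata["random_seed"] would be appended here
    seed_data = seed_buf.getvalue()
    print("%d bytes would be appended to %s" % (len(seed_data), mycfg["file"]))
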
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 1d23d80d..eb0ca328 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -18,8 +18,11 @@ A hostname and fqdn can be provided by specifying a full domain name under the
``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
key, and the fqdn of the cloud will be used. If a fqdn is specified with the
``hostname`` key, it will be handled properly, although it is better to use
-the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
-will be used.
+the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, which
+value is used is distro dependent, unless the ``prefer_fqdn_over_hostname``
+option is set: when it is true (and ``fqdn`` is provided) the FQDN is used
+on all distros, and when it is false the hostname is used on all distros
+instead.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
@@ -31,22 +34,22 @@ based on initial hostname.
**Internal name:** ``cc_set_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
import os
-
-from cloudinit.atomic_helper import write_json
from cloudinit import util
+from cloudinit.atomic_helper import write_json
class SetHostnameError(Exception):
@@ -59,9 +62,20 @@ class SetHostnameError(Exception):
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not setting the hostname in module %s",
+ name,
+ )
return
+
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
+ if hostname_fqdn is not None:
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
# Check for previous successful invocation of set-hostname
@@ -70,14 +84,15 @@ def handle(name, cfg, cloud, log, _args):
# previous-hostname file which only contains the base hostname.
# TODO consolidate previous-hostname and set-hostname artifact files and
# distro._read_hostname implementation so we only validate one artifact.
- prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
prev_hostname = {}
if os.path.exists(prev_fn):
prev_hostname = util.load_json(util.load_file(prev_fn))
- hostname_changed = (hostname != prev_hostname.get('hostname') or
- fqdn != prev_hostname.get('fqdn'))
+ hostname_changed = hostname != prev_hostname.get(
+ "hostname"
+ ) or fqdn != prev_hostname.get("fqdn")
if not hostname_changed:
- log.debug('No hostname changes. Skipping set-hostname')
+ log.debug("No hostname changes. Skipping set-hostname")
return
log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
try:
@@ -86,6 +101,7 @@ def handle(name, cfg, cloud, log, _args):
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
raise SetHostnameError("%s: %s" % (msg, e)) from e
- write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
+ write_json(prev_fn, {"hostname": hostname, "fqdn": fqdn})
+
# vi: ts=4 expandtab
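
The change detection above compares against a small JSON artifact holding the previously applied values; a sketch of that comparison, with the artifact path assumed::

    import json
    import os

    prev_fn = "/var/lib/cloud/data/set-hostname"  # assumed artifact location
    hostname, fqdn = "node1", "node1.example.invalid"

    prev_hostname = {}
    if os.path.exists(prev_fn):
        with open(prev_fn) as f:
            prev_hostname = json.load(f)

    hostname_changed = (hostname != prev_hostname.get("hostname")
                        or fqdn != prev_hostname.get("fqdn"))
    if not hostname_changed:
        print("No hostname changes. Skipping set-hostname")
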
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index d6b5682d..d8df8e23 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -78,43 +78,36 @@ password.
"""
import re
-import sys
+from string import ascii_letters, digits
-from cloudinit.distros import ug_util
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
from cloudinit.ssh_util import update_ssh_config
-from cloudinit import subp
-from cloudinit import util
-
-from string import ascii_letters, digits
LOG = logging.getLogger(__name__)
# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
+PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
-def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+def handle_ssh_pwauth(pw_auth, distro):
"""Apply sshd PasswordAuthentication changes.
@param pw_auth: config setting from 'pw_auth'.
Best given as True, False, or "unchanged".
- @param service_cmd: The service command list (['service'])
- @param service_name: The name of the sshd service for the system.
+ @param distro: an instance of the distro class for the target distribution
@return: None"""
cfg_name = "PasswordAuthentication"
- if service_cmd is None:
- service_cmd = ["service"]
if util.is_true(pw_auth):
- cfg_val = 'yes'
+ cfg_val = "yes"
elif util.is_false(pw_auth):
- cfg_val = 'no'
+ cfg_val = "no"
else:
bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
- if pw_auth is None or pw_auth.lower() == 'unchanged':
+ if pw_auth is None or pw_auth.lower() == "unchanged":
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
@@ -125,39 +118,35 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- if 'systemctl' in service_cmd:
- cmd = list(service_cmd) + ["restart", service_name]
- else:
- cmd = list(service_cmd) + [service_name, "restart"]
- subp.subp(cmd)
+ distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
+ if args:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
+ if "chpasswd" in cfg and "list" in cfg["chpasswd"]:
+ del cfg["chpasswd"]["list"]
else:
password = util.get_cfg_option_str(cfg, "password", None)
expire = True
plist = None
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- if 'list' in chfg and chfg['list']:
- if isinstance(chfg['list'], list):
+ if "chpasswd" in cfg:
+ chfg = cfg["chpasswd"]
+ if "list" in chfg and chfg["list"]:
+ if isinstance(chfg["list"], list):
log.debug("Handling input for chpasswd as list.")
- plist = util.get_cfg_option_list(chfg, 'list', plist)
+ plist = util.get_cfg_option_list(chfg, "list", plist)
else:
log.debug("Handling input for chpasswd as multiline string.")
- plist = util.get_cfg_option_str(chfg, 'list', plist)
+ plist = util.get_cfg_option_str(chfg, "list", plist)
if plist:
plist = plist.splitlines()
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
+ expire = util.get_cfg_option_bool(chfg, "expire", expire)
if not plist and password:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
@@ -176,9 +165,9 @@ def handle(_name, cfg, cloud, log, args):
users = []
# N.B. This regex is included in the documentation (i.e. the module
# docstring), so any changes to it should be reflected there.
- prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
+ prog = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")
for line in plist:
- u, p = line.split(':', 1)
+ u, p = line.split(":", 1)
if prog.match(p) is not None and ":" not in p:
hashed_plist_in.append(line)
hashed_users.append(u)
@@ -190,7 +179,7 @@ def handle(_name, cfg, cloud, log, args):
randlist.append("%s:%s" % (u, p))
plist_in.append("%s:%s" % (u, p))
users.append(u)
- ch_in = '\n'.join(plist_in) + '\n'
+ ch_in = "\n".join(plist_in) + "\n"
if users:
try:
log.debug("Changing password for %s:", users)
@@ -198,9 +187,10 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set passwords with chpasswd for %s", users)
+ log, "Failed to set passwords with chpasswd for %s", users
+ )
- hashed_ch_in = '\n'.join(hashed_plist_in) + '\n'
+ hashed_ch_in = "\n".join(hashed_plist_in) + "\n"
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
@@ -208,13 +198,19 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set hashed passwords with chpasswd for %s",
- hashed_users)
+ log,
+ "Failed to set hashed passwords with chpasswd for %s",
+ hashed_users,
+ )
if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
+ blurb = (
+ "Set the following 'random' passwords\n",
+ "\n".join(randlist),
+ )
+ util.multi_log(
+ "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False
+ )
if expire:
expired_users = []
@@ -228,9 +224,7 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- handle_ssh_pwauth(
- cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
- service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
+ handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
@@ -247,7 +241,8 @@ def chpasswd(distro, plist_in, hashed=False):
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
- cmd = ['chpasswd'] + (['-e'] if hashed else [])
+ cmd = ["chpasswd"] + (["-e"] if hashed else [])
subp.subp(cmd, plist_in)
+
# vi: ts=4 expandtab
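
The classification of ``chpasswd`` list entries relies on the crypt-hash regex shown above; a worked example with made-up entries::

    import re

    prog = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")
    plist = [
        "alice:$6$somesalt$someencryptedhashvalue",
        "bob:plaintextpassword",
    ]

    hashed_plist_in, plist_in = [], []
    for line in plist:
        u, p = line.split(":", 1)
        if prog.match(p) is not None and ":" not in p:
            hashed_plist_in.append(line)  # later fed to `chpasswd -e`
        else:
            plist_in.append(line)         # later fed to plain `chpasswd`
    print(hashed_plist_in, plist_in)
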
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 20ed7d2f..9f343df0 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -8,24 +8,26 @@ import sys
from textwrap import dedent
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
-from cloudinit import subp
-from cloudinit import util
-
-distros = ['ubuntu']
+distros = ["ubuntu"]
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
-schema = {
- 'id': 'cc_snap',
- 'name': 'Snap',
- 'title': 'Install, configure and manage snapd and snap packages',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_snap",
+ "name": "Snap",
+ "title": "Install, configure and manage snapd and snap packages",
+ "description": dedent(
+ """\
This module provides a simple configuration namespace in cloud-init to
both setup snapd and install snaps.
@@ -56,9 +58,12 @@ schema = {
**Development only**: The ``squashfuse_in_container`` boolean can be
set true to install squashfuse package when in a container to enable
snap installs. Default is false.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
snap:
assertions:
00: |
@@ -69,14 +74,20 @@ schema = {
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
02: canonical-livepatch enable <AUTH_TOKEN>
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# LXC-based containers require squashfuse before snaps can be installed
snap:
commands:
00: apt-get install squashfuse -y
11: snap install emoj
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Convenience: the snap command can be omitted when specifying commands
# as a list and 'snap' will automatically be prepended.
# The following commands are equivalent:
@@ -86,7 +97,10 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of commands
snap:
commands:
@@ -94,58 +108,64 @@ schema = {
- ['snap', 'install', 'vlc']
- snap install vlc
- 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of assertions
snap:
assertions:
- signed_assertion_blob_here
- |
signed_assertion_blob_here
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'snap': {
- 'type': 'object',
- 'properties': {
- 'assertions': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {'type': 'string'},
- 'additionalItems': False, # Reject items non-string
- 'minItems': 1,
- 'minProperties': 1,
- 'uniqueItems': True,
- 'additionalProperties': {'type': 'string'},
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "snap": {
+ "type": "object",
+ "properties": {
+ "assertions": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "minItems": 1,
+ "minProperties": 1,
+ "uniqueItems": True,
+ "additionalProperties": {"type": "string"},
},
- 'commands': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ "commands": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ ]
},
- 'additionalItems': False, # Reject non-string & non-list
- 'minItems': 1,
- 'minProperties': 1,
- 'additionalProperties': {
- 'oneOf': [
- {'type': 'string'},
- {'type': 'array', 'items': {'type': 'string'}},
+ "additionalItems": False, # Reject non-string & non-list
+ "minItems": 1,
+ "minProperties": 1,
+ "additionalProperties": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}},
],
},
},
- 'squashfuse_in_container': {
- 'type': 'boolean'
- }
+ "squashfuse_in_container": {"type": "boolean"},
},
- 'additionalProperties': False, # Reject keys not in schema
- 'required': [],
- 'minProperties': 1
+ "additionalProperties": False, # Reject keys not in schema
+ "minProperties": 1,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
SNAP_CMD = "snap"
ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
@@ -161,45 +181,49 @@ def add_assertions(assertions):
"""
if not assertions:
return
- LOG.debug('Importing user-provided snap assertions')
+ LOG.debug("Importing user-provided snap assertions")
if isinstance(assertions, dict):
assertions = assertions.values()
elif not isinstance(assertions, list):
raise TypeError(
- 'assertion parameter was not a list or dict: {assertions}'.format(
- assertions=assertions))
+ "assertion parameter was not a list or dict: {assertions}".format(
+ assertions=assertions
+ )
+ )
- snap_cmd = [SNAP_CMD, 'ack']
+ snap_cmd = [SNAP_CMD, "ack"]
combined = "\n".join(assertions)
for asrt in assertions:
- LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
+ LOG.debug("Snap acking: %s", asrt.split("\n")[0:2])
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
+ util.write_file(ASSERTIONS_FILE, combined.encode("utf-8"))
subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
"""Run the provided commands provided in snap:commands configuration.
- Commands are run individually. Any errors are collected and reported
- after attempting all commands.
+ Commands are run individually. Any errors are collected and reported
+ after attempting all commands.
- @param commands: A list or dict containing commands to run. Keys of a
- dict will be used to order the commands provided as dict values.
- """
+ @param commands: A list or dict containing commands to run. Keys of a
+ dict will be used to order the commands provided as dict values.
+ """
if not commands:
return
- LOG.debug('Running user-provided snap commands')
+ LOG.debug("Running user-provided snap commands")
if isinstance(commands, dict):
# Sort commands based on dictionary key
commands = [v for _, v in sorted(commands.items())]
elif not isinstance(commands, list):
raise TypeError(
- 'commands parameter was not a list or dict: {commands}'.format(
- commands=commands))
+ "commands parameter was not a list or dict: {commands}".format(
+ commands=commands
+ )
+ )
- fixed_snap_commands = prepend_base_command('snap', commands)
+ fixed_snap_commands = prepend_base_command("snap", commands)
cmd_failures = []
for command in fixed_snap_commands:
@@ -209,8 +233,9 @@ def run_commands(commands):
except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
- msg = 'Failures running snap commands:\n{cmd_failures}'.format(
- cmd_failures=cmd_failures)
+ msg = "Failures running snap commands:\n{cmd_failures}".format(
+ cmd_failures=cmd_failures
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg)
@@ -226,23 +251,25 @@ def maybe_install_squashfuse(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['squashfuse'])
+ cloud.distro.install_packages(["squashfuse"])
except Exception:
util.logexc(LOG, "Failed to install squashfuse")
raise
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snap', {})
+ cfgin = cfg.get("snap", {})
if not cfgin:
- LOG.debug(("Skipping module named %s,"
- " no 'snap' key in configuration"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'snap' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if util.is_true(cfgin.get('squashfuse_in_container', False)):
+ if util.is_true(cfgin.get("squashfuse_in_container", False)):
maybe_install_squashfuse(cloud)
- add_assertions(cfgin.get('assertions', []))
- run_commands(cfgin.get('commands', []))
+ add_assertions(cfgin.get("assertions", []))
+ run_commands(cfgin.get("commands", []))
+
# vi: ts=4 expandtab
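
A hedged sketch of the command normalisation done via ``prepend_base_command``: list-form commands get ``snap`` prepended when it is missing, string commands are left for the shell. This is a simplification; the real helper also validates entry types::

    def prepend_base_command(base, commands):
        # simplified stand-in for cloudinit.subp.prepend_base_command
        fixed = []
        for cmd in commands:
            if isinstance(cmd, list) and cmd[:1] != [base]:
                fixed.append([base] + cmd)
            else:
                fixed.append(cmd)
        return fixed

    print(prepend_base_command("snap", [["install", "vlc"], "snap install vlc"]))
    # [['snap', 'install', 'vlc'], 'snap install vlc']
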
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 95083607..3fa6c388 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -29,9 +29,8 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
from cloudinit import subp
-
-distros = ['redhat', 'fedora']
-required_packages = ['rhn-setup']
+distros = ["redhat", "fedora"]
+required_packages = ["rhn-setup"]
def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
@@ -41,7 +40,7 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(["rhn-profile-sync", "--verbose"], capture=False)
already_registered = True
except subp.ProcessExecutionError as e:
if e.exit_code != 1:
@@ -49,42 +48,58 @@ def is_registered():
return already_registered
-def do_register(server, profile_name,
- ca_cert_path=def_ca_cert_path,
- proxy=None, log=None,
- activation_key=None):
+def do_register(
+ server,
+ profile_name,
+ ca_cert_path=def_ca_cert_path,
+ proxy=None,
+ log=None,
+ activation_key=None,
+):
if log is not None:
- log.info("Registering using `rhnreg_ks` profile '%s'"
- " into server '%s'", profile_name, server)
- cmd = ['rhnreg_ks']
- cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
- cmd.extend(['--profilename', str(profile_name)])
+ log.info(
+ "Registering using `rhnreg_ks` profile '%s' into server '%s'",
+ profile_name,
+ server,
+ )
+ cmd = ["rhnreg_ks"]
+ cmd.extend(["--serverUrl", "https://%s/XMLRPC" % server])
+ cmd.extend(["--profilename", str(profile_name)])
if proxy:
cmd.extend(["--proxy", str(proxy)])
if ca_cert_path:
- cmd.extend(['--sslCACert', str(ca_cert_path)])
+ cmd.extend(["--sslCACert", str(ca_cert_path)])
if activation_key:
- cmd.extend(['--activationkey', str(activation_key)])
+ cmd.extend(["--activationkey", str(activation_key)])
subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
- if 'spacewalk' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'spacewalk' key in configuration"), name)
+ if "spacewalk" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'spacewalk' key in configuration",
+ name,
+ )
return
- cfg = cfg['spacewalk']
- spacewalk_server = cfg.get('server')
+ cfg = cfg["spacewalk"]
+ spacewalk_server = cfg.get("server")
if spacewalk_server:
# Need to have this installed before further things will work.
cloud.distro.install_packages(required_packages)
if not is_registered():
- do_register(spacewalk_server,
- cloud.datasource.get_hostname(fqdn=True),
- proxy=cfg.get("proxy"), log=log,
- activation_key=cfg.get('activation_key'))
+ do_register(
+ spacewalk_server,
+ cloud.datasource.get_hostname(fqdn=True),
+ proxy=cfg.get("proxy"),
+ log=log,
+ activation_key=cfg.get("activation_key"),
+ )
else:
- log.debug("Skipping module named %s, 'spacewalk/server' key"
- " was not found in configuration", name)
+ log.debug(
+ "Skipping module named %s, 'spacewalk/server' key"
+ " was not found in configuration",
+ name,
+ )
+
# vi: ts=4 expandtab
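
The do_register() reflow above keeps the same rhnreg_ks argument handling. As a rough illustration, the command list can be assembled like this (server, profile and activation key values below are placeholders, and the command is only printed, never executed):

def build_rhnreg_ks_cmd(server, profile_name, ca_cert_path=None,
                        proxy=None, activation_key=None):
    """Assemble the rhnreg_ks argument list the way cc_spacewalk does."""
    cmd = ["rhnreg_ks"]
    cmd.extend(["--serverUrl", "https://%s/XMLRPC" % server])
    cmd.extend(["--profilename", str(profile_name)])
    if proxy:
        cmd.extend(["--proxy", str(proxy)])
    if ca_cert_path:
        cmd.extend(["--sslCACert", str(ca_cert_path)])
    if activation_key:
        cmd.extend(["--activationkey", str(activation_key)])
    return cmd

print(build_rhnreg_ks_cmd("spacewalk.example.com", "host01.example.com",
                          activation_key="1-abcdef"))
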
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 05a16dbc..64486b9c 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -17,7 +17,7 @@ keys.
Authorized Keys
^^^^^^^^^^^^^^^
-Authorized keys are a list of public SSH keys that are allowed to connect to a
+Authorized keys are a list of public SSH keys that are allowed to connect to
a user account on a system. They are stored in `.ssh/authorized_keys` in that
account's home directory. Authorized keys for the default user defined in
``users`` can be specified using ``ssh_authorized_keys``. Keys
@@ -89,6 +89,10 @@ optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
key types. Not all key types have to be specified, ones left unspecified will
not be used. If this config option is used, then no keys will be generated.
+When host keys are generated, the output of the ssh-keygen command(s) can be
+suppressed on the console by setting the ``ssh_quiet_keygen`` configuration
+key. This setting defaults to False, which displays the keygen output.
+
.. note::
when specifying private host keys in cloud-config, care should be taken to
ensure that the communication between the data source and the instance is
@@ -151,33 +155,33 @@ config flags are:
ssh_publish_hostkeys:
enabled: <true/false> (Defaults to true)
blacklist: <list of key types> (Defaults to [dsa])
+ ssh_quiet_keygen: <true/false>
"""
import glob
import os
import sys
+from cloudinit import ssh_util, subp, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import subp
-from cloudinit import util
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
+KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
PUBLISH_HOST_KEYS = True
# Don't publish the dsa hostkey by default since OpenSSH recommends not using
# it.
-HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
+HOST_KEY_PUBLISH_BLACKLIST = ["dsa"]
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+ {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}
+ )
CONFIG_KEY_TO_FILE.update(
- {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)})
+ {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)}
+ )
PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
@@ -204,57 +208,86 @@ def handle(_name, cfg, cloud, log, _args):
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
util.write_file(tgt_fn, val, tgt_perms)
# set server to present the most recently identified certificate
- if '_certificate' in key:
- cert_config = {'HostCertificate': tgt_fn}
+ if "_certificate" in key:
+ cert_config = {"HostCertificate": tgt_fn}
ssh_util.update_ssh_config(cert_config)
- for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
+ for private_type, public_type in PRIV_TO_PUB.items():
+ if (
+ public_type in cfg["ssh_keys"]
+ or private_type not in cfg["ssh_keys"]
+ ):
continue
- pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
+ private_file, public_file = (
+ CONFIG_KEY_TO_FILE[private_type][0],
+ CONFIG_KEY_TO_FILE[public_type][0],
+ )
+ cmd = ["sh", "-xc", KEY_GEN_TPL % (private_file, public_file)]
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
subp.subp(cmd, capture=False)
- log.debug("Generated a key for %s from %s", pair[0], pair[1])
+ log.debug(
+ f"Generated a key for {public_file} from {private_file}"
+ )
except Exception:
- util.logexc(log, "Failed generated a key for %s from %s",
- pair[0], pair[1])
+ util.logexc(
+ log,
+ "Failed generating a key for "
+ f"{public_file} from {private_file}",
+ )
else:
# if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
+ genkeys = util.get_cfg_option_list(
+ cfg, "ssh_genkeytypes", GENERATE_KEY_NAMES
+ )
lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
+ lang_c["LANG"] = "C"
for keytype in genkeys:
keyfile = KEY_FILE_TPL % (keytype)
if os.path.exists(keyfile):
continue
util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ cmd = ["ssh-keygen", "-t", keytype, "-N", "", "-f", keyfile]
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
out, err = subp.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
+ if not util.get_cfg_option_bool(
+ cfg, "ssh_quiet_keygen", False
+ ):
+ sys.stdout.write(util.decode_binary(out))
+
+ gid = util.get_group_id("ssh_keys")
+ if gid != -1:
+ # perform same "sanitize permissions" as sshd-keygen
+ os.chown(keyfile, -1, gid)
+ os.chmod(keyfile, 0o640)
+ os.chmod(keyfile + ".pub", 0o644)
except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
- err.lower().startswith("unknown key")):
+ if e.exit_code == 1 and err.lower().startswith(
+ "unknown key"
+ ):
log.debug("ssh-keygen: unknown key type '%s'", keytype)
else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
+ util.logexc(
+ log,
+ "Failed generating key type %s to file %s",
+ keytype,
+ keyfile,
+ )
if "ssh_publish_hostkeys" in cfg:
host_key_blacklist = util.get_cfg_option_list(
- cfg["ssh_publish_hostkeys"], "blacklist",
- HOST_KEY_PUBLISH_BLACKLIST)
+ cfg["ssh_publish_hostkeys"],
+ "blacklist",
+ HOST_KEY_PUBLISH_BLACKLIST,
+ )
publish_hostkeys = util.get_cfg_option_bool(
- cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS
+ )
else:
host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
publish_hostkeys = PUBLISH_HOST_KEYS
@@ -270,15 +303,18 @@ def handle(_name, cfg, cloud, log, _args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- ssh_util.DISABLE_USER_OPTS)
+ disable_root_opts = util.get_cfg_option_str(
+ cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS
+ )
keys = []
- if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
+ if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
- log.debug('Skipping import of publish SSH keys per '
- 'config setting: allow_public_ssh_keys=False')
+ log.debug(
+ "Skipping import of publish SSH keys per "
+ "config setting: allow_public_ssh_keys=False"
+ )
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
@@ -298,12 +334,12 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
if disable_root:
if not user:
user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
+ key_prefix = disable_root_opts.replace("$USER", user)
+ key_prefix = key_prefix.replace("$DISABLE_USER", "root")
else:
- key_prefix = ''
+ key_prefix = ""
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
+ ssh_util.setup_user_keys(keys, "root", options=key_prefix)
def get_public_host_keys(blacklist=None):
@@ -313,18 +349,21 @@ def get_public_host_keys(blacklist=None):
@returns: List of keys, each formatted as a two-element tuple.
e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
"""
- public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+ public_key_file_tmpl = "%s.pub" % (KEY_FILE_TPL,)
key_list = []
blacklist_files = []
if blacklist:
# Convert blacklist to filenames:
# 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
- blacklist_files = [public_key_file_tmpl % (key_type,)
- for key_type in blacklist]
+ blacklist_files = [
+ public_key_file_tmpl % (key_type,) for key_type in blacklist
+ ]
# Get list of public key files and filter out blacklisted files.
- file_list = [hostfile for hostfile
- in glob.glob(public_key_file_tmpl % ('*',))
- if hostfile not in blacklist_files]
+ file_list = [
+ hostfile
+ for hostfile in glob.glob(public_key_file_tmpl % ("*",))
+ if hostfile not in blacklist_files
+ ]
# Read host key files, retrieve first two fields as a tuple and
# append that tuple to key_list.
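
Two behavioural additions sit inside the reformatting of cc_ssh: ssh-keygen output is only echoed to stdout when ssh_quiet_keygen is false, and freshly generated private keys are handed to an ssh_keys group with 0o640/0o644 modes when that group exists, mirroring sshd-keygen. A standalone sketch of the permission fixup, assuming a key file path and using the standard library's grp module in place of util.get_group_id:

import grp
import os

def sanitize_host_key_perms(keyfile, group="ssh_keys"):
    """Apply the same ownership/permission fixup cc_ssh performs after keygen."""
    try:
        gid = grp.getgrnam(group).gr_gid
    except KeyError:
        return  # no ssh_keys group on this system; leave defaults alone
    os.chown(keyfile, -1, gid)          # keep the owner, hand the group to ssh_keys
    os.chmod(keyfile, 0o640)            # private key readable by the group only
    os.chmod(keyfile + ".pub", 0o644)   # public half stays world-readable

# Hypothetical usage after `ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key`:
# sanitize_host_key_perms("/etc/ssh/ssh_host_ed25519_key")
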
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 05d30ad1..020c3469 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,23 +28,21 @@ the keys can be specified, but defaults to ``sha256``.
import base64
import hashlib
-from cloudinit.simpletable import SimpleTable
-
+from cloudinit import ssh_util, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import util
+from cloudinit.simpletable import SimpleTable
def _split_hash(bin_hash):
split_up = []
for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
+ split_up.append(bin_hash[i : i + 2])
return split_up
-def _gen_fingerprint(b64_text, hash_meth='sha256'):
+def _gen_fingerprint(b64_text, hash_meth="sha256"):
if not b64_text:
- return ''
+ return ""
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
try:
hasher = hashlib.new(hash_meth)
@@ -54,58 +52,75 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
# Raised when b64 not really b64...
# or when the hash type is not really
# a known/supported hash type...
- return '?'
+ return "?"
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and entry.keytype.lower().strip()
- in ssh_util.VALID_KEY_TYPES):
+ if (
+ entry.keytype
+ and entry.keytype.lower().strip() in ssh_util.VALID_KEY_TYPES
+ ):
return True
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
- prefix='ci-info: '):
+def _pprint_key_entries(
+ user, key_fn, key_entries, hash_meth="sha256", prefix="ci-info: "
+):
if not key_entries:
- message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
- % (prefix, user))
- util.multi_log(message)
+ message = (
+ "%sno authorized SSH keys fingerprints found for user %s.\n"
+ % (prefix, user)
+ )
+ util.multi_log(message, console=True, stderr=False)
return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
+ tbl_fields = [
+ "Keytype",
+ "Fingerprint (%s)" % (hash_meth),
+ "Options",
+ "Comment",
+ ]
tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
- row = [entry.keytype or '-',
- _gen_fingerprint(entry.base64, hash_meth) or '-',
- entry.options or '-',
- entry.comment or '-']
+ row = [
+ entry.keytype or "-",
+ _gen_fingerprint(entry.base64, hash_meth) or "-",
+ entry.options or "-",
+ entry.comment or "-",
+ ]
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
+ util.center(
+ "Authorized keys from %s for user %s" % (key_fn, user),
+ "+",
+ max_len,
+ ),
]
lines.extend(authtbl_lines)
for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
+ util.multi_log(
+ text="%s%s\n" % (prefix, line), stderr=False, console=True
+ )
def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of SSH fingerprints disabled"), name)
+ if util.is_true(cfg.get("no_ssh_fingerprints", False)):
+ log.debug(
+ "Skipping module named %s, logging of SSH fingerprints disabled",
+ name,
+ )
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
+ _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
+
# vi: ts=4 expandtab
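
For context, _gen_fingerprint() above hashes the base64-encoded key material and the table code renders the digest in two-character groups. A self-contained approximation using only hashlib and base64 (the sample key below is truncated and purely illustrative):

import base64
import hashlib

def fingerprint(b64_key, hash_meth="sha256"):
    """Hex digest of the decoded key, split into colon-separated byte pairs."""
    if not b64_key:
        return ""
    try:
        hasher = hashlib.new(hash_meth)
        hasher.update(base64.b64decode(b64_key))
        digest = hasher.hexdigest()
    except (TypeError, ValueError):
        return "?"  # bad base64 or unknown hash type, as in the module
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

print(fingerprint("AAAAC3NzaC1lZDI1NTE5AAAAIB8x"))  # truncated sample key
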
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 856e5a9e..a9575c59 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -30,13 +30,13 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
- lp:user
"""
-from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
import pwd
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
+
# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def handle(_name, cfg, cloud, log, args):
@@ -56,11 +56,11 @@ def handle(_name, cfg, cloud, log, args):
elist = []
for (user, user_cfg) in users.items():
import_ids = []
- if user_cfg['default']:
+ if user_cfg["default"]:
import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
else:
try:
- import_ids = user_cfg['ssh_import_id']
+ import_ids = user_cfg["ssh_import_id"]
except Exception:
log.debug("User %s is not configured for ssh_import_id", user)
continue
@@ -69,8 +69,9 @@ def handle(_name, cfg, cloud, log, args):
import_ids = util.uniq_merge(import_ids)
import_ids = [str(i) for i in import_ids]
except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
+ log.debug(
+ "User %s is not correctly configured for ssh_import_id", user
+ )
continue
if not len(import_ids):
@@ -79,8 +80,9 @@ def handle(_name, cfg, cloud, log, args):
try:
import_ssh_ids(import_ids, user, log)
except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
+ util.logexc(
+ log, "ssh-import-id failed for: %s %s", user, import_ids
+ )
elist.append(exc)
if len(elist):
@@ -107,4 +109,5 @@ def import_ssh_ids(ids, user, log):
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index a9de8fac..24e6099e 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -27,7 +27,6 @@ the timezone from cloud config.
"""
from cloudinit import util
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -46,4 +45,5 @@ def handle(name, cfg, cloud, log, args):
# Let the distro handle settings its timezone
cloud.distro.set_timezone(timezone)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index d61dc655..e469bb22 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -4,23 +4,25 @@
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
-
-UA_URL = 'https://ubuntu.com/advantage'
+UA_URL = "https://ubuntu.com/advantage"
-distros = ['ubuntu']
+distros = ["ubuntu"]
-schema = {
- 'id': 'cc_ubuntu_advantage',
- 'name': 'Ubuntu Advantage',
- 'title': 'Configure Ubuntu Advantage support services',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_ubuntu_advantage",
+ "name": "Ubuntu Advantage",
+ "title": "Configure Ubuntu Advantage support services",
+ "description": dedent(
+ """\
Attach machine to an existing Ubuntu Advantage support contract and
enable or disable support services such as Livepatch, ESM,
FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
@@ -32,14 +34,21 @@ schema = {
a reboot to ensure the machine is running the FIPS-compliant kernel.
See :ref:`Power State Change` for information on how to configure
cloud-init to perform this reboot.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract with a
# UA contract token obtained from %s.
ubuntu_advantage:
token: <ua_contract_token>
- """ % UA_URL), dedent("""\
+ """
+ % UA_URL
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract enabling
# only fips and esm services. Services will only be enabled if
# the environment supports said service. Otherwise warnings will
@@ -49,7 +58,10 @@ schema = {
enable:
- fips
- esm
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract and enable
# the FIPS service. Perform a reboot once cloud-init has
# completed.
@@ -59,30 +71,35 @@ schema = {
token: <ua_contract_token>
enable:
- fips
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'ubuntu_advantage': {
- 'type': 'object',
- 'properties': {
- 'enable': {
- 'type': 'array',
- 'items': {'type': 'string'},
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "enable": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ "token": {
+ "type": "string",
+ "description": "A contract token obtained from %s."
+ % UA_URL,
},
- 'token': {
- 'type': 'string',
- 'description': (
- 'A contract token obtained from %s.' % UA_URL)
- }
},
- 'required': ['token'],
- 'additionalProperties': False
+ "required": ["token"],
+ "additionalProperties": False,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
LOG = logging.getLogger(__name__)
@@ -91,52 +108,61 @@ def configure_ua(token=None, enable=None):
"""Call ua commandline client to attach or enable services."""
error = None
if not token:
- error = ('ubuntu_advantage: token must be provided')
+ error = "ubuntu_advantage: token must be provided"
LOG.error(error)
raise RuntimeError(error)
if enable is None:
enable = []
elif isinstance(enable, str):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a string; treating as a single enable')
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a string; treating as a single enable"
+ )
enable = [enable]
elif not isinstance(enable, list):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a %s; skipping enabling services',
- type(enable).__name__)
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a %s; skipping enabling services",
+ type(enable).__name__,
+ )
enable = []
- attach_cmd = ['ua', 'attach', token]
- LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
+ attach_cmd = ["ua", "attach", token]
+ LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(attach_cmd))
try:
subp.subp(attach_cmd)
except subp.ProcessExecutionError as e:
- msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
- error=str(e))
+ msg = "Failure attaching Ubuntu Advantage:\n{error}".format(
+ error=str(e)
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
- cmd = ['ua', 'enable', service]
+ cmd = ["ua", "enable", "--assume-yes", service]
subp.subp(cmd, capture=True)
except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
msg = 'Failure enabling "{service}":\n{error}'.format(
- service=service, error=str(error))
+ service=service, error=str(error)
+ )
util.logexc(LOG, msg)
raise RuntimeError(
- 'Failure enabling Ubuntu Advantage service(s): {}'.format(
- ', '.join('"{}"'.format(service)
- for service, _ in enable_errors)))
+ "Failure enabling Ubuntu Advantage service(s): {}".format(
+ ", ".join(
+ '"{}"'.format(service) for service, _ in enable_errors
+ )
+ )
+ )
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if subp.which('ua'):
+ if subp.which("ua"):
return
try:
cloud.distro.update_package_sources()
@@ -144,7 +170,7 @@ def maybe_install_ua_tools(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['ubuntu-advantage-tools'])
+ cloud.distro.install_packages(["ubuntu-advantage-tools"])
except Exception:
util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
raise
@@ -152,27 +178,35 @@ def maybe_install_ua_tools(cloud):
def handle(name, cfg, cloud, log, args):
ua_section = None
- if 'ubuntu-advantage' in cfg:
- LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
- ' Expected underscore delimited "ubuntu_advantage"; will'
- ' attempt to continue.')
- ua_section = cfg['ubuntu-advantage']
- if 'ubuntu_advantage' in cfg:
- ua_section = cfg['ubuntu_advantage']
+ if "ubuntu-advantage" in cfg:
+ LOG.warning(
+ 'Deprecated configuration key "ubuntu-advantage" provided.'
+ ' Expected underscore delimited "ubuntu_advantage"; will'
+ " attempt to continue."
+ )
+ ua_section = cfg["ubuntu-advantage"]
+ if "ubuntu_advantage" in cfg:
+ ua_section = cfg["ubuntu_advantage"]
if ua_section is None:
- LOG.debug("Skipping module named %s,"
- " no 'ubuntu_advantage' configuration found", name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'ubuntu_advantage' configuration found",
+ name,
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if 'commands' in ua_section:
+ if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"')
+ ' Expected "token"'
+ )
LOG.error(msg)
raise RuntimeError(msg)
maybe_install_ua_tools(cloud)
- configure_ua(token=ua_section.get('token'),
- enable=ua_section.get('enable'))
+ configure_ua(
+ token=ua_section.get("token"), enable=ua_section.get("enable")
+ )
+
# vi: ts=4 expandtab
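
Functionally, the cc_ubuntu_advantage hunks change the enable call to `ua enable --assume-yes <service>` while the attach command stays `ua attach <token>`. A rough sketch of how those command lines are derived from an ubuntu_advantage section (the token below is a placeholder and the commands are only built, not run):

def ua_commands(ua_cfg):
    """Build the `ua` invocations cc_ubuntu_advantage would run for a config."""
    token = ua_cfg.get("token")
    if not token:
        raise RuntimeError("ubuntu_advantage: token must be provided")
    enable = ua_cfg.get("enable") or []
    if isinstance(enable, str):
        enable = [enable]  # the module tolerates a single string here
    cmds = [["ua", "attach", token]]
    for service in enable:
        cmds.append(["ua", "enable", "--assume-yes", service])
    return cmds

print(ua_commands({"token": "<ua_contract_token>", "enable": ["fips", "esm"]}))
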
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 2d1d2b32..44a3bdb4 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -5,55 +5,66 @@
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, type_utils, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['ubuntu']
-schema = {
- 'id': 'cc_ubuntu_drivers',
- 'name': 'Ubuntu Drivers',
- 'title': 'Interact with third party drivers in Ubuntu.',
- 'description': dedent("""\
+distros = ["ubuntu"]
+meta: MetaSchema = {
+ "id": "cc_ubuntu_drivers",
+ "name": "Ubuntu Drivers",
+ "title": "Interact with third party drivers in Ubuntu.",
+ "description": dedent(
+ """\
This module interacts with the 'ubuntu-drivers' command to install
- third party driver packages."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ third party driver packages."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
drivers:
nvidia:
license-accepted: true
- """)],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'drivers': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'nvidia': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['license-accepted'],
- 'properties': {
- 'license-accepted': {
- 'type': 'boolean',
- 'description': ("Do you accept the NVIDIA driver"
- " license?"),
+ """
+ )
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "drivers": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "nvidia": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ["license-accepted"],
+ "properties": {
+ "license-accepted": {
+ "type": "boolean",
+ "description": (
+ "Do you accept the NVIDIA driver license?"
+ ),
},
- 'version': {
- 'type': 'string',
- 'description': (
- 'The version of the driver to install (e.g.'
+ "version": {
+ "type": "string",
+ "description": (
+ "The version of the driver to install (e.g."
' "390", "410"). Defaults to the latest'
- ' version.'),
+ " version."
+ ),
},
},
},
@@ -62,9 +73,10 @@ schema = {
},
}
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
- "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'"
+)
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
# Use a debconf template to configure a global debconf variable
@@ -97,10 +109,11 @@ db_x_loadtemplatefile "$1" cloud-init
def install_drivers(cfg, pkg_install_func):
if not isinstance(cfg, dict):
raise TypeError(
- "'drivers' config expected dict, found '%s': %s" %
- (type_utils.obj_name(cfg), cfg))
+ "'drivers' config expected dict, found '%s': %s"
+ % (type_utils.obj_name(cfg), cfg)
+ )
- cfgpath = 'nvidia/license-accepted'
+ cfgpath = "nvidia/license-accepted"
# Call translate_bool to ensure that we treat string values like "yes" as
# acceptance and _don't_ treat string values like "nah" as acceptance
# because they're True-ish
@@ -109,46 +122,56 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not subp.which('ubuntu-drivers'):
- LOG.debug("'ubuntu-drivers' command not available. "
- "Installing ubuntu-drivers-common")
- pkg_install_func(['ubuntu-drivers-common'])
+ if not subp.which("ubuntu-drivers"):
+ LOG.debug(
+ "'ubuntu-drivers' command not available. "
+ "Installing ubuntu-drivers-common"
+ )
+ pkg_install_func(["ubuntu-drivers-common"])
- driver_arg = 'nvidia'
- version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
+ driver_arg = "nvidia"
+ version_cfg = util.get_cfg_by_path(cfg, "nvidia/version")
if version_cfg:
- driver_arg += ':{}'.format(version_cfg)
+ driver_arg += ":{}".format(version_cfg)
- LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
- cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
+ LOG.debug(
+ "Installing and activating NVIDIA drivers (%s=%s, version=%s)",
+ cfgpath,
+ nv_acc,
+ version_cfg if version_cfg else "latest",
+ )
# Register and set debconf selection linux/nvidia/latelink = true
tdir = temp_utils.mkdtemp(needs_exe=True)
- debconf_file = os.path.join(tdir, 'nvidia.template')
- debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ debconf_script = os.path.join(tdir, "nvidia-debconf.sh")
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
util.write_file(
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
- mode=0o755)
+ mode=0o755,
+ )
subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
- LOG, "Failed to register NVIDIA debconf template: %s", str(e))
+ LOG, "Failed to register NVIDIA debconf template: %s", str(e)
+ )
raise
finally:
if os.path.isdir(tdir):
util.del_dir(tdir)
try:
- subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ subp.subp(["ubuntu-drivers", "install", "--gpgpu", driver_arg])
except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
- LOG.warning('the available version of ubuntu-drivers is'
- ' too old to perform requested driver installation')
- elif 'No drivers found for installation.' in exc.stdout:
- LOG.warning('ubuntu-drivers found no drivers for installation')
+ LOG.warning(
+ "the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation"
+ )
+ elif "No drivers found for installation." in exc.stdout:
+ LOG.warning("ubuntu-drivers found no drivers for installation")
raise
@@ -158,4 +181,4 @@ def handle(name, cfg, cloud, log, _args):
return
validate_cloudconfig_schema(cfg, schema)
- install_drivers(cfg['drivers'], cloud.distro.install_packages)
+ install_drivers(cfg["drivers"], cloud.distro.install_packages)
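
The install_drivers() reflow above preserves its flow: skip unless nvidia/license-accepted is truthy, append an optional nvidia/version pin, then run `ubuntu-drivers install --gpgpu`. A small sketch of just the argument derivation, with a hypothetical config dict and no package installation (the real module also uses util.translate_bool for the acceptance check):

def ubuntu_drivers_cmd(drivers_cfg):
    """Return the ubuntu-drivers command cc_ubuntu_drivers would run, or None."""
    nvidia = drivers_cfg.get("nvidia") or {}
    if not nvidia.get("license-accepted"):
        return None  # the module logs and skips installation in this case
    driver_arg = "nvidia"
    version = nvidia.get("version")
    if version:
        driver_arg += ":{}".format(version)
    return ["ubuntu-drivers", "install", "--gpgpu", driver_arg]

print(ubuntu_drivers_cmd({"nvidia": {"license-accepted": True, "version": "390"}}))
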
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 03fffb96..f0aa9b0f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -9,27 +9,28 @@
"""
Update Etc Hosts
----------------
-**Summary:** update ``/etc/hosts``
+**Summary:** update the hosts file (usually ``/etc/hosts``)
-This module will update the contents of ``/etc/hosts`` based on the
-hostname/fqdn specified in config. Management of ``/etc/hosts`` is controlled
-using ``manage_etc_hosts``. If this is set to false, cloud-init will not manage
-``/etc/hosts`` at all. This is the default behavior.
+This module will update the contents of the local hosts database (hosts file;
+usually ``/etc/hosts``) based on the hostname/fqdn specified in config.
+Management of the hosts file is controlled using ``manage_etc_hosts``. If this
+is set to false, cloud-init will not manage the hosts file at all. This is the
+default behavior.
-If set to ``true`` or ``template``, cloud-init will generate ``/etc/hosts``
+If set to ``true`` or ``template``, cloud-init will generate the hosts file
using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
``$fqdn`` will be replaced with the hostname and fqdn respectively.
If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
-rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the
-fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
-``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
+rewrite the hosts file entirely, but rather will ensure that an entry for the
+fqdn with a distribution dependent ip is present (i.e. ``ping <hostname>`` will
+ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
.. note::
if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
- of ``/etc/hosts`` will be updated every boot. to make any changes to
- ``/etc/hosts`` persistant they must be made in
+ of the hosts file will be updated every boot. To make any changes to
+ the hosts file persistent they must be made in
``/etc/cloud/templates/hosts.tmpl``
.. note::
@@ -38,7 +39,7 @@ fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
**Internal name:** ``cc_update_etc_hosts``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -49,9 +50,7 @@ fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
hostname: <fqdn/hostname>
"""
-from cloudinit import templater
-from cloudinit import util
-
+from cloudinit import templater, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -59,35 +58,48 @@ frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if util.translate_bool(manage_hosts, addons=['template']):
+
+ hosts_fn = cloud.distro.hosts_fn
+
+ if util.translate_bool(manage_hosts, addons=["template"]):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
# Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
+ tpl_fn_name = cloud.get_template_filename(
+ "hosts.%s" % (cloud.distro.osfamily)
+ )
if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
+ raise RuntimeError(
+ "No hosts template could be found for distro %s"
+ % (cloud.distro.osfamily)
+ )
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
+ templater.render_to_file(
+ tpl_fn_name, hosts_fn, {"hostname": hostname, "fqdn": fqdn}
+ )
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
- log.debug("Managing localhost in /etc/hosts")
+ log.debug("Managing localhost in %s", hosts_fn)
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
+ log.debug(
+ "Configuration option 'manage_etc_hosts' is not set,"
+ " not managing %s in module %s",
+ hosts_fn,
+ name,
+ )
+
# vi: ts=4 expandtab
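
Beyond formatting, cc_update_etc_hosts now writes to cloud.distro.hosts_fn rather than a literal /etc/hosts, while the manage_etc_hosts decision tree is unchanged. A condensed sketch of that decision, with the template rendering and distro calls replaced by descriptive strings since they require a running cloud-init (the real module uses util.translate_bool with a "template" addon for the first branch):

def hosts_action(manage_etc_hosts, hostname=None):
    """Mirror the manage_etc_hosts decision tree from cc_update_etc_hosts."""
    if manage_etc_hosts in (True, "true", "template"):
        if not hostname:
            return "warn: manage_etc_hosts set but no hostname found"
        return "render hosts.tmpl with $hostname/$fqdn to distro.hosts_fn"
    if manage_etc_hosts == "localhost":
        return "ensure a distro-dependent localhost entry for the fqdn"
    return "leave the hosts file alone (default)"

print(hosts_action("template", hostname="host01"))
print(hosts_action("localhost", hostname="host01"))
print(hosts_action(False))
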
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index d5f4eb5a..09f6f6da 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,39 +20,52 @@ is set, then the hostname will not be altered.
**Internal name:** ``cc_update_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
import os
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not updating the hostname in module %s",
+ name,
+ )
return
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
+ if hostname_fqdn is not None:
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
log.debug("Updating hostname to %s (%s)", fqdn, hostname)
cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
+ util.logexc(
+ log, "Failed to update the hostname to %s (%s)", fqdn, hostname
+ )
raise
+
# vi: ts=4 expandtab
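
cc_update_hostname gains a prefer_fqdn_over_hostname key that is passed to the distro before the hostname is applied. A minimal sketch of the preference it expresses (the real module delegates the final choice to distro.update_hostname; the helper name and hostnames below are invented for illustration):

def pick_hostname(cfg, hostname, fqdn):
    """Choose what to apply, honouring prefer_fqdn_over_hostname when set."""
    prefer_fqdn = cfg.get("prefer_fqdn_over_hostname")  # None means "distro default"
    if prefer_fqdn is None:
        return hostname  # unchanged behaviour: the distro decides
    return fqdn if prefer_fqdn else hostname

print(pick_hostname({"prefer_fqdn_over_hostname": True}, "host01", "host01.example.com"))
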
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ac4a4410..ef77a799 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -127,12 +127,12 @@ config keys for an entry in ``users`` are as follows:
uid: <user id>
"""
+from cloudinit import log as logging
+
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit.distros import ug_util
-from cloudinit import log as logging
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -149,26 +149,31 @@ def handle(name, cfg, cloud, _log, _args):
for (user, config) in users.items():
ssh_redirect_user = config.pop("ssh_redirect_user", False)
if ssh_redirect_user:
- if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
+ if "ssh_authorized_keys" in config or "ssh_import_id" in config:
raise ValueError(
- 'Not creating user %s. ssh_redirect_user cannot be'
- ' provided with ssh_import_id or ssh_authorized_keys' %
- user)
- if ssh_redirect_user not in (True, 'default'):
+ "Not creating user %s. ssh_redirect_user cannot be"
+ " provided with ssh_import_id or ssh_authorized_keys"
+ % user
+ )
+ if ssh_redirect_user not in (True, "default"):
raise ValueError(
- 'Not creating user %s. Invalid value of'
- ' ssh_redirect_user: %s. Expected values: true, default'
- ' or false.' % (user, ssh_redirect_user))
+ "Not creating user %s. Invalid value of"
+ " ssh_redirect_user: %s. Expected values: true, default"
+ " or false." % (user, ssh_redirect_user)
+ )
if default_user is None:
LOG.warning(
- 'Ignoring ssh_redirect_user: %s for %s.'
- ' No default_user defined.'
- ' Perhaps missing cloud configuration users: '
- ' [default, ..].',
- ssh_redirect_user, user)
+ "Ignoring ssh_redirect_user: %s for %s."
+ " No default_user defined."
+ " Perhaps missing cloud configuration users: "
+ " [default, ..].",
+ ssh_redirect_user,
+ user,
+ )
else:
- config['ssh_redirect_user'] = default_user
- config['cloud_public_ssh_keys'] = cloud_keys
+ config["ssh_redirect_user"] = default_user
+ config["cloud_public_ssh_keys"] = cloud_keys
cloud.distro.create_user(user, **config)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py
index c19ecfe8..a7f75316 100644
--- a/cloudinit/config/cc_vyos.py
+++ b/cloudinit/config/cc_vyos.py
@@ -33,7 +33,10 @@ from cloudinit.sources import INSTANCE_JSON_FILE
from cloudinit.stages import Init
from cloudinit.util import load_file, load_json, get_hostname_fqdn
from cloudinit.sources.DataSourceOVF import get_properties as ovf_get_properties
-from vyos.configtree import ConfigTree
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
# configure logging
logger = logging.getLogger(__name__)
diff --git a/cloudinit/config/cc_vyos_userdata.py b/cloudinit/config/cc_vyos_userdata.py
index 95ba82de..5ad27b31 100644
--- a/cloudinit/config/cc_vyos_userdata.py
+++ b/cloudinit/config/cc_vyos_userdata.py
@@ -18,7 +18,10 @@ import re
from pathlib import Path
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
-from vyos.configtree import ConfigTree
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
# configure logging
logger = logging.getLogger(__name__)
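
Both VyOS modules above now guard the vyos.configtree import so the files can still be imported where the VyOS Python bindings are missing. The same guarded-import pattern in isolation, with a small helper added here only to show how later code can test for availability:

try:
    from vyos.configtree import ConfigTree  # only present on VyOS systems
except ImportError as err:
    ConfigTree = None
    print(f"The module cannot be imported: {err}")

def have_configtree():
    """True when the VyOS config API is usable on this host."""
    return ConfigTree is not None

print(have_configtree())
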
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 8601e707..37dae392 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -10,22 +10,25 @@ import base64
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
-
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
-UNKNOWN_ENC = 'text/plain'
+DEFAULT_DEFER = False
+UNKNOWN_ENC = "text/plain"
LOG = logging.getLogger(__name__)
-distros = ['all']
+distros = ["all"]
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -34,14 +37,22 @@ distros = ['all']
# configuration.
supported_encoding_types = [
- 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
- 'base64']
+ "gz",
+ "gzip",
+ "gz+base64",
+ "gzip+base64",
+ "gz+b64",
+ "gzip+b64",
+ "b64",
+ "base64",
+]
-schema = {
- 'id': 'cc_write_files',
- 'name': 'Write Files',
- 'title': 'write arbitrary files',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_write_files",
+ "name": "Write Files",
+ "title": "write arbitrary files",
+ "description": dedent(
+ """\
Write out arbitrary content to files, optionally setting permissions.
Parent folders in the path are created if absent.
Content can be specified in plain text or binary. Data encoded with
@@ -57,10 +68,12 @@ schema = {
Do not write files under /tmp during boot because of a race with
systemd-tmpfiles-clean that can cause temp files to get cleaned during
the early boot process. Use /run/somedir instead to avoid race
- LP:1707222."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ LP:1707222."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Write out base64 encoded content to /etc/sysconfig/selinux
write_files:
- encoding: b64
@@ -68,16 +81,20 @@ schema = {
owner: root:root
path: /etc/sysconfig/selinux
permissions: '0644'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Appending content to an existing file
write_files:
- content: |
15 * * * * root ship_logs
path: /etc/crontab
append: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
+            # Provide gzipped binary content
write_files:
- encoding: gzip
@@ -85,110 +102,177 @@ schema = {
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /usr/bin/hello
permissions: '0755'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create an empty file on the system
write_files:
- path: /root/CLOUD_INIT_WAS_HERE
- """)],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'write_files': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'path': {
- 'type': 'string',
- 'description': dedent("""\
+ """
+ ),
+ dedent(
+ """\
+ # Defer writing the file until after the package (Nginx) is
+ # installed and its user is created alongside
+ write_files:
+ - path: /etc/nginx/conf.d/example.com.conf
+ content: |
+ server {
+ server_name example.com;
+ listen 80;
+ root /var/www;
+ location / {
+ try_files $uri $uri/ $uri.html =404;
+ }
+ }
+ owner: 'nginx:nginx'
+ permissions: '0640'
+ defer: true
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "write_files": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": dedent(
+ """\
Path of the file to which ``content`` is decoded
and written
- """),
+ """
+ ),
},
- 'content': {
- 'type': 'string',
- 'default': '',
- 'description': dedent("""\
+ "content": {
+ "type": "string",
+ "default": "",
+ "description": dedent(
+ """\
Optional content to write to the provided ``path``.
When content is present and encoding is not '%s',
decode the content prior to writing. Default:
**''**
- """ % UNKNOWN_ENC),
+ """
+ % UNKNOWN_ENC
+ ),
},
- 'owner': {
- 'type': 'string',
- 'default': DEFAULT_OWNER,
- 'description': dedent("""\
+ "owner": {
+ "type": "string",
+ "default": DEFAULT_OWNER,
+ "description": dedent(
+ """\
Optional owner:group to chown on the file. Default:
**{owner}**
- """.format(owner=DEFAULT_OWNER)),
+ """.format(
+ owner=DEFAULT_OWNER
+ )
+ ),
},
- 'permissions': {
- 'type': 'string',
- 'default': oct(DEFAULT_PERMS).replace('o', ''),
- 'description': dedent("""\
+ "permissions": {
+ "type": "string",
+ "default": oct(DEFAULT_PERMS).replace("o", ""),
+ "description": dedent(
+ """\
Optional file permissions to set on ``path``
represented as an octal string '0###'. Default:
**'{perms}'**
- """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ """.format(
+ perms=oct(DEFAULT_PERMS).replace("o", "")
+ )
+ ),
},
- 'encoding': {
- 'type': 'string',
- 'default': UNKNOWN_ENC,
- 'enum': supported_encoding_types,
- 'description': dedent("""\
+ "encoding": {
+ "type": "string",
+ "default": UNKNOWN_ENC,
+ "enum": supported_encoding_types,
+ "description": dedent(
+ """\
Optional encoding type of the content. Default is
**text/plain** and no content decoding is
performed. Supported encoding types are:
- %s.""" % ", ".join(supported_encoding_types)),
+ %s."""
+ % ", ".join(supported_encoding_types)
+ ),
},
- 'append': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "append": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
Whether to append ``content`` to existing file if
``path`` exists. Default: **false**.
- """),
+ """
+ ),
+ },
+ "defer": {
+ "type": "boolean",
+ "default": DEFAULT_DEFER,
+ "description": dedent(
+ """\
+ Defer writing the file until 'final' stage, after
+ users were created, and packages were installed.
+ Default: **{defer}**.
+ """.format(
+ defer=DEFAULT_DEFER
+ )
+ ),
},
},
- 'required': ['path'],
- 'additionalProperties': False
+ "required": ["path"],
+ "additionalProperties": False,
},
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
- return
validate_cloudconfig_schema(cfg, schema)
- write_files(name, files)
+ file_list = cfg.get("write_files", [])
+ filtered_files = [
+ f
+ for f in file_list
+ if not util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
+ ]
+ if not filtered_files:
+ log.debug(
+ "Skipping module named %s,"
+ " no/empty 'write_files' key in configuration",
+ name,
+ )
+ return
+ write_files(name, filtered_files)
def canonicalize_extraction(encoding_type):
if not encoding_type:
- encoding_type = ''
+ encoding_type = ""
encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
+ if encoding_type in ["gz", "gzip"]:
+ return ["application/x-gzip"]
+ if encoding_type in ["gz+base64", "gzip+base64", "gz+b64", "gzip+b64"]:
+ return ["application/base64", "application/x-gzip"]
# Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
    # specifying it manually (which might be a possibility)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
+ if encoding_type in ["b64", "base64"]:
+ return ["application/base64"]
if encoding_type:
- LOG.warning("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
+ LOG.warning(
+ "Unknown encoding type %s, assuming %s", encoding_type, UNKNOWN_ENC
+ )
return [UNKNOWN_ENC]
@@ -197,17 +281,20 @@ def write_files(name, files):
return
for (i, f_info) in enumerate(files):
- path = f_info.get('path')
+ path = f_info.get("path")
if not path:
- LOG.warning("No path provided to write for entry %s in module %s",
- i + 1, name)
+ LOG.warning(
+ "No path provided to write for entry %s in module %s",
+ i + 1,
+ name,
+ )
continue
path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'))
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
- omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb'
+ extractions = canonicalize_extraction(f_info.get("encoding"))
+ contents = extract_contents(f_info.get("content", ""), extractions)
+ (u, g) = util.extract_usergroup(f_info.get("owner", DEFAULT_OWNER))
+ perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
+ omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
util.write_file(path, contents, omode=omode, mode=perms)
util.chownbyname(path, u, g)
@@ -229,20 +316,20 @@ def decode_perms(perm, default):
reps.append("%o" % r)
except TypeError:
reps.append("%r" % r)
- LOG.warning(
- "Undecodable permissions %s, returning default %s", *reps)
+ LOG.warning("Undecodable permissions %s, returning default %s", *reps)
return default
def extract_contents(contents, extraction_types):
result = contents
for t in extraction_types:
- if t == 'application/x-gzip':
+ if t == "application/x-gzip":
result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
+ elif t == "application/base64":
result = base64.b64decode(result)
elif t == UNKNOWN_ENC:
pass
return result
+
# vi: ts=4 expandtab
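
The functional change in cc_write_files is the new defer flag: handle() writes only the entries whose defer is false, and the new cc_write_files_deferred module below picks up the rest during the final stage. A standalone sketch of that split, using plain dicts in place of the parsed cloud-config and util.get_cfg_option_bool:

DEFAULT_DEFER = False

def split_write_files(file_list):
    """Partition write_files entries the way the two modules do."""
    immediate = [f for f in file_list if not f.get("defer", DEFAULT_DEFER)]
    deferred = [f for f in file_list if f.get("defer", DEFAULT_DEFER)]
    return immediate, deferred

files = [
    {"path": "/etc/motd", "content": "hello\n"},
    {"path": "/etc/nginx/conf.d/example.com.conf", "defer": True},
]
now, later = split_write_files(files)
print([f["path"] for f in now])    # written by cc_write_files
print([f["path"] for f in later])  # written by cc_write_files_deferred
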
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
new file mode 100644
index 00000000..1294628c
--- /dev/null
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2021 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Defer writing certain files"""
+
+from cloudinit import util
+from cloudinit.config.cc_write_files import DEFAULT_DEFER
+from cloudinit.config.cc_write_files import schema as write_files_schema
+from cloudinit.config.cc_write_files import write_files
+from cloudinit.config.schema import validate_cloudconfig_schema
+
+# meta is not used in this module, but it remains as code documentation
+#
+# id: 'cc_write_files_deferred'
+# name: 'Write Deferred Files'
+# distros: ['all'],
+# frequency: PER_INSTANCE,
+# title:
+#    write certain files, whose creation has been deferred, during
+# final stage
+# description:
+# This module is based on `'Write Files' <write-files>`__, and
+# will handle all files from the write_files list, that have been
+# marked as deferred and thus are not being processed by the
+# write-files module.
+#
+# *Please note that this module is not exposed to the user through
+# its own dedicated top-level directive.*
+
+schema = write_files_schema
+
+
+# Not exposed, because related modules should document this behaviour
+__doc__ = None
+
+
+def handle(name, cfg, _cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ file_list = cfg.get("write_files", [])
+ filtered_files = [
+ f
+ for f in file_list
+ if util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
+ ]
+ if not filtered_files:
+ log.debug(
+ "Skipping module named %s,"
+ " no deferred file defined in configuration",
+ name,
+ )
+ return
+ write_files(name, filtered_files)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 01fe683c..7a232689 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -16,9 +16,10 @@ entry, the config entry will be skipped.
**Internal name:** ``cc_yum_add_repo``
-**Module frequency:** per always
+**Module frequency:** always
-**Supported distros:** centos, fedora, rhel
+**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora,
+ miraclelinux, openEuler, photon, rhel, rocky, virtuozzo
**Config keys**::
@@ -36,7 +37,18 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['centos', 'fedora', 'rhel']
+distros = [
+ "almalinux",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "openEuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "virtuozzo",
+]
def _canonicalize_id(repo_id):
@@ -77,25 +89,34 @@ def _format_repository_config(repo_id, repo_config):
def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
+ repos = cfg.get("yum_repos")
if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
+ log.debug(
+ "Skipping module named %s, no 'yum_repos' configuration found",
+ name,
+ )
return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
+ repo_base_path = util.get_cfg_option_str(
+ cfg, "yum_repo_dir", "/etc/yum.repos.d/"
+ )
repo_locations = {}
repo_configs = {}
for (repo_id, repo_config) in repos.items():
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
if not repo_config:
repo_config = {}
@@ -107,21 +128,29 @@ def handle(name, cfg, _cloud, log, _args):
n_repo_config[k] = v
repo_config = n_repo_config
missing_required = 0
- for req_field in ['baseurl']:
+ for req_field in ["baseurl"]:
if req_field not in repo_config:
- log.warning(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
+ log.warning(
+ "Repository %s does not contain a %s"
+ " configuration 'required' entry",
+ repo_id,
+ req_field,
+ )
missing_required += 1
if not missing_required:
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warning("Repository %s is missing %s required fields, "
- "skipping!", repo_id, missing_required)
+ log.warning(
+ "Repository %s is missing %s required fields, skipping!",
+ repo_id,
+ missing_required,
+ )
for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
+ repo_blob = _format_repository_config(
+ c_repo_id, repo_configs.get(c_repo_id)
+ )
util.write_file(path, repo_blob)
+
# vi: ts=4 expandtab
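
cc_yum_add_repo keeps requiring a baseurl per repository and skipping entries whose .repo file already exists or is already pending. A compact sketch of that validation step (repo ids and URLs below are made up, the canonicalisation is a rough stand-in for _canonicalize_id, and nothing is written to disk):

import os

def validate_yum_repos(repos, repo_dir="/etc/yum.repos.d/"):
    """Return {canonical_id: path} for entries passing cc_yum_add_repo's checks."""
    accepted = {}
    for repo_id, repo_cfg in repos.items():
        canon = repo_id.lower().replace("-", "_")  # rough _canonicalize_id stand-in
        path = os.path.join(repo_dir, "%s.repo" % canon)
        if not (repo_cfg or {}).get("baseurl"):
            print("Repository %s is missing required field baseurl, skipping" % repo_id)
            continue
        if os.path.exists(path) or canon in accepted:
            print("Skipping repo %s, file %s already exists or is pending" % (repo_id, path))
            continue
        accepted[canon] = path
    return accepted

print(validate_yum_repos({"epel-testing": {"baseurl": "https://example.com/epel"}}))
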
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 05855b0c..be444cce 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -5,22 +5,24 @@
"""zypper_add_repo: Add zyper repositories to the system"""
-import configobj
import os
from textwrap import dedent
-from cloudinit.config.schema import get_schema_doc
+import configobj
+
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
-distros = ['opensuse', 'sles']
+distros = ["opensuse", "sles"]
-schema = {
- 'id': 'cc_zypper_add_repo',
- 'name': 'ZypperAddRepo',
- 'title': 'Configure zypper behavior and add zypper repositories',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_zypper_add_repo",
+ "name": "ZypperAddRepo",
+ "title": "Configure zypper behavior and add zypper repositories",
+ "description": dedent(
+ """\
Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
configuration writer is "dumb" and will simply append the provided
configuration options to the configuration file. Option settings
@@ -28,9 +30,12 @@ schema = {
is parsed. The file is in INI format.
Add repositories to the system. No validation is performed on the
repository file entries, it is assumed the user is familiar with
- the zypper repository file format."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ the zypper repository file format."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
zypper:
repos:
- id: opensuse-oss
@@ -49,51 +54,59 @@ schema = {
servicesdir: /etc/zypp/services.d
download.use_deltarpm: true
# any setting in /etc/zypp/zypp.conf
- """)],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'zypper': {
- 'type': 'object',
- 'properties': {
- 'repos': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {
- 'type': 'string',
- 'description': dedent("""\
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "zypper": {
+ "type": "object",
+ "properties": {
+ "repos": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": dedent(
+ """\
The unique id of the repo, used when
writing
- /etc/zypp/repos.d/<id>.repo.""")
+ /etc/zypp/repos.d/<id>.repo."""
+ ),
+ },
+ "baseurl": {
+ "type": "string",
+ "format": "uri", # built-in format type
+ "description": "The base repositoy URL",
},
- 'baseurl': {
- 'type': 'string',
- 'format': 'uri', # built-in format type
- 'description': 'The base repositoy URL'
- }
},
- 'required': ['id', 'baseurl'],
- 'additionalProperties': True
+ "required": ["id", "baseurl"],
+ "additionalProperties": True,
},
- 'minItems': 1
+ "minItems": 1,
},
- 'config': {
- 'type': 'object',
- 'description': dedent("""\
+ "config": {
+ "type": "object",
+ "description": dedent(
+ """\
+ Any supported zypp.conf key is written to
- /etc/zypp/zypp.conf'""")
- }
+ /etc/zypp/zypp.conf"""
+ ),
+ },
},
- 'required': [],
- 'minProperties': 1, # Either config or repo must be provided
- 'additionalProperties': False, # only repos and config allowed
+ "minProperties": 1, # Either config or repo must be provided
+ "additionalProperties": False, # only repos and config allowed
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
LOG = logging.getLogger(__name__)
@@ -139,34 +152,43 @@ def _write_repos(repos, repo_base_path):
valid_repos = {}
for index, user_repo_config in enumerate(repos):
# Skip on absent required keys
- missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ missing_keys = set(["id", "baseurl"]).difference(set(user_repo_config))
if missing_keys:
LOG.warning(
"Repo config at index %d is missing required config keys: %s",
- index, ",".join(missing_keys))
+ index,
+ ",".join(missing_keys),
+ )
continue
- repo_id = user_repo_config.get('id')
+ repo_id = user_repo_config.get("id")
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- LOG.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif repo_id in valid_repos:
- LOG.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
# Do some basic key formatting
repo_config = dict(
(k.lower().strip().replace("-", "_"), v)
for k, v in user_repo_config.items()
- if k and k != 'id')
+ if k and k != "id"
+ )
# Set defaults if not present
- for field in ['enabled', 'autorefresh']:
+ for field in ["enabled", "autorefresh"]:
if field not in repo_config:
- repo_config[field] = '1'
+ repo_config[field] = "1"
valid_repos[repo_id] = (repo_fn_pth, repo_config)
@@ -179,39 +201,44 @@ def _write_zypp_config(zypper_config):
"""Write to the default zypp configuration file /etc/zypp/zypp.conf"""
if not zypper_config:
return
- zypp_config = '/etc/zypp/zypp.conf'
+ zypp_config = "/etc/zypp/zypp.conf"
zypp_conf_content = util.load_file(zypp_config)
- new_settings = ['# Added via cloud.cfg']
+ new_settings = ["# Added via cloud.cfg"]
for setting, value in zypper_config.items():
- if setting == 'configdir':
- msg = 'Changing the location of the zypper configuration is '
+ if setting == "configdir":
+ msg = "Changing the location of the zypper configuration is "
msg += 'not supported, skipping "configdir" setting'
LOG.warning(msg)
continue
if value:
- new_settings.append('%s=%s' % (setting, value))
+ new_settings.append("%s=%s" % (setting, value))
if len(new_settings) > 1:
- new_config = zypp_conf_content + '\n'.join(new_settings)
+ new_config = zypp_conf_content + "\n".join(new_settings)
else:
new_config = zypp_conf_content
util.write_file(zypp_config, new_config)
def handle(name, cfg, _cloud, log, _args):
- zypper_section = cfg.get('zypper')
+ zypper_section = cfg.get("zypper")
if not zypper_section:
- LOG.debug(("Skipping module named %s,"
- " no 'zypper' relevant configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'zypper' relevant configuration found",
+ name,
+ )
return
- repos = zypper_section.get('repos')
+ repos = zypper_section.get("repos")
if not repos:
- LOG.debug(("Skipping module named %s,"
- " no 'repos' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'repos' configuration found", name
+ )
return
- zypper_config = zypper_section.get('config', {})
- repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+ zypper_config = zypper_section.get("config", {})
+ repo_base_path = zypper_config.get("reposdir", "/etc/zypp/repos.d/")
_write_zypp_config(zypper_config)
_write_repos(repos, repo_base_path)
+
# vi: ts=4 expandtab
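For reference, _write_zypp_config above appends a "# Added via cloud.cfg" header plus key=value lines to /etc/zypp/zypp.conf and refuses to relocate the configuration via ``configdir``. A rough, stand-alone illustration of that append-only rendering, using made-up sample settings and never touching the real file:

    def render_zypp_conf(existing, zypper_config):
        # Mirrors the logic of _write_zypp_config, but returns the new content
        # instead of writing /etc/zypp/zypp.conf.
        new_settings = ["# Added via cloud.cfg"]
        for setting, value in zypper_config.items():
            if setting == "configdir":
                # changing the zypper config location is not supported
                continue
            if value:
                new_settings.append("%s=%s" % (setting, value))
        if len(new_settings) > 1:
            return existing + "\n".join(new_settings)
        return existing

    print(
        render_zypp_conf(
            "## zypp.conf\n",
            {"servicesdir": "/etc/zypp/services.d", "download.use_deltarpm": True},
        )
    )

As in the module, a config that yields no usable settings leaves the original content unchanged, since only the header line would have been added.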
diff --git a/cloudinit/config/cloud-init-schema.json b/cloudinit/config/cloud-init-schema.json
new file mode 100644
index 00000000..2d43d06a
--- /dev/null
+++ b/cloudinit/config/cloud-init-schema.json
@@ -0,0 +1,560 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "apt_configure.mirror": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["arches"],
+ "properties": {
+ "arches": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "uri": {"type": "string", "format": "uri"},
+ "search": {
+ "type": "array",
+ "items": {"type": "string", "format": "uri"},
+ "minItems": 1
+ },
+ "search_dns": {
+ "type": "boolean"
+ },
+ "keyid": {"type": "string"},
+ "key": {"type": "string"},
+ "keyserver": {"type": "string"}
+ }
+ },
+ "minItems": 1
+ },
+ "ca_certs.properties": {
+ "type": "object",
+ "properties": {
+ "remove-defaults": {
+ "description": "Deprecated key name. Use remove_defaults instead.",
+ "type": "boolean",
+ "default": false
+ },
+ "remove_defaults": {
+ "description": "Remove default CA certificates if true. Default: false",
+ "type": "boolean",
+ "default": false
+ },
+ "trusted": {
+ "description": "List of trusted CA certificates to add.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ },
+ "cc_apk_configure": {
+ "type": "object",
+ "properties": {
+ "apk_repos": {
+ "type": "object",
+ "properties": {
+ "preserve_repositories": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
+ },
+ "alpine_repo": {
+ "type": ["object", "null"],
+ "properties": {
+ "base_url": {
+ "type": "string",
+ "default": "https://alpine.global.ssl.fastly.net/alpine",
+ "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
+ },
+ "community_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
+ },
+ "testing_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
+ },
+ "version": {
+ "type": "string",
+ "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
+ }
+ },
+ "required": ["version"],
+ "minProperties": 1,
+ "additionalProperties": false
+ },
+ "local_repo_base_url": {
+ "type": "string",
+ "description": "The base URL of an Alpine repository containing unofficial packages"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_apt_configure": {
+ "properties": {
+ "apt": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "preserve_sources_list": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
+ },
+ "disable_suites": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
+ },
+ "primary": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
+ },
+ "security": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "Please refer to the primary config documentation"
+ },
+ "add_apt_repo_match": {
+ "type": "string",
+ "default": "^[\\w-]+:\\w",
+ "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
+ },
+ "debconf_selections": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^.+$": {
+ "type": "string"
+ }
+ },
+ "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
+ },
+ "sources_list": {
+ "type": "string",
+ "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
+ },
+ "conf": {
+ "type": "string",
+ "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
+ },
+ "proxy": {
+ "type": "string",
+ "description": "Alias for defining a http APT proxy."
+ },
+ "ftp_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
+ },
+ "sources": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string"
+ },
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ },
+ "filename": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ }
+ },
+ "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
+ }
+ }
+ }
+ }
+ },
+ "cc_apt_pipelining": {
+ "type": "object",
+ "properties": {
+ "apt_pipelining": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string", "enum": ["none", "unchanged", "os"]}
+ ]
+ }
+ }
+ },
+ "cc_bootcmd": {
+ "type": "object",
+ "properties": {
+ "bootcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1
+ }
+ }
+ },
+ "cc_byobu": {
+ "type": "object",
+ "properties": {
+ "byobu_by_default": {
+ "type": "string",
+ "enum": [
+ "enable-system",
+ "enable-user",
+ "disable-system",
+ "disable-user",
+ "enable",
+ "disable",
+ "user",
+ "system"
+ ]
+ }
+ }
+ },
+ "cc_ca_certs": {
+ "type": "object",
+ "properties": {
+ "ca_certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ },
+ "ca-certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ }
+ }
+ },
+ "cc_chef": {
+ "type": "object",
+ "properties": {
+ "chef": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "directories": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
+ },
+ "validation_cert": {
+ "type": "string",
+ "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
+ },
+ "validation_key": {
+ "type": "string",
+ "default": "/etc/chef/validation.pem",
+ "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
+ },
+ "firstboot_path": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
+ },
+ "client_key": {
+ "type": "string",
+ "default": "/etc/chef/client.pem",
+ "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
+ },
+ "encrypted_data_bag_secret": {
+ "type": "string",
+ "default": null,
+ "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
+ },
+ "environment": {
+ "type": "string",
+ "default": "_default",
+ "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
+ },
+ "file_backup_path": {
+ "type": "string",
+ "default": "/var/backups/chef",
+ "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
+ },
+ "file_cache_path": {
+ "type": "string",
+ "default": "/var/cache/chef",
+ "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
+ },
+ "json_attribs": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
+ },
+ "log_level": {
+ "type": "string",
+ "default": ":info",
+ "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
+ },
+ "log_location": {
+ "type": "string",
+ "default": "/var/log/chef/client.log",
+ "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
+ },
+ "node_name": {
+ "type": "string",
+ "description": "The name of the node to run. By default, we will use th instance id as the node name."
+ },
+ "omnibus_url": {
+ "type": "string",
+ "default": "https://www.chef.io/chef/install.sh",
+ "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
+ },
+ "omnibus_url_retries": {
+ "type": "integer",
+ "default": 5,
+ "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
+ },
+ "omnibus_version": {
+ "type": "string",
+ "description": "Optional version string to require for omnibus install."
+ },
+ "pid_file": {
+ "type": "string",
+ "default": "/var/run/chef/client.pid",
+ "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
+ },
+ "server_url": {
+ "type": "string",
+ "description": "The URL for the chef server"
+ },
+ "show_time": {
+ "type": "boolean",
+ "default": true,
+ "description": "Show time in chef logs"
+ },
+ "ssl_verify_mode": {
+ "type": "string",
+ "default": ":verify_none",
+ "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
+ },
+ "validation_name": {
+ "type": "string",
+ "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
+ },
+ "force_install": {
+ "type": "boolean",
+ "default": false,
+ "description": "If set to ``true``, forces chef installation, even if it is already installed."
+ },
+ "initial_attributes": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": "Specify a list of initial attributes used by the cookbooks."
+ },
+ "install_type": {
+ "type": "string",
+ "default": "packages",
+ "enum": [
+ "packages",
+ "gems",
+ "omnibus"
+ ],
+ "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
+ },
+ "run_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "A run list for a first boot json."
+ },
+ "chef_license": {
+ "type": "string",
+ "description": "string that indicates if user accepts or not license related to some of chef products"
+ }
+ }
+ }
+ }
+ },
+ "cc_debug": {
+ "type": "object",
+ "properties": {
+ "debug": {
+ "additionalProperties": false,
+ "minProperties": 1,
+ "type": "object",
+ "properties": {
+ "verbose": {
+ "description": "Should always be true for this module",
+ "type": "boolean"
+ },
+ "output": {
+ "description": "Location to write output. Defaults to console + log",
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "cc_disable_ec2_metadata": {
+ "type": "object",
+ "properties": {
+ "disable_ec2_metadata": {
+ "default": false,
+ "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
+ "type": "boolean"
+ }
+ }
+ },
+ "cc_disk_setup": {
+ "type": "object",
+ "properties": {
+ "device_aliases": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias_name>",
+ "type": "string",
+ "description": "Path to disk to be aliased by this name."
+ }
+ }
+ },
+ "disk_setup": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias name/path>",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "table_type": {
+ "type": "string",
+ "default": "mbr",
+ "enum": ["mbr", "gpt"],
+ "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
+ },
+ "layout": {
+ "type": ["string", "boolean", "array"],
+ "default": false,
+ "oneOf": [
+ {"type": "string", "enum": ["remove"]},
+ {"type": "boolean"},
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type": "array",
+ "items": {"type": "integer"},
+ "minItems": 2,
+ "maxItems": 2
+ }
+ ]
+ }
+ }
+ ],
+ "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "default": false,
+ "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "fs_setup": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Label for the filesystem."
+ },
+ "filesystem": {
+ "type": "string",
+ "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
+ },
+ "device": {
+ "type": "string",
+ "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
+ },
+ "partition": {
+ "type": ["string", "integer"],
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": ["auto", "any", "none"]
+ },
+ {"type": "integer"}
+ ],
+ "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
+ },
+ "replace_fs": {
+ "type": "string",
+ "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
+ },
+ "extra_opts": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
+ },
+ "cmd": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "allOf": [
+ { "$ref": "#/$defs/cc_apk_configure" },
+ { "$ref": "#/$defs/cc_apt_configure" },
+ { "$ref": "#/$defs/cc_apt_pipelining" },
+ { "$ref": "#/$defs/cc_bootcmd" },
+ { "$ref": "#/$defs/cc_byobu" },
+ { "$ref": "#/$defs/cc_ca_certs" },
+ { "$ref": "#/$defs/cc_chef" },
+ { "$ref": "#/$defs/cc_debug" },
+ { "$ref": "#/$defs/cc_disable_ec2_metadata" },
+ { "$ref": "#/$defs/cc_disk_setup" }
+ ]
+}
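The new cloud-init-schema.json is an ordinary draft-04 document: each module contributes a named entry under ``$defs`` and the top-level ``allOf`` stitches those entries together, so any draft-04 validator can check a cloud-config dict against it directly. A small sketch of doing that with python-jsonschema; the relative path assumes you are sitting in a cloud-init source tree, and cloud-init itself goes through get_schema()/validate_cloudconfig_schema() (next hunk), which add byte-string handling on top.

    import json

    from jsonschema import Draft4Validator

    with open("cloudinit/config/cloud-init-schema.json") as f:
        schema = json.load(f)

    config = {
        "apt_pipelining": "os",
        "bootcmd": ["mkdir -p /run/mydir", ["ls", "-l", "/"]],
        "debug": {"verbose": True, "output": "/var/log/cloud-init-debug.log"},
    }

    for error in Draft4Validator(schema).iter_errors(config):
        print("%s: %s" % (".".join(map(str, error.path)), error.message))

With the values above nothing is printed; change ``apt_pipelining`` to an unsupported string such as "bogus" and the loop reports a path-qualified schema error instead.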
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 456bab2c..1f969c97 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,22 +1,28 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit import importer
-from cloudinit.util import find_modules, load_file
-
import argparse
-from collections import defaultdict
-from copy import deepcopy
+import json
import logging
import os
import re
import sys
+from collections import defaultdict
+from copy import deepcopy
+from functools import partial
+
import yaml
-_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
-SCHEMA_UNDEFINED = b'UNDEFINED'
-CLOUD_CONFIG_HEADER = b'#cloud-config'
+from cloudinit import importer
+from cloudinit.cmd.devel import read_cfg_paths
+from cloudinit.importer import MetaSchema
+from cloudinit.util import error, find_modules, load_file
+
+error = partial(error, sys_exit=True)
+LOG = logging.getLogger(__name__)
+
+_YAML_MAP = {True: "true", False: "false", None: "null"}
+CLOUD_CONFIG_HEADER = b"#cloud-config"
SCHEMA_DOC_TMPL = """
{name}
{title_underbar}
@@ -34,11 +40,12 @@ SCHEMA_DOC_TMPL = """
{property_doc}
{examples}
"""
-SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}"
SCHEMA_LIST_ITEM_TMPL = (
- '{prefix}Each item in **{prop_name}** list supports the following keys:')
-SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
-SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
+ "{prefix}Each item in **{prop_name}** list supports the following keys:"
+)
+SCHEMA_EXAMPLES_HEADER = "\n**Examples**::\n\n"
+SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
class SchemaValidationError(ValueError):
@@ -52,10 +59,12 @@ class SchemaValidationError(ValueError):
"""
self.schema_errors = schema_errors
error_messages = [
- '{0}: {1}'.format(config_key, message)
- for config_key, message in schema_errors]
+ "{0}: {1}".format(config_key, message)
+ for config_key, message in schema_errors
+ ]
message = "Cloud config schema errors: {0}".format(
- ', '.join(error_messages))
+ ", ".join(error_messages)
+ )
super(SchemaValidationError, self).__init__(message)
@@ -68,60 +77,142 @@ def is_schema_byte_string(checker, instance):
from jsonschema import Draft4Validator
except ImportError:
return False
- return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
- isinstance(instance, (bytes,)))
+ return Draft4Validator.TYPE_CHECKER.is_type(
+ instance, "string"
+ ) or isinstance(instance, (bytes,))
+
+
+def get_jsonschema_validator():
+ """Get metaschema validator and format checker
+
+ Older versions of jsonschema require some compatibility changes.
+
+ @returns: Tuple: (jsonschema.Validator, FormatChecker)
+ @raises: ImportError when jsonschema is not present
+ """
+ from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create
+
+ # Allow for bytes to be presented as an acceptable valid value for string
+ # type jsonschema attributes in cloud-init's schema.
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+
+ strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA)
+ strict_metaschema["additionalProperties"] = False
+ # This additional label allows us to specify a different name
+ # than the property key when generating docs.
+ # This is especially useful when using a "patternProperties" regex,
+ # otherwise the property label in the generated docs will be a
+ # regular expression.
+ # http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties
+ strict_metaschema["properties"]["label"] = {"type": "string"}
-def validate_cloudconfig_schema(config, schema, strict=False):
+ if hasattr(Draft4Validator, "TYPE_CHECKER"): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ "string", is_schema_byte_string
+ )
+ cloudinitValidator = create(
+ meta_schema=strict_metaschema,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ type_checker=type_checker,
+ )
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES # pylint: disable=E1101
+ # Allow bytes as well as string (and disable a spurious unsupported
+ # assignment-operation pylint warning which appears because this
+ # code path isn't written against the latest jsonschema).
+ types["string"] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create( # pylint: disable=E1123
+ meta_schema=strict_metaschema,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types,
+ )
+ return (cloudinitValidator, FormatChecker)
+
+
+def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
+ """Validate provided schema meets the metaschema definition. Return strict
+ Validator and FormatChecker for use in validation
+ @param validator: Draft4Validator instance used to validate the schema
+ @param schema: schema to validate
+ @param throw: Sometimes the validator and checker are required, even if
+ the schema is invalid. Toggle for whether to raise
+ SchemaValidationError or log warnings.
+
+ @raises: ImportError when jsonschema is not present
+ @raises: SchemaValidationError when the schema is invalid
+ """
+
+ from jsonschema.exceptions import SchemaError
+
+ try:
+ validator.check_schema(schema)
+ except SchemaError as err:
+ # Raise SchemaValidationError to avoid jsonschema imports at call
+ # sites
+ if throw:
+ raise SchemaValidationError(
+ schema_errors=(
+ (".".join([str(p) for p in err.path]), err.message),
+ )
+ ) from err
+ LOG.warning(
+ "Meta-schema validation failed, attempting to validate config "
+ "anyway: %s",
+ err,
+ )
+
+
+def validate_cloudconfig_schema(
+ config: dict,
+ schema: dict = None,
+ strict: bool = False,
+ strict_metaschema: bool = False,
+):
"""Validate provided config meets the schema definition.
@param config: Dict of cloud configuration settings validated against
- schema.
+ schema. Ignored if strict_metaschema=True
@param schema: jsonschema dict describing the supported schema definition
- for the cloud config module (config.cc_*).
+ for the cloud config module (config.cc_*). If None, validate against
+ global schema.
@param strict: Boolean, when True raise SchemaValidationErrors instead of
logging warnings.
+ @param strict_metaschema: Boolean, when True validates schema using strict
+ metaschema definition at runtime (currently unused)
@raises: SchemaValidationError when provided config does not validate
against the provided schema.
+ @raises: RuntimeError when provided config sourced from YAML is not a dict.
"""
+ if schema is None:
+ schema = get_schema()
try:
- from jsonschema import Draft4Validator, FormatChecker
- from jsonschema.validators import create, extend
+ (cloudinitValidator, FormatChecker) = get_jsonschema_validator()
+ if strict_metaschema:
+ validate_cloudconfig_metaschema(
+ cloudinitValidator, schema, throw=False
+ )
except ImportError:
- logging.debug(
- 'Ignoring schema validation. python-jsonschema is not present')
+ LOG.debug("Ignoring schema validation. jsonschema is not present")
return
- # Allow for bytes to be presented as an acceptable valid value for string
- # type jsonschema attributes in cloud-init's schema.
- # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
- if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
- type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- 'string', is_schema_byte_string)
- cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
- else: # jsonschema 2.6 workaround
- types = Draft4Validator.DEFAULT_TYPES
- # Allow bytes as well as string (and disable a spurious
- # unsupported-assignment-operation pylint warning which appears because
- # this code path isn't written against the latest jsonschema).
- types['string'] = (str, bytes) # pylint: disable=E1137
- cloudinitValidator = create(
- meta_schema=Draft4Validator.META_SCHEMA,
- validators=Draft4Validator.VALIDATORS,
- version="draft4",
- default_types=types)
validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
- path = '.'.join([str(p) for p in error.path])
+ path = ".".join([str(p) for p in error.path])
errors += ((path, error.message),)
if errors:
if strict:
raise SchemaValidationError(errors)
else:
- messages = ['{0}: {1}'.format(k, msg) for k, msg in errors]
- logging.warning('Invalid config:\n%s', '\n'.join(messages))
+ messages = ["{0}: {1}".format(k, msg) for k, msg in errors]
+ LOG.warning(
+ "Invalid cloud-config provided:\n%s", "\n".join(messages)
+ )
def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
@@ -136,14 +227,23 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if not schema_errors:
return original_content
schemapaths = {}
- if cloudconfig:
- schemapaths = _schemapath_for_cloudconfig(
- cloudconfig, original_content)
errors_by_line = defaultdict(list)
error_footer = []
+ error_header = "# Errors: -------------\n{0}\n\n"
annotated_content = []
+ lines = original_content.decode().split("\n")
+ if not isinstance(cloudconfig, dict):
+ # Return a meaningful message on empty cloud-config
+ return "\n".join(
+ lines
+ + [error_header.format("# E1: Cloud-config is not a YAML dict.")]
+ )
+ if cloudconfig:
+ schemapaths = _schemapath_for_cloudconfig(
+ cloudconfig, original_content
+ )
for path, msg in schema_errors:
- match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+ match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
line, col = match.groups()
errors_by_line[int(line)].append(msg)
@@ -151,24 +251,24 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
col = None
errors_by_line[schemapaths[path]].append(msg)
if col is not None:
- msg = 'Line {line} column {col}: {msg}'.format(
- line=line, col=col, msg=msg)
- lines = original_content.decode().split('\n')
+ msg = "Line {line} column {col}: {msg}".format(
+ line=line, col=col, msg=msg
+ )
error_index = 1
for line_number, line in enumerate(lines, 1):
errors = errors_by_line[line_number]
if errors:
error_label = []
for error in errors:
- error_label.append('E{0}'.format(error_index))
- error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_label.append("E{0}".format(error_index))
+ error_footer.append("# E{0}: {1}".format(error_index, error))
error_index += 1
- annotated_content.append(line + '\t\t# ' + ','.join(error_label))
+ annotated_content.append(line + "\t\t# " + ",".join(error_label))
+
else:
annotated_content.append(line)
- annotated_content.append(
- '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
- return '\n'.join(annotated_content)
+ annotated_content.append(error_header.format("\n".join(error_footer)))
+ return "\n".join(annotated_content)
def validate_cloudconfig_file(config_path, schema, annotate=False):
@@ -196,15 +296,18 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
else:
if not os.path.exists(config_path):
raise RuntimeError(
- 'Configfile {0} does not exist'.format(
- config_path
- )
+ "Configfile {0} does not exist".format(config_path)
)
content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
- ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
- config_path, CLOUD_CONFIG_HEADER.decode())),)
+ (
+ "format-l1.c1",
+ 'File {0} needs to begin with "{1}"'.format(
+ config_path, CLOUD_CONFIG_HEADER.decode()
+ ),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
@@ -214,27 +317,36 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
except (yaml.YAMLError) as e:
line = column = 1
mark = None
- if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
- mark = getattr(e, 'context_mark')
- elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
- mark = getattr(e, 'problem_mark')
+ if hasattr(e, "context_mark") and getattr(e, "context_mark"):
+ mark = getattr(e, "context_mark")
+ elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
+ mark = getattr(e, "problem_mark")
if mark:
line = mark.line + 1
column = mark.column + 1
- errors = (('format-l{line}.c{col}'.format(line=line, col=column),
- 'File {0} is not valid yaml. {1}'.format(
- config_path, str(e))),)
+ errors = (
+ (
+ "format-l{line}.c{col}".format(line=line, col=column),
+ "File {0} is not valid yaml. {1}".format(config_path, str(e)),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
raise error from e
+ if not isinstance(cloudconfig, dict):
+ # Return a meaningful message on empty cloud-config
+ if not annotate:
+ raise RuntimeError("Cloud-config is not a YAML dict.")
try:
- validate_cloudconfig_schema(
- cloudconfig, schema, strict=True)
+ validate_cloudconfig_schema(cloudconfig, schema, strict=True)
except SchemaValidationError as e:
if annotate:
- print(annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ cloudconfig, content, e.schema_errors
+ )
+ )
raise
@@ -244,29 +356,30 @@ def _schemapath_for_cloudconfig(config, original_content):
@param config: The yaml.loaded config dictionary of a cloud-config file.
@param original_content: The simple file content of the cloud-config file
"""
- # FIXME Doesn't handle multi-line lists or multi-line strings
- content_lines = original_content.decode().split('\n')
+ # TODO( handle multi-line lists or multi-line strings, inline dicts)
+ content_lines = original_content.decode().split("\n")
schema_line_numbers = {}
list_index = 0
- RE_YAML_INDENT = r'^(\s*)'
+ RE_YAML_INDENT = r"^(\s*)"
scopes = []
+ if not config:
+ return {} # No YAML config dict, no schemapaths to annotate
for line_number, line in enumerate(content_lines, 1):
indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
line = line.strip()
- if not line or line.startswith('#'):
+ if not line or line.startswith("#"):
continue
if scopes:
previous_depth, path_prefix = scopes[-1]
else:
previous_depth = -1
- path_prefix = ''
- if line.startswith('- '):
+ path_prefix = ""
+ if line.startswith("- "):
# Process list items adding a list_index to the path prefix
- previous_list_idx = '.%d' % (list_index - 1)
+ previous_list_idx = ".%d" % (list_index - 1)
if path_prefix and path_prefix.endswith(previous_list_idx):
- path_prefix = path_prefix[:-len(previous_list_idx)]
+ path_prefix = path_prefix[: -len(previous_list_idx)]
key = str(list_index)
- schema_line_numbers[key] = line_number
item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
item_indent += 1 # For the leading '-' character
previous_depth = indent_depth
@@ -276,53 +389,63 @@ def _schemapath_for_cloudconfig(config, original_content):
else:
# Process non-list lines setting value if present
list_index = 0
- key, value = line.split(':', 1)
- if path_prefix:
+ key, value = line.split(":", 1)
+ if path_prefix and indent_depth > previous_depth:
# Append any existing path_prefix for a fully-pathed key
- key = path_prefix + '.' + key
+ key = path_prefix + "." + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
if list_index > 0 and indent_depth == previous_depth:
- path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ path_prefix = ".".join(path_prefix.split(".")[:-1])
break
else:
previous_depth = -1
- path_prefix = ''
+ path_prefix = ""
scopes.append((indent_depth, key))
if value:
value = value.strip()
- if value.startswith('['):
- scopes.append((indent_depth + 2, key + '.0'))
+ if value.startswith("["):
+ scopes.append((indent_depth + 2, key + ".0"))
for inner_list_index in range(0, len(yaml.safe_load(value))):
- list_key = key + '.' + str(inner_list_index)
+ list_key = key + "." + str(inner_list_index)
schema_line_numbers[list_key] = line_number
schema_line_numbers[key] = line_number
return schema_line_numbers
-def _get_property_type(property_dict):
- """Return a string representing a property type from a given jsonschema."""
- property_type = property_dict.get('type', SCHEMA_UNDEFINED)
- if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
- property_type = [
- str(_YAML_MAP.get(k, k)) for k in property_dict['enum']]
+def _get_property_type(property_dict: dict) -> str:
+ """Return a string representing a property type from a given
+ jsonschema.
+ """
+ property_type = property_dict.get("type")
+ if property_type is None:
+ if property_dict.get("enum"):
+ property_type = [
+ str(_YAML_MAP.get(k, k)) for k in property_dict["enum"]
+ ]
+ elif property_dict.get("oneOf"):
+ property_type = [
+ subschema["type"]
+ for subschema in property_dict.get("oneOf")
+ if subschema.get("type")
+ ]
if isinstance(property_type, list):
- property_type = '/'.join(property_type)
- items = property_dict.get('items', {})
- sub_property_type = items.get('type', '')
+ property_type = "/".join(property_type)
+ items = property_dict.get("items", {})
+ sub_property_type = items.get("type", "")
# Collect each item type
- for sub_item in items.get('oneOf', {}):
+ for sub_item in items.get("oneOf", {}):
if sub_property_type:
- sub_property_type += '/'
- sub_property_type += '(' + _get_property_type(sub_item) + ')'
+ sub_property_type += "/"
+ sub_property_type += "(" + _get_property_type(sub_item) + ")"
if sub_property_type:
- return '{0} of {1}'.format(property_type, sub_property_type)
- return property_type
+ return "{0} of {1}".format(property_type, sub_property_type)
+ return property_type or "UNDEFINED"
-def _parse_description(description, prefix):
- """Parse description from the schema in a format that we can better
+def _parse_description(description, prefix) -> str:
+ """Parse description from the meta in a format that we can better
display in our docs. This parser does three things:
- Guarantee that a paragraph will be in a single line
@@ -330,125 +453,269 @@ def _parse_description(description, prefix):
the first paragraph
- Properly align lists of items
- @param description: The original description in the schema.
+ @param description: The original description in the meta.
@param prefix: The number of spaces used to align the current description
"""
list_paragraph = prefix * 3
description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(r"\n\n", r"\n\n{}".format(prefix), description)
description = re.sub(
- r"\n\n", r"\n\n{}".format(prefix), description)
- description = re.sub(
- r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description
+ )
return description
-def _get_property_doc(schema, prefix=' '):
+def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
"""Return restructured text describing the supported schema properties."""
- new_prefix = prefix + ' '
+ new_prefix = prefix + " "
properties = []
- for prop_key, prop_config in schema.get('properties', {}).items():
- # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL
- description = prop_config.get('description', '')
-
- properties.append(SCHEMA_PROPERTY_TMPL.format(
- prefix=prefix,
- prop_name=prop_key,
- type=_get_property_type(prop_config),
- description=_parse_description(description, prefix)))
- items = prop_config.get('items')
- if items:
- if isinstance(items, list):
- for item in items:
- properties.append(
- _get_property_doc(item, prefix=new_prefix))
- elif isinstance(items, dict) and items.get('properties'):
- properties.append(SCHEMA_LIST_ITEM_TMPL.format(
- prefix=new_prefix, prop_name=prop_key))
- new_prefix += ' '
- properties.append(_get_property_doc(items, prefix=new_prefix))
- if 'properties' in prop_config:
+ property_keys = [
+ schema.get("properties", {}),
+ schema.get("patternProperties", {}),
+ ]
+
+ for props in property_keys:
+ for prop_key, prop_config in props.items():
+ if "$ref" in prop_config:
+ # Update the defined references in subschema for doc rendering
+ ref = defs[prop_config["$ref"].replace("#/$defs/", "")]
+ prop_config.update(ref)
+ # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+ description = prop_config.get("description", "")
+
+ # Prefer the optional "label" over the property key when rendering docs
+ label = prop_config.get("label", prop_key)
properties.append(
- _get_property_doc(prop_config, prefix=new_prefix))
- return '\n\n'.join(properties)
+ SCHEMA_PROPERTY_TMPL.format(
+ prefix=prefix,
+ prop_name=label,
+ description=_parse_description(description, prefix),
+ prop_type=_get_property_type(prop_config),
+ )
+ )
+ items = prop_config.get("items")
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(
+ item, defs=defs, prefix=new_prefix
+ )
+ )
+ elif isinstance(items, dict) and (
+ items.get("properties") or items.get("patternProperties")
+ ):
+ properties.append(
+ SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=label
+ )
+ )
+ new_prefix += " "
+ properties.append(
+ _get_property_doc(items, defs=defs, prefix=new_prefix)
+ )
+ if (
+ "properties" in prop_config
+ or "patternProperties" in prop_config
+ ):
+ properties.append(
+ _get_property_doc(
+ prop_config, defs=defs, prefix=new_prefix
+ )
+ )
+ return "\n\n".join(properties)
-def _get_schema_examples(schema, prefix=''):
- """Return restructured text describing the schema examples if present."""
- examples = schema.get('examples')
+def _get_examples(meta: MetaSchema) -> str:
+ """Return restructured text describing the meta examples if present."""
+ examples = meta.get("examples")
if not examples:
- return ''
+ return ""
rst_content = SCHEMA_EXAMPLES_HEADER
for count, example in enumerate(examples):
# Python2.6 is missing textwrap.indent
- lines = example.split('\n')
- indented_lines = [' {0}'.format(line) for line in lines]
+ lines = example.split("\n")
+ indented_lines = [" {0}".format(line) for line in lines]
if rst_content != SCHEMA_EXAMPLES_HEADER:
indented_lines.insert(
- 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
- rst_content += '\n'.join(indented_lines)
+ 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)
+ )
+ rst_content += "\n".join(indented_lines)
return rst_content
-def get_schema_doc(schema):
- """Return reStructured text rendering the provided jsonschema.
+def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
+ """Return reStructured text rendering the provided metadata.
- @param schema: Dict of jsonschema to render.
- @raise KeyError: If schema lacks an expected key.
+ @param meta: Dict of metadata to render.
+ @param schema: Optional module schema, if absent, read global schema.
+ @raise KeyError: If metadata lacks an expected key.
"""
- schema_copy = deepcopy(schema)
- schema_copy['property_doc'] = _get_property_doc(schema)
- schema_copy['examples'] = _get_schema_examples(schema)
- schema_copy['distros'] = ', '.join(schema['distros'])
+
+ if schema is None:
+ schema = get_schema()
+ if not meta or not schema:
+ raise ValueError("Expected non-empty meta and schema")
+ keys = set(meta.keys())
+ expected = set(
+ {
+ "id",
+ "title",
+ "examples",
+ "frequency",
+ "distros",
+ "description",
+ "name",
+ }
+ )
+ error_message = ""
+ if expected - keys:
+ error_message = "Missing expected keys in module meta: {}".format(
+ expected - keys
+ )
+ elif keys - expected:
+ error_message = (
+ "Additional unexpected keys found in module meta: {}".format(
+ keys - expected
+ )
+ )
+ if error_message:
+ raise KeyError(error_message)
+
+ # cast away type annotation
+ meta_copy = dict(deepcopy(meta))
+ defs = schema.get("$defs", {})
+ if defs.get(meta["id"]):
+ schema = defs.get(meta["id"])
+ try:
+ meta_copy["property_doc"] = _get_property_doc(schema, defs=defs)
+ except AttributeError:
+ LOG.warning("Unable to render property_doc due to invalid schema")
+ meta_copy["property_doc"] = ""
+ meta_copy["examples"] = _get_examples(meta)
+ meta_copy["distros"] = ", ".join(meta["distros"])
# Need an underbar of the same length as the name
- schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name'])
- return SCHEMA_DOC_TMPL.format(**schema_copy)
+ meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"])
+ template = SCHEMA_DOC_TMPL.format(**meta_copy)
+ return template
-FULL_SCHEMA = None
+def get_modules() -> dict:
+ configs_dir = os.path.dirname(os.path.abspath(__file__))
+ return find_modules(configs_dir)
-def get_schema():
- """Return jsonschema coalesced from all cc_* cloud-config module."""
- global FULL_SCHEMA
- if FULL_SCHEMA:
- return FULL_SCHEMA
- full_schema = {
- '$schema': 'http://json-schema.org/draft-04/schema#',
- 'id': 'cloud-config-schema', 'allOf': []}
+def load_doc(requested_modules: list) -> str:
+ """Load module docstrings
- configs_dir = os.path.dirname(os.path.abspath(__file__))
- potential_handlers = find_modules(configs_dir)
- for (_fname, mod_name) in potential_handlers.items():
- mod_locs, _looked_locs = importer.find_module(
- mod_name, ['cloudinit.config'], ['schema'])
+ Docstrings are generated on module load. Reduce, reuse, recycle.
+ """
+ docs = ""
+ all_modules = list(get_modules().values()) + ["all"]
+ invalid_docs = set(requested_modules).difference(set(all_modules))
+ if invalid_docs:
+ error(
+ "Invalid --docs value {}. Must be one of: {}".format(
+ list(invalid_docs),
+ ", ".join(all_modules),
+ )
+ )
+ for mod_name in all_modules:
+ if "all" in requested_modules or mod_name in requested_modules:
+ (mod_locs, _) = importer.find_module(
+ mod_name, ["cloudinit.config"], ["meta"]
+ )
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ docs += mod.__doc__ or ""
+ return docs
+
+
+def get_schema() -> dict:
+ """Return jsonschema coalesced from all cc_* cloud-config modules."""
+ schema_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "cloud-init-schema.json"
+ )
+ full_schema = None
+ try:
+ full_schema = json.loads(load_file(schema_file))
+ except Exception as e:
+ LOG.warning("Cannot parse JSON schema file %s. %s", schema_file, e)
+ if not full_schema:
+ LOG.warning(
+ "No base JSON schema files found at %s."
+ " Setting default empty schema",
+ schema_file,
+ )
+ full_schema = {
+ "$defs": {},
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "allOf": [],
+ }
+
+ # TODO( Drop the get_modules loop when all legacy cc_* schema migrates )
+ # Supplement base_schema with any legacy modules which still contain a
+ # "schema" attribute. Legacy cc_* modules will be migrated to use the
+ # store module schema in the composite cloud-init-schema-<version>.json
+ # and will drop "schema" at that point.
+ for (_, mod_name) in get_modules().items():
+ # All cc_* modules need a "meta" attribute to represent schema defs
+ (mod_locs, _) = importer.find_module(
+ mod_name, ["cloudinit.config"], ["schema"]
+ )
if mod_locs:
mod = importer.import_module(mod_locs[0])
- full_schema['allOf'].append(mod.schema)
- FULL_SCHEMA = full_schema
+ full_schema["allOf"].append(mod.schema)
return full_schema
-def error(message):
- print(message, file=sys.stderr)
- sys.exit(1)
+def get_meta() -> dict:
+ """Return metadata coalesced from all cc_* cloud-config module."""
+ full_meta = dict()
+ for (_, mod_name) in get_modules().items():
+ mod_locs, _ = importer.find_module(
+ mod_name, ["cloudinit.config"], ["meta"]
+ )
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ full_meta[mod.meta["id"]] = mod.meta
+ return full_meta
def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudconfig-schema',
- description='Validate cloud-config files or document schema')
- parser.add_argument('-c', '--config-file',
- help='Path of the cloud-config yaml file to validate')
- parser.add_argument('--system', action='store_true', default=False,
- help='Validate the system cloud-config userdata')
- parser.add_argument('-d', '--docs', nargs='+',
- help=('Print schema module docs. Choices: all or'
- ' space-delimited cc_names.'))
- parser.add_argument('--annotate', action="store_true", default=False,
- help='Annotate existing cloud-config file with errors')
+ prog="cloudconfig-schema",
+ description="Validate cloud-config files or document schema",
+ )
+ parser.add_argument(
+ "-c",
+ "--config-file",
+ help="Path of the cloud-config yaml file to validate",
+ )
+ parser.add_argument(
+ "--system",
+ action="store_true",
+ default=False,
+ help="Validate the system cloud-config userdata",
+ )
+ parser.add_argument(
+ "-d",
+ "--docs",
+ nargs="+",
+ help=(
+ "Print schema module docs. Choices: all or"
+ " space-delimited cc_names."
+ ),
+ )
+ parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Annotate existing cloud-config file with errors",
+ )
return parser
@@ -456,12 +723,15 @@ def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.docs, args.system]
if len([arg for arg in exclusive_args if arg]) != 1:
- error('Expected one of --config-file, --system or --docs arguments')
+ error("Expected one of --config-file, --system or --docs arguments")
+ if args.annotate and args.docs:
+ error("Invalid flag combination. Cannot use --annotate with --docs")
full_schema = get_schema()
if args.config_file or args.system:
try:
validate_cloudconfig_file(
- args.config_file, full_schema, args.annotate)
+ args.config_file, full_schema, args.annotate
+ )
except SchemaValidationError as e:
if not args.annotate:
error(str(e))
@@ -474,25 +744,17 @@ def handle_schema_args(name, args):
cfg_name = args.config_file
print("Valid cloud-config:", cfg_name)
elif args.docs:
- schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
- schema_ids += ['all']
- invalid_docs = set(args.docs).difference(set(schema_ids))
- if invalid_docs:
- error('Invalid --docs value {0}. Must be one of: {1}'.format(
- list(invalid_docs), ', '.join(schema_ids)))
- for subschema in full_schema['allOf']:
- if 'all' in args.docs or subschema['id'] in args.docs:
- print(get_schema_doc(subschema))
+ print(load_doc(args.docs))
def main():
"""Tool to validate schema of a cloud-config file or print schema docs."""
parser = get_parser()
- handle_schema_args('cloudconfig-schema', parser.parse_args())
+ handle_schema_args("cloudconfig-schema", parser.parse_args())
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py
deleted file mode 100644
index 2a6bb10b..00000000
--- a/cloudinit/config/tests/test_apt_pipelining.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests cc_apt_pipelining handler"""
-
-import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestAptPipelining(CiTestCase):
-
- @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
- def test_not_disabled_by_default(self, m_write_file):
- """ensure that default behaviour is to not disable pipelining"""
- cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None)
- self.assertEqual(0, m_write_file.call_count)
-
- @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
- def test_false_disables_pipelining(self, m_write_file):
- """ensure that pipelining can be disabled with correct config"""
- cc_apt_pipelining.handle(
- 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None)
- self.assertEqual(1, m_write_file.call_count)
- args, _ = m_write_file.call_args
- self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0])
- self.assertIn('Pipeline-Depth "0"', args[1])
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
deleted file mode 100644
index b00f2083..00000000
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests cc_disable_ec2_metadata handler"""
-
-import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-DISABLE_CFG = {'disable_ec2_metadata': 'true'}
-
-
-class TestEC2MetadataRoute(CiTestCase):
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_ifconfig(self, m_subp, m_which):
- """Set the route if ifconfig command is available"""
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- m_subp.assert_called_with(
- ['route', 'add', '-host', '169.254.169.254', 'reject'],
- capture=False)
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_ip(self, m_subp, m_which):
- """Set the route if ip command is available"""
- m_which.side_effect = lambda x: x if x == 'ip' else None
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- m_subp.assert_called_with(
- ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
- capture=False)
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_no_tool(self, m_subp, m_which):
- """Log error when neither route nor ip commands are available"""
- m_which.return_value = None # Find neither ifconfig nor ip
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- self.assertEqual(
- [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
- m_subp.assert_not_called()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
deleted file mode 100644
index 56510fd6..00000000
--- a/cloudinit/config/tests/test_mounts.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_mounts import create_swapfile
-from cloudinit.subp import ProcessExecutionError
-
-
-M_PATH = 'cloudinit.config.cc_mounts.'
-
-
-class TestCreateSwapfile:
-
- @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
- @mock.patch(M_PATH + 'util.get_mount_info')
- @mock.patch(M_PATH + 'subp.subp')
- def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
- swap_file = tmpdir.join("swap-file")
- fname = str(swap_file)
-
- # Some of the calls to subp.subp should create the swap file; this
- # roughly approximates that
- m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
-
- m_get_mount_info.return_value = (mock.ANY, fstype)
-
- create_swapfile(fname, '')
- assert mock.call(['mkswap', fname]) in m_subp.call_args_list
-
- @mock.patch(M_PATH + "util.get_mount_info")
- @mock.patch(M_PATH + "subp.subp")
- def test_fallback_from_fallocate_to_dd(
- self, m_subp, m_get_mount_info, caplog, tmpdir
- ):
- swap_file = tmpdir.join("swap-file")
- fname = str(swap_file)
-
- def subp_side_effect(cmd, *args, **kwargs):
- # Mock fallocate failing, to initiate fallback
- if cmd[0] == "fallocate":
- raise ProcessExecutionError()
-
- m_subp.side_effect = subp_side_effect
- # Use ext4 so both fallocate and dd are valid swap creation methods
- m_get_mount_info.return_value = (mock.ANY, "ext4")
-
- create_swapfile(fname, "")
-
- cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
- assert "fallocate" in cmds, "fallocate was not called"
- assert "dd" in cmds, "fallocate failure did not fallback to dd"
-
- assert cmds.index("dd") > cmds.index(
- "fallocate"
- ), "dd ran before fallocate"
-
- assert mock.call(["mkswap", fname]) in m_subp.call_args_list
-
- msg = "fallocate swap creation failed, will attempt with dd"
- assert msg in caplog.text
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
deleted file mode 100644
index 6546a0b5..00000000
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-
-
-EXPECTED_HEADER = """\
-# Your system has been configured with 'manage-resolv-conf' set to true.
-# As a result, cloud-init has written this file with configuration data
-# that it has been provided. Cloud-init, by default, will write this file
-# a single time (PER_ONCE).
-#\n\n"""
-
-
-class TestGenerateResolvConf:
- @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
- generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
-
- assert [
- mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
- ] == m_render_to_file.call_args_list
-
- @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_target_fname_is_used_if_passed(self, m_render_to_file):
- generate_resolv_conf(
- "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
- )
-
- assert [
- mock.call(mock.ANY, "/use/this/path", mock.ANY)
- ] == m_render_to_file.call_args_list
-
- # Patch in templater so we can assert on the actual generated content
- @mock.patch("cloudinit.templater.util.write_file")
- # Parameterise with the value to be passed to generate_resolv_conf as the
- # params parameter, and the expected line after the header as
- # expected_extra_line.
- @pytest.mark.parametrize(
- "params,expected_extra_line",
- [
- # No options
- ({}, None),
- # Just a true flag
- ({"options": {"foo": True}}, "options foo"),
- # Just a false flag
- ({"options": {"foo": False}}, None),
- # Just an option
- ({"options": {"foo": "some_value"}}, "options foo:some_value"),
- # A true flag and an option
- (
- {"options": {"foo": "some_value", "bar": True}},
- "options bar foo:some_value",
- ),
- # Two options
- (
- {"options": {"foo": "some_value", "bar": "other_value"}},
- "options bar:other_value foo:some_value",
- ),
- # Everything
- (
- {
- "options": {
- "foo": "some_value",
- "bar": "other_value",
- "baz": False,
- "spam": True,
- }
- },
- "options spam bar:other_value foo:some_value",
- ),
- ],
- )
- def test_flags_and_options(
- self, m_write_file, params, expected_extra_line
- ):
- generate_resolv_conf("templates/resolv.conf.tmpl", params)
-
- expected_content = EXPECTED_HEADER
- if expected_extra_line is not None:
- # If we have any extra lines, expect a trailing newline
- expected_content += "\n".join([expected_extra_line, ""])
- assert [
- mock.call(mock.ANY, expected_content, mode=mock.ANY)
- ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
deleted file mode 100644
index daa1ef51..00000000
--- a/cloudinit/config/tests/test_set_passwords.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit.config import cc_set_passwords as setpass
-from cloudinit.tests.helpers import CiTestCase
-from cloudinit import util
-
-MODPATH = "cloudinit.config.cc_set_passwords."
-
-
-class TestHandleSshPwauth(CiTestCase):
- """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
-
- with_logs = True
-
- @mock.patch(MODPATH + "subp.subp")
- def test_unknown_value_logs_warning(self, m_subp):
- setpass.handle_ssh_pwauth("floo")
- self.assertIn("Unrecognized value: ssh_pwauth=floo",
- self.logs.getvalue())
- m_subp.assert_not_called()
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["systemctl"], service_name="myssh")
- self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
- m_subp.call_args)
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["service"], service_name="myssh")
- self.assertEqual(mock.call(["service", "myssh", "restart"]),
- m_subp.call_args)
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "subp.subp")
- def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
- """If config is not updated, then no system restart should be done."""
- setpass.handle_ssh_pwauth(True)
- m_subp.assert_not_called()
- self.assertIn("No need to restart SSH", self.logs.getvalue())
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
- """If 'unchanged', then no updates to config and no restart."""
- setpass.handle_ssh_pwauth(
- "unchanged", service_cmd=["systemctl"], service_name="myssh")
- m_update_ssh_config.assert_not_called()
- m_subp.assert_not_called()
-
- @mock.patch(MODPATH + "subp.subp")
- def test_valid_change_values(self, m_subp):
- """If value is a valid changen value, then update should be called."""
- upname = MODPATH + "update_ssh_config"
- optname = "PasswordAuthentication"
- for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
- optval = "yes" if value in util.TRUE_STRINGS else "no"
- with mock.patch(upname, return_value=False) as m_update:
- setpass.handle_ssh_pwauth(value)
- m_update.assert_called_with({optname: optval})
- m_subp.assert_not_called()
-
-
-class TestSetPasswordsHandle(CiTestCase):
- """Test cc_set_passwords.handle"""
-
- with_logs = True
-
- def setUp(self):
- super(TestSetPasswordsHandle, self).setUp()
- self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
-
- def test_handle_on_empty_config(self, *args):
- """handle logs that no password has changed when config is empty."""
- cloud = self.tmp_cloud(distro='ubuntu')
- setpass.handle(
- 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
- self.assertEqual(
- "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
- 'ssh_pwauth=None\n',
- self.logs.getvalue())
-
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
- """handle parses command password hashes."""
- cloud = self.tmp_cloud(distro='ubuntu')
- valid_hashed_pwds = [
- 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
- 'Dlew1Va',
- 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
- 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
- cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'subp.subp') as m_subp:
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
- self.assertIn(
- "DEBUG: Setting hashed password for ['root', 'ubuntu']",
- self.logs.getvalue())
- self.assertEqual(
- [mock.call(['chpasswd', '-e'],
- '\n'.join(valid_hashed_pwds) + '\n')],
- m_subp.call_args_list)
-
- @mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "subp.subp")
- def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_bsd):
- """BSD don't use chpasswd"""
- m_is_bsd.return_value = True
- cloud = self.tmp_cloud(distro='freebsd')
- valid_pwds = ['ubuntu:passw0rd']
- cfg = {'chpasswd': {'list': valid_pwds}}
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertEqual([
- mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
- logstring="chpasswd for ubuntu"),
- mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
- m_subp.call_args_list)
-
- @mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_bsd):
- """handle parses command set random passwords."""
- m_is_bsd.return_value = False
- cloud = self.tmp_cloud(distro='ubuntu')
- valid_random_pwds = [
- 'root:R',
- 'ubuntu:RANDOM']
- cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'subp.subp') as m_subp:
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
- self.assertNotEqual(
- [mock.call(['chpasswd'],
- '\n'.join(valid_random_pwds) + '\n')],
- m_subp.call_args_list)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
deleted file mode 100644
index db7fb726..00000000
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_ubuntu_advantage import (
- configure_ua, handle, maybe_install_ua_tools, schema)
-from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import subp
-from cloudinit.tests.helpers import (
- CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
-
-
-# Module path used in mocks
-MPATH = 'cloudinit.config.cc_ubuntu_advantage'
-
-
-class FakeCloud(object):
- def __init__(self, distro):
- self.distro = distro
-
-
-class TestConfigureUA(CiTestCase):
-
- with_logs = True
- allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
-
- def setUp(self):
- super(TestConfigureUA, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_error(self, m_subp):
- """Errors from ua attach command are raised."""
- m_subp.side_effect = subp.ProcessExecutionError(
- 'Invalid token SomeToken')
- with self.assertRaises(RuntimeError) as context_manager:
- configure_ua(token='SomeToken')
- self.assertEqual(
- 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
- ' running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid token SomeToken\nStderr: -',
- str(context_manager.exception))
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_token(self, m_subp):
- """When token is provided, attach the machine to ua using the token."""
- configure_ua(token='SomeToken')
- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_on_service_error(self, m_subp):
- """all services should be enabled and then any failures raised"""
-
- def fake_subp(cmd, capture=None):
- fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
- if cmd in fail_cmds and capture:
- svc = cmd[-1]
- raise subp.ProcessExecutionError(
- 'Invalid {} credentials'.format(svc.upper()))
-
- m_subp.side_effect = fake_subp
-
- with self.assertRaises(RuntimeError) as context_manager:
- configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'esm'], capture=True),
- mock.call(['ua', 'enable', 'cc'], capture=True),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertIn(
- 'WARNING: Failure enabling "esm":\nUnexpected error'
- ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid ESM credentials\nStderr: -\n',
- self.logs.getvalue())
- self.assertIn(
- 'WARNING: Failure enabling "cc":\nUnexpected error'
- ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid CC credentials\nStderr: -\n',
- self.logs.getvalue())
- self.assertEqual(
- 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
- str(context_manager.exception))
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_empty_services(self, m_subp):
- """When services is an empty list, do not auto-enable attach."""
- configure_ua(token='SomeToken', enable=[])
- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_specific_services(self, m_subp):
- """When services a list, only enable specific services."""
- configure_ua(token='SomeToken', enable=['fips'])
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_string_services(self, m_subp):
- """When services a string, treat as singleton list and warn"""
- configure_ua(token='SomeToken', enable='fips')
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertEqual(
- 'WARNING: ubuntu_advantage: enable should be a list, not a'
- ' string; treating as a single enable\n'
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_weird_services(self, m_subp):
- """When services not string or list, warn but still attach"""
- configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken'])])
- self.assertEqual(
- 'WARNING: ubuntu_advantage: enable should be a list, not a'
- ' dict; skipping enabling services\n'
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
- with_logs = True
- schema = schema
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
- """If ubuntu_advantage configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
- " of type 'object'\n",
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_schema_disallows_unknown_keys(self, _cfg, _):
- """Unknown keys in ubuntu_advantage configuration emit warnings."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
- schema)
- self.assertIn(
- 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
- " are not allowed ('invalid-key' was unexpected)",
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_warn_schema_requires_token(self, _cfg, _):
- """Warn if ubuntu_advantage configuration lacks token."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'enable': ['esm']}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage:"
- " 'token' is a required property\n", self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
- """Warn when ubuntu_advantage:enable config is not a list."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
- " required property\nubuntu_advantage.enable: 'needslist'"
- " is not of type 'array'\n",
- self.logs.getvalue())
-
-
-class TestHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHandle, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.validate_cloudconfig_schema' % MPATH)
- def test_handle_no_config(self, m_schema):
- """When no ua-related configuration is provided, nothing happens."""
- cfg = {}
- handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertIn(
- "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
- ' configuration found',
- self.logs.getvalue())
- m_schema.assert_not_called()
-
- @mock.patch('%s.configure_ua' % MPATH)
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_tries_to_install_ubuntu_advantage_tools(
- self, m_install, m_cfg):
- """If ubuntu_advantage is provided, try installing ua-tools package."""
- cfg = {'ubuntu_advantage': {'token': 'valid'}}
- mycloud = FakeCloud(None)
- handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- m_install.assert_called_once_with(mycloud)
-
- @mock.patch('%s.configure_ua' % MPATH)
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_passes_credentials_and_services_to_configure_ua(
- self, m_install, m_configure_ua):
- """All ubuntu_advantage config keys are passed to configure_ua."""
- cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.configure_ua' % MPATH)
- def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
- self, m_configure_ua):
- """Warning when ubuntu-advantage key is present with new config"""
- cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- ' will attempt to continue.',
- self.logs.getvalue().splitlines()[0])
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
- def test_handle_error_on_deprecated_commands_key_dashed(self):
- """Error when commands is present in ubuntu-advantage key."""
- cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception))
-
- def test_handle_error_on_deprecated_commands_key_underscored(self):
- """Error when commands is present in ubuntu_advantage key."""
- cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception))
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.configure_ua' % MPATH)
- def test_handle_prefers_new_style_config(
- self, m_configure_ua):
- """ubuntu_advantage should be preferred over ubuntu-advantage"""
- cfg = {
- 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
- 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
- }
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- ' will attempt to continue.',
- self.logs.getvalue().splitlines()[0])
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
-
-class TestMaybeInstallUATools(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestMaybeInstallUATools, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
- """Do nothing if ubuntu-advantage-tools already exists."""
- m_which.return_value = '/usr/bin/ua' # already installed
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
- maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
- """maybe_install_ua_tools logs and raises apt update errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_raises_install_errors(self, m_which):
- """maybe_install_ua_tools logs and raises package install errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.return_value = None
- distro.install_packages.side_effect = RuntimeError(
- 'Some install error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual('Some install error', str(context_manager.exception))
- self.assertIn(
- 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_happy_path(self, m_which):
- """maybe_install_ua_tools installs ubuntu-advantage-tools."""
- m_which.return_value = None
- distro = mock.MagicMock() # No errors raised
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- distro.update_package_sources.assert_called_once_with()
- distro.install_packages.assert_called_once_with(
- ['ubuntu-advantage-tools'])
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
deleted file mode 100644
index df89ddb3..00000000
--- a/cloudinit/config/tests/test_users_groups.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-
-from cloudinit.config import cc_users_groups
-from cloudinit.tests.helpers import CiTestCase, mock
-
-MODPATH = "cloudinit.config.cc_users_groups"
-
-
-@mock.patch('cloudinit.distros.ubuntu.Distro.create_group')
-@mock.patch('cloudinit.distros.ubuntu.Distro.create_user')
-class TestHandleUsersGroups(CiTestCase):
- """Test cc_users_groups handling of config."""
-
- with_logs = True
-
- def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
- """Test handle with no config will not create users or groups."""
- cfg = {} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_user.assert_not_called()
- m_group.assert_not_called()
-
- def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
- """When users in config, create users with distro.create_user."""
- cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', default=False)])
- m_group.assert_not_called()
-
- @mock.patch('cloudinit.distros.freebsd.Distro.create_group')
- @mock.patch('cloudinit.distros.freebsd.Distro.create_user')
- def test_handle_users_in_cfg_calls_create_users_on_bsd(
- self,
- m_fbsd_user,
- m_fbsd_group,
- m_linux_user,
- m_linux_group,
- ):
- """When users in config, create users with freebsd.create_user."""
- cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True,
- 'groups': ['wheel'],
- 'shell': '/bin/tcsh'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_fbsd_user.call_args_list,
- [mock.call('freebsd', groups='wheel', lock_passwd=True,
- shell='/bin/tcsh'),
- mock.call('me2', default=False)])
- m_fbsd_group.assert_not_called()
- m_linux_group.assert_not_called()
- m_linux_user.assert_not_called()
-
- def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
- """When ssh_redirect_user is True pass default user and cloud keys."""
- cfg = {
- 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
- ssh_redirect_user='ubuntu')])
- m_group.assert_not_called()
-
- def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
- """When ssh_redirect_user is 'default' pass default username."""
- cfg = {
- 'users': ['default', {'name': 'me2',
- 'ssh_redirect_user': 'default'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
- ssh_redirect_user='ubuntu')])
- m_group.assert_not_called()
-
- def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
- """Warn when ssh_redirect_user is not 'default'."""
- cfg = {
- 'users': ['default', {'name': 'me2',
- 'ssh_redirect_user': 'snowflake'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- with self.assertRaises(ValueError) as context_manager:
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_group.assert_not_called()
- self.assertEqual(
- 'Not creating user me2. Invalid value of ssh_redirect_user:'
- ' snowflake. Expected values: true, default or false.',
- str(context_manager.exception))
-
- def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
- """When unspecified ssh_redirect_user is false and not set up."""
- cfg = {'users': ['default', {'name': 'me2'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', default=False)])
- m_group.assert_not_called()
-
- def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
- """Warn when ssh_redirect_user is True and no default user present."""
- cfg = {
- 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
- # System config defines *no* default user for the distro.
- sys_cfg = {}
- metadata = {} # no public-keys defined
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_user.assert_called_once_with('me2', default=False)
- m_group.assert_not_called()
- self.assertEqual(
- 'WARNING: Ignoring ssh_redirect_user: True for me2. No'
- ' default_user defined. Perhaps missing'
- ' cloud configuration users: [default, ..].\n',
- self.logs.getvalue())
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 8bac9c44..6db7e117 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -24,14 +24,13 @@ import platform
from cloudinit import serial
-
# these high timeouts are necessary as read may read a lot of data.
READ_TIMEOUT = 60
WRITE_TIMEOUT = 10
-SERIAL_PORT = '/dev/ttyS1'
-if platform.system() == 'Windows':
- SERIAL_PORT = 'COM2'
+SERIAL_PORT = "/dev/ttyS1"
+if platform.system() == "Windows":
+ SERIAL_PORT = "COM2"
class Cepko(object):
@@ -39,6 +38,7 @@ class Cepko(object):
One instance of that object could be used for one or more
queries to the serial port.
"""
+
request_pattern = "<\n{}\n>"
def get(self, key="", request_pattern=None):
@@ -64,17 +64,18 @@ class CepkoResult(object):
as the instance is initialized and stores the result in both raw and
marshalled format.
"""
+
def __init__(self, request):
self.request = request
self.raw_result = self._execute()
self.result = self._marshal(self.raw_result)
def _execute(self):
- connection = serial.Serial(port=SERIAL_PORT,
- timeout=READ_TIMEOUT,
- writeTimeout=WRITE_TIMEOUT)
- connection.write(self.request.encode('ascii'))
- return connection.readline().strip(b'\x04\n').decode('ascii')
+ connection = serial.Serial(
+ port=SERIAL_PORT, timeout=READ_TIMEOUT, writeTimeout=WRITE_TIMEOUT
+ )
+ connection.write(self.request.encode("ascii"))
+ return connection.readline().strip(b"\x04\n").decode("ascii")
def _marshal(self, raw_result):
try:
@@ -94,4 +95,5 @@ class CepkoResult(object):
def __iter__(self):
return self.result.__iter__()
+
# vi: ts=4 expandtab
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
index 72b51b6a..46b2e8d9 100644
--- a/cloudinit/dhclient_hook.py
+++ b/cloudinit/dhclient_hook.py
@@ -19,7 +19,7 @@ EVENTS = (UP, DOWN)
def _get_hooks_dir():
i = stages.Init()
- return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
+ return os.path.join(i.paths.get_runpath(), "dhclient.hooks")
def _filter_env_vals(info):
@@ -28,15 +28,16 @@ def _filter_env_vals(info):
new_info = {}
for k, v in info.items():
if k.startswith("DHCP4_") or k.startswith("new_"):
- key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
+ key = (k.replace("DHCP4_", "").replace("new_", "")).lower()
new_info[key] = v
return new_info
def run_hook(interface, event, data_d=None, env=None):
if event not in EVENTS:
- raise ValueError("Unexpected event '%s'. Expected one of: %s" %
- (event, EVENTS))
+ raise ValueError(
+ "Unexpected event '%s'. Expected one of: %s" % (event, EVENTS)
+ )
if data_d is None:
data_d = _get_hooks_dir()
if env is None:
@@ -58,9 +59,11 @@ def get_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- "event", help='event taken on the interface', choices=EVENTS)
+ "event", help="event taken on the interface", choices=EVENTS
+ )
parser.add_argument(
- "interface", help='the network interface being acted upon')
+ "interface", help="the network interface being acted upon"
+ )
# cloud-init main uses 'action'
parser.set_defaults(action=(NAME, handle_args))
return parser
@@ -72,12 +75,14 @@ def handle_args(name, args, data_d=None):
return run_hook(interface=args.interface, event=args.event, data_d=data_d)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
parser = get_parser()
args = parser.parse_args(args=sys.argv[1:])
return_value = handle_args(
- NAME, args, data_d=os.environ.get('_CI_DHCP_HOOK_DATA_D'))
+ NAME, args, data_d=os.environ.get("_CI_DHCP_HOOK_DATA_D")
+ )
if return_value:
sys.exit(return_value)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1e118472..76acd6a3 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -16,48 +16,53 @@ import stat
import string
import urllib.parse
from io import StringIO
+from typing import Any, Mapping, Type
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit.net import renderers
-from cloudinit import persistence
-from cloudinit import ssh_util
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.features import \
- ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
-
+from cloudinit import net, persistence, ssh_util, subp, type_utils, util
from cloudinit.distros.parsers import hosts
-from .networking import LinuxNetworking
+from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
+from cloudinit.net import activators, eni, network_state, renderers
+from cloudinit.net.network_state import parse_net_config_data
+from .networking import LinuxNetworking, Networking
# Used when a cloud-config module can be run on all cloud-init distributions.
# The value 'all' is surfaced in module documentation for distro support.
-ALL_DISTROS = 'all'
+ALL_DISTROS = "all"
OSFAMILIES = {
- 'alpine': ['alpine'],
- 'arch': ['arch'],
- 'debian': ['debian', 'ubuntu'],
- 'freebsd': ['freebsd'],
- 'gentoo': ['gentoo'],
- 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
- 'suse': ['opensuse', 'sles'],
+ "alpine": ["alpine"],
+ "arch": ["arch"],
+ "debian": ["debian", "ubuntu"],
+ "freebsd": ["freebsd"],
+ "gentoo": ["gentoo"],
+ "redhat": [
+ "almalinux",
+ "amazon",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "virtuozzo",
+ ],
+ "suse": ["opensuse", "sles"],
}
LOG = logging.getLogger(__name__)
# This is a best guess regex, based on current EC2 AZs on 2017-12-11.
# It could break when Amazon adds new regions and new AZs.
-_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
+_EC2_AZ_RE = re.compile("^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$")
# Default NTP Client Configurations
-PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+PREFERRED_NTP_CLIENTS = ["chrony", "systemd-timesyncd", "ntp", "ntpdate"]
# Letters/Digits/Hyphen characters, for use in domain name validation
LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
@@ -70,21 +75,23 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
- init_cmd = ['service'] # systemctl, service etc
- renderer_configs = {}
+ init_cmd = ["service"] # systemctl, service etc
+ renderer_configs: Mapping[str, Mapping[str, Any]] = {}
_preferred_ntp_clients = None
- networking_cls = LinuxNetworking
+ networking_cls: Type[Networking] = LinuxNetworking
# This is used by self.shutdown_command(), and can be overridden in
# subclasses
- shutdown_options_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
+ shutdown_options_map = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}
_ci_pkl_version = 1
+ prefer_fqdn = False
+ resolve_conf_fn = "/etc/resolv.conf"
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
- self.networking = self.networking_cls()
+ self.networking: Networking = self.networking_cls()
def _unpickle(self, ci_pkl_version: int) -> None:
"""Perform deserialization fixes for Distro."""
@@ -103,34 +110,38 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
raise NotImplementedError()
def _write_network(self, settings):
- raise RuntimeError(
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError(
"Legacy function '_write_network' was called in distro '%s'.\n"
- "_write_network_config needs implementation.\n" % self.name)
-
- def _write_network_config(self, settings):
- raise NotImplementedError()
+ "_write_network_config needs implementation.\n" % self.name
+ )
- def _supported_write_network_config(self, network_config):
+ def _write_network_state(self, network_state):
priority = util.get_cfg_by_path(
- self._cfg, ('network', 'renderers'), None)
+ self._cfg, ("network", "renderers"), None
+ )
name, render_cls = renderers.select(priority=priority)
- LOG.debug("Selected renderer '%s' from priority list: %s",
- name, priority)
+ LOG.debug(
+ "Selected renderer '%s' from priority list: %s", name, priority
+ )
renderer = render_cls(config=self.renderer_configs.get(name))
- renderer.render_network_config(network_config)
- return []
+ renderer.render_network_state(network_state)
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
- raise IOError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ raise IOError(
+ "Invalid timezone %s, no file found at %s" % (tz, tz_file)
+ )
return tz_file
def get_option(self, opt_name, default=None):
return self._cfg.get(opt_name, default)
+ def set_option(self, opt_name, value=None):
+ self._cfg[opt_name] = value
+
def set_hostname(self, hostname, fqdn=None):
writeable_hostname = self._select_hostname(hostname, fqdn)
self._write_hostname(writeable_hostname, self.hostname_conf_fn)
@@ -141,7 +152,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return uses_systemd()
@abc.abstractmethod
- def package_command(self, cmd, args=None, pkgs=None):
+ def package_command(self, command, args=None, pkgs=None):
raise NotImplementedError()
@abc.abstractmethod
@@ -164,10 +175,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# This resolves the package_mirrors config option
# down to a single dict of {mirror_name: mirror_url}
arch_info = self._get_arch_package_mirror_info(arch)
- return _get_package_mirror_info(data_source=data_source,
- mirror_info=arch_info)
+ return _get_package_mirror_info(
+ data_source=data_source, mirror_info=arch_info
+ )
def apply_network(self, settings, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
# this applies network where 'settings' is interfaces(5) style
# it is obsolete compared to apply_network_config
# Write it out
@@ -182,36 +195,62 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return False
def _apply_network_from_network_config(self, netconfig, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
distro = self.__class__
- LOG.warning("apply_network_config is not currently implemented "
- "for distribution '%s'. Attempting to use apply_network",
- distro)
- header = '\n'.join([
- "# Converted from network_config for distro %s" % distro,
- "# Implementation of _write_network_config is needed."
- ])
+ LOG.warning(
+ "apply_network_config is not currently implemented "
+ "for distribution '%s'. Attempting to use apply_network",
+ distro,
+ )
+ header = "\n".join(
+ [
+ "# Converted from network_config for distro %s" % distro,
+ "# Implementation of _write_network_config is needed.",
+ ]
+ )
ns = network_state.parse_net_config_data(netconfig)
contents = eni.network_state_to_eni(
- ns, header=header, render_hwaddress=True)
+ ns, header=header, render_hwaddress=True
+ )
return self.apply_network(contents, bring_up=bring_up)
def generate_fallback_config(self):
return net.generate_fallback_config()
- def apply_network_config(self, netconfig, bring_up=False):
- # apply network config netconfig
+ def apply_network_config(self, netconfig, bring_up=False) -> bool:
+ """Apply the network config.
+
+ If bring_up is True, attempt to bring up the interfaces defined by the
+ passed network config using the selected network activator.
+
+ Returns True if any interfaces failed to come up, otherwise False.
+ """
# This method is preferred to apply_network which only takes
# a much less complete network config format (interfaces(5)).
+ network_state = parse_net_config_data(netconfig)
try:
- dev_names = self._write_network_config(netconfig)
+ self._write_network_state(network_state)
except NotImplementedError:
# backwards compat until all distros have apply_network_config
return self._apply_network_from_network_config(
- netconfig, bring_up=bring_up)
+ netconfig, bring_up=bring_up
+ )
# Now try to bring them up
if bring_up:
- return self._bring_up_interfaces(dev_names)
+ LOG.debug("Bringing up newly configured network interfaces")
+ try:
+ network_activator = activators.select_activator()
+ except activators.NoActivatorException:
+ LOG.warning(
+ "No network activator found, not bringing up "
+ "network interfaces"
+ )
+ return True
+ network_activator.bring_up_all_interfaces(network_state)
+ else:
+ LOG.debug("Not bringing up newly configured network interfaces")
return False
def apply_network_config_names(self, netconfig):
@@ -248,17 +287,28 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# temporarily (until reboot, so it should
# not be depended on). Use the write
# hostname functions for 'permanent' adjustments.
- LOG.debug("Non-persistently setting the system hostname to %s",
- hostname)
+ LOG.debug(
+ "Non-persistently setting the system hostname to %s", hostname
+ )
try:
- subp.subp(['hostname', hostname])
+ subp.subp(["hostname", hostname])
except subp.ProcessExecutionError:
- util.logexc(LOG, "Failed to non-persistently adjust the system "
- "hostname to %s", hostname)
+ util.logexc(
+ LOG,
+ "Failed to non-persistently adjust the system hostname to %s",
+ hostname,
+ )
def _select_hostname(self, hostname, fqdn):
# Prefer the short hostname over the long
# fully qualified domain name
+ if (
+ util.get_cfg_option_bool(
+ self._cfg, "prefer_fqdn_over_hostname", self.prefer_fqdn
+ )
+ and fqdn
+ ):
+ return fqdn
if not hostname:
return fqdn
return hostname
@@ -300,32 +350,39 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# If the system hostname is different than the previous
# one or the desired one lets update it as well
- if ((not sys_hostname) or (sys_hostname == prev_hostname and
- sys_hostname != hostname)):
+ if (not sys_hostname) or (
+ sys_hostname == prev_hostname and sys_hostname != hostname
+ ):
update_files.append(sys_fn)
# If something else has changed the hostname after we set it
# initially, we should not overwrite those changes (we should
# only be setting the hostname once per instance)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.info("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
+ if sys_hostname and prev_hostname and sys_hostname != prev_hostname:
+ LOG.info(
+ "%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn,
+ sys_fn,
+ )
return
# Remove duplicates: in case the previous config filename
# is the same as the system config filename, don't bother
# doing it twice
update_files = set([f for f in update_files if f])
- LOG.debug("Attempting to update hostname to %s in %s files",
- hostname, len(update_files))
+ LOG.debug(
+ "Attempting to update hostname to %s in %s files",
+ hostname,
+ len(update_files),
+ )
for fn in update_files:
try:
self._write_hostname(hostname, fn)
except IOError:
- util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
- fn)
+ util.logexc(
+ LOG, "Failed to write hostname %s to %s", hostname, fn
+ )
# If the system hostname file name was provided set the
# non-fqdn as the transient hostname.
@@ -333,11 +390,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
self._apply_hostname(applying_hostname)
def update_etc_hosts(self, hostname, fqdn):
- header = ''
+ header = ""
if os.path.exists(self.hosts_fn):
eh = hosts.HostsConf(util.load_file(self.hosts_fn))
else:
- eh = hosts.HostsConf('')
+ eh = hosts.HostsConf("")
header = util.make_header(base="added")
local_ip = self._get_localhost_ip()
prev_info = eh.get_entry(local_ip)
@@ -384,20 +441,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return self._preferred_ntp_clients
def _bring_up_interface(self, device_name):
- cmd = ['ifup', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = subp.subp(cmd)
- if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
- return True
- except subp.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError
def _bring_up_interfaces(self, device_names):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
am_failed = 0
for d in device_names:
if not self._bring_up_interface(d):
@@ -407,7 +455,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return False
def get_default_user(self):
- return self.get_option('default_user')
+ return self.get_option("default_user")
def add_user(self, name, **kwargs):
"""
@@ -423,43 +471,43 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.info("User %s already exists, skipping.", name)
return
- if 'create_groups' in kwargs:
- create_groups = kwargs.pop('create_groups')
+ if "create_groups" in kwargs:
+ create_groups = kwargs.pop("create_groups")
else:
create_groups = True
- useradd_cmd = ['useradd', name]
- log_useradd_cmd = ['useradd', name]
+ useradd_cmd = ["useradd", name]
+ log_useradd_cmd = ["useradd", name]
if util.system_is_snappy():
- useradd_cmd.append('--extrausers')
- log_useradd_cmd.append('--extrausers')
+ useradd_cmd.append("--extrausers")
+ log_useradd_cmd.append("--extrausers")
# Since we are creating users, we want to carefully validate the
# inputs. If something goes wrong, we can end up with a system
# that nobody can log in to.
useradd_opts = {
- "gecos": '--comment',
- "homedir": '--home',
- "primary_group": '--gid',
- "uid": '--uid',
- "groups": '--groups',
- "passwd": '--password',
- "shell": '--shell',
- "expiredate": '--expiredate',
- "inactive": '--inactive',
- "selinux_user": '--selinux-user',
+ "gecos": "--comment",
+ "homedir": "--home",
+ "primary_group": "--gid",
+ "uid": "--uid",
+ "groups": "--groups",
+ "passwd": "--password",
+ "shell": "--shell",
+ "expiredate": "--expiredate",
+ "inactive": "--inactive",
+ "selinux_user": "--selinux-user",
}
useradd_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
- redact_opts = ['passwd']
+ redact_opts = ["passwd"]
# support kwargs having groups=[list] or groups="g1,g2"
- groups = kwargs.get('groups')
+ groups = kwargs.get("groups")
if groups:
if isinstance(groups, str):
groups = groups.split(",")
@@ -470,9 +518,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# kwargs.items loop below wants a comma-delimited string
# that can go right through to the command.
- kwargs['groups'] = ",".join(groups)
+ kwargs["groups"] = ",".join(groups)
- primary_group = kwargs.get('primary_group')
+ primary_group = kwargs.get("primary_group")
if primary_group:
groups.append(primary_group)
@@ -490,7 +538,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Redact certain fields from the logs
if key in redact_opts:
- log_useradd_cmd.extend([useradd_opts[key], 'REDACTED'])
+ log_useradd_cmd.extend([useradd_opts[key], "REDACTED"])
else:
log_useradd_cmd.extend([useradd_opts[key], val])
@@ -500,12 +548,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Don't create the home directory if so directed or if the user is a
# system user
- if kwargs.get('no_create_home') or kwargs.get('system'):
- useradd_cmd.append('-M')
- log_useradd_cmd.append('-M')
+ if kwargs.get("no_create_home") or kwargs.get("system"):
+ useradd_cmd.append("-M")
+ log_useradd_cmd.append("-M")
else:
- useradd_cmd.append('-m')
- log_useradd_cmd.append('-m')
+ useradd_cmd.append("-m")
+ log_useradd_cmd.append("-m")
# Run the command
LOG.debug("Adding user %s", name)
@@ -520,8 +568,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
Add a snappy user to the system using snappy tools
"""
- snapuser = kwargs.get('snapuser')
- known = kwargs.get('known', False)
+ snapuser = kwargs.get("snapuser")
+ known = kwargs.get("known", False)
create_user_cmd = ["snap", "create-user", "--sudoer", "--json"]
if known:
create_user_cmd.append("--known")
@@ -530,11 +578,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd,
- capture=True)
+ (out, err) = subp.subp(
+ create_user_cmd, logstring=create_user_cmd, capture=True
+ )
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
- username = jobj.get('username', None)
+ username = jobj.get("username", None)
except Exception as e:
util.logexc(LOG, "Failed to create snap user %s", name)
raise e
@@ -562,60 +611,66 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"""
# Add a snap user, if requested
- if 'snapuser' in kwargs:
+ if "snapuser" in kwargs:
return self.add_snap_user(name, **kwargs)
# Add the user
self.add_user(name, **kwargs)
# Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
+ if "plain_text_passwd" in kwargs and kwargs["plain_text_passwd"]:
+ self.set_passwd(name, kwargs["plain_text_passwd"])
# Set password if hashed password is provided and non-empty
- if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
- self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
+ if "hashed_passwd" in kwargs and kwargs["hashed_passwd"]:
+ self.set_passwd(name, kwargs["hashed_passwd"], hashed=True)
# Default to locking down the account. 'lock_passwd' defaults to True.
# Lock the account unless lock_passwd is False.
- if kwargs.get('lock_passwd', True):
+ if kwargs.get("lock_passwd", True):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs and kwargs['sudo'] is not False:
- self.write_sudo_rules(name, kwargs['sudo'])
+ if "sudo" in kwargs and kwargs["sudo"] is not False:
+ self.write_sudo_rules(name, kwargs["sudo"])
# Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
+ if "ssh_authorized_keys" in kwargs:
# Try to handle this in a smart manner.
- keys = kwargs['ssh_authorized_keys']
+ keys = kwargs["ssh_authorized_keys"]
if isinstance(keys, str):
keys = [keys]
elif isinstance(keys, dict):
keys = list(keys.values())
if keys is not None:
if not isinstance(keys, (tuple, list, set)):
- LOG.warning("Invalid type '%s' detected for"
- " 'ssh_authorized_keys', expected list,"
- " string, dict, or set.", type(keys))
+ LOG.warning(
+ "Invalid type '%s' detected for"
+ " 'ssh_authorized_keys', expected list,"
+ " string, dict, or set.",
+ type(keys),
+ )
keys = []
else:
keys = set(keys) or []
ssh_util.setup_user_keys(set(keys), name)
- if 'ssh_redirect_user' in kwargs:
- cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
+ if "ssh_redirect_user" in kwargs:
+ cloud_keys = kwargs.get("cloud_public_ssh_keys", [])
if not cloud_keys:
LOG.warning(
- 'Unable to disable SSH logins for %s given'
- ' ssh_redirect_user: %s. No cloud public-keys present.',
- name, kwargs['ssh_redirect_user'])
+ "Unable to disable SSH logins for %s given"
+ " ssh_redirect_user: %s. No cloud public-keys present.",
+ name,
+ kwargs["ssh_redirect_user"],
+ )
else:
- redirect_user = kwargs['ssh_redirect_user']
+ redirect_user = kwargs["ssh_redirect_user"]
disable_option = ssh_util.DISABLE_USER_OPTS
- disable_option = disable_option.replace('$USER', redirect_user)
- disable_option = disable_option.replace('$DISABLE_USER', name)
+ disable_option = disable_option.replace("$USER", redirect_user)
+ disable_option = disable_option.replace("$DISABLE_USER", name)
ssh_util.setup_user_keys(
- set(cloud_keys), name, options=disable_option)
+ set(cloud_keys), name, options=disable_option
+ )
return True
def lock_passwd(self, name):
@@ -623,36 +678,36 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
Lock the password of a user, i.e., disable password logins
"""
# passwd must use short '-l' due to SLES11 lacking long form '--lock'
- lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
+ lock_tools = (["passwd", "-l", name], ["usermod", "--lock", name])
try:
cmd = next(tool for tool in lock_tools if subp.which(tool[0]))
except StopIteration as e:
- raise RuntimeError((
+ raise RuntimeError(
"Unable to lock user account '%s'. No tools available. "
- " Tried: %s.") % (name, [c[0] for c in lock_tools])
+ " Tried: %s." % (name, [c[0] for c in lock_tools])
) from e
try:
subp.subp(cmd)
except Exception as e:
- util.logexc(LOG, 'Failed to disable password for user %s', name)
+ util.logexc(LOG, "Failed to disable password for user %s", name)
raise e
def expire_passwd(self, user):
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except Exception as e:
util.logexc(LOG, "Failed to set 'expire' for %s", user)
raise e
def set_passwd(self, user, passwd, hashed=False):
- pass_string = '%s:%s' % (user, passwd)
- cmd = ['chpasswd']
+ pass_string = "%s:%s" % (user, passwd)
+ cmd = ["chpasswd"]
if hashed:
# Need to use the short option name '-e' instead of '--encrypted'
# (which would be more descriptive) since SLES 11 doesn't know
# about long names.
- cmd.append('-e')
+ cmd.append("-e")
try:
subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
@@ -662,10 +717,10 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return True
- def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
# Ensure the dir is included and that
# it actually exists as a directory
- sudoers_contents = ''
+ sudoers_contents = ""
base_exists = False
if os.path.exists(sudo_base):
sudoers_contents = util.load_file(sudo_base)
@@ -673,7 +728,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
found_include = False
for line in sudoers_contents.splitlines():
line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
+ include_match = re.search(r"^[#|@]includedir\s+(.*)$", line)
if not include_match:
continue
included_dir = include_match.group(1).strip()
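
For illustration, a minimal standalone check of the widened pattern: inside a character class "|" is a literal character rather than alternation, so the new expression matches both the traditional "#includedir" and sudo 1.9's "@includedir" directive (and, incidentally, a literal "|includedir").

import re

# Sketch only: probe the widened includedir pattern used above.
_INCLUDE_RE = re.compile(r"^[#|@]includedir\s+(.*)$")
for line in ("#includedir /etc/sudoers.d", "@includedir /etc/sudoers.d"):
    match = _INCLUDE_RE.search(line)
    print(match.group(1))  # -> /etc/sudoers.d for both forms
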
@@ -686,15 +741,23 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if not found_include:
try:
if not base_exists:
- lines = [('# See sudoers(5) for more information'
- ' on "#include" directives:'), '',
- util.make_header(base="added"),
- "#includedir %s" % (path), '']
+ lines = [
+ "# See sudoers(5) for more information"
+ ' on "#include" directives:',
+ "",
+ util.make_header(base="added"),
+ "#includedir %s" % (path),
+ "",
+ ]
sudoers_contents = "\n".join(lines)
util.write_file(sudo_base, sudoers_contents, 0o440)
else:
- lines = ['', util.make_header(base="added"),
- "#includedir %s" % (path), '']
+ lines = [
+ "",
+ util.make_header(base="added"),
+ "#includedir %s" % (path),
+ "",
+ ]
sudoers_contents = "\n".join(lines)
util.append_file(sudo_base, sudoers_contents)
LOG.debug("Added '#includedir %s' to %s", path, sudo_base)
@@ -708,7 +771,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
sudo_file = self.ci_sudoers_fn
lines = [
- '',
+ "",
"# User rules for %s" % user,
]
if isinstance(rules, (list, tuple)):
@@ -741,9 +804,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
raise e
def create_group(self, name, members=None):
- group_add_cmd = ['groupadd', name]
+ group_add_cmd = ["groupadd", name]
if util.system_is_snappy():
- group_add_cmd.append('--extrausers')
+ group_add_cmd.append("--extrausers")
if not members:
members = []
@@ -761,11 +824,15 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if len(members) > 0:
for member in members:
if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning(
+ "Unable to add group member '%s' to group '%s'"
+ "; user does not exist.",
+ member,
+ name,
+ )
continue
- subp.subp(['usermod', '-a', '-G', name, member])
+ subp.subp(["usermod", "-a", "-G", name, member])
LOG.info("Added user '%s' to group '%s'", member, name)
def shutdown_command(self, *, mode, delay, message):
@@ -784,6 +851,51 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
args.append(message)
return args
+ def manage_service(self, action, service):
+ """
+ Perform the requested action on a service. This handles the common
+ 'systemctl' and 'service' cases and may be overridden in subclasses
+ as necessary.
+ May raise ProcessExecutionError
+ """
+ init_cmd = self.init_cmd
+ if self.uses_systemd() or "systemctl" in init_cmd:
+ init_cmd = ["systemctl"]
+ cmds = {
+ "stop": ["stop", service],
+ "start": ["start", service],
+ "enable": ["enable", service],
+ "restart": ["restart", service],
+ "reload": ["reload-or-restart", service],
+ "try-reload": ["reload-or-try-restart", service],
+ }
+ else:
+ cmds = {
+ "stop": [service, "stop"],
+ "start": [service, "start"],
+ "enable": [service, "start"],
+ "restart": [service, "restart"],
+ "reload": [service, "restart"],
+ "try-reload": [service, "restart"],
+ }
+ cmd = list(init_cmd) + list(cmds[action])
+ return subp.subp(cmd, capture=True)
+
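
As a rough standalone sketch of the mapping above (the bare "service" fallback and the helper name are illustrative assumptions; each distro supplies its own init_cmd):

# Sketch only: mirrors the action -> argv mapping of manage_service().
def build_service_cmd(action, service, uses_systemd=True):
    if uses_systemd:
        actions = {
            "stop": ["stop", service],
            "start": ["start", service],
            "enable": ["enable", service],
            "restart": ["restart", service],
            "reload": ["reload-or-restart", service],
            "try-reload": ["reload-or-try-restart", service],
        }
        return ["systemctl"] + actions[action]
    actions = {
        "stop": [service, "stop"],
        "start": [service, "start"],
        "enable": [service, "start"],
        "restart": [service, "restart"],
        "reload": [service, "restart"],
        "try-reload": [service, "restart"],
    }
    return ["service"] + actions[action]

assert build_service_cmd("restart", "ssh") == ["systemctl", "restart", "ssh"]
assert build_service_cmd("reload", "ssh", False) == ["service", "ssh", "restart"]
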
+ def set_keymap(self, layout, model, variant, options):
+ if self.uses_systemd():
+ subp.subp(
+ [
+ "localectl",
+ "set-x11-keymap",
+ layout,
+ model,
+ variant,
+ options,
+ ]
+ )
+ else:
+ raise NotImplementedError()
+
def _apply_hostname_transformations_to_url(url: str, transformations: list):
"""
@@ -837,7 +949,7 @@ def _sanitize_mirror_url(url: str):
* Converts it to its IDN form (see below for details)
* Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with
hyphens
- * TODO: Remove any leading/trailing hyphens from each domain name label
+ * Removes any leading/trailing hyphens from each domain name label
Before we replace any invalid domain name characters, we first need to
ensure that any valid non-ASCII characters in the hostname will not be
@@ -871,27 +983,25 @@ def _sanitize_mirror_url(url: str):
# This is an IP address, not a hostname, so no need to apply the
# transformations
lambda hostname: None if net.is_ip_address(hostname) else hostname,
-
# Encode with IDNA to get the correct characters (as `bytes`), then
# decode with ASCII so we return a `str`
- lambda hostname: hostname.encode('idna').decode('ascii'),
-
+ lambda hostname: hostname.encode("idna").decode("ascii"),
# Replace any unacceptable characters with "-"
- lambda hostname: ''.join(
+ lambda hostname: "".join(
c if c in acceptable_chars else "-" for c in hostname
),
-
# Drop leading/trailing hyphens from each part of the hostname
- lambda hostname: '.'.join(
- part.strip('-') for part in hostname.split('.')
+ lambda hostname: ".".join(
+ part.strip("-") for part in hostname.split(".")
),
]
return _apply_hostname_transformations_to_url(url, transformations)
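
A worked, self-contained sketch of the same chain applied to a bare hostname; the IP-address short-circuit is skipped, and the acceptable-character set below is an assumption based on the LDH description above:

import string

# Sketch only: letters/digits/hyphen plus "." as the label separator.
ACCEPTABLE = set(string.ascii_letters + string.digits + "-.")

def sanitize_hostname(hostname):
    # IDNA-encode so valid non-ASCII labels survive as xn-- forms
    hostname = hostname.encode("idna").decode("ascii")
    # Replace anything outside the acceptable set with "-"
    hostname = "".join(c if c in ACCEPTABLE else "-" for c in hostname)
    # Drop leading/trailing hyphens from each label
    return ".".join(part.strip("-") for part in hostname.split("."))

print(sanitize_hostname("archive.übuntu.example"))
# the non-ASCII label is punycoded (xn--...) instead of being mangled
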
-def _get_package_mirror_info(mirror_info, data_source=None,
- mirror_filter=util.search_for_mirror):
+def _get_package_mirror_info(
+ mirror_info, data_source=None, mirror_filter=util.search_for_mirror
+):
# given an arch-specific 'mirror_info' entry (from package_mirrors)
# search through the 'search' entries, and fall back appropriately
# return a dict with only {name: mirror} entries.
@@ -900,7 +1010,7 @@ def _get_package_mirror_info(mirror_info, data_source=None,
subst = {}
if data_source and data_source.availability_zone:
- subst['availability_zone'] = data_source.availability_zone
+ subst["availability_zone"] = data_source.availability_zone
# ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
# the region is us-east-1. so region = az[0:-1]
@@ -908,18 +1018,18 @@ def _get_package_mirror_info(mirror_info, data_source=None,
ec2_region = data_source.availability_zone[0:-1]
if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES:
- subst['ec2_region'] = "%s" % ec2_region
+ subst["ec2_region"] = "%s" % ec2_region
elif data_source.platform_type == "ec2":
- subst['ec2_region'] = "%s" % ec2_region
+ subst["ec2_region"] = "%s" % ec2_region
if data_source and data_source.region:
- subst['region'] = data_source.region
+ subst["region"] = data_source.region
results = {}
- for (name, mirror) in mirror_info.get('failsafe', {}).items():
+ for (name, mirror) in mirror_info.get("failsafe", {}).items():
results[name] = mirror
- for (name, searchlist) in mirror_info.get('search', {}).items():
+ for (name, searchlist) in mirror_info.get("search", {}).items():
mirrors = []
for tmpl in searchlist:
try:
@@ -953,17 +1063,20 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
def fetch(name):
- locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
+ locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"])
if not locs:
- raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
+ raise ImportError(
+ "No distribution found for distro %s (searched %s)"
+ % (name, looked_locs)
+ )
mod = importer.import_module(locs[0])
- cls = getattr(mod, 'Distro')
+ cls = getattr(mod, "Distro")
return cls
-def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
- tz_local="/etc/localtime"):
+def set_etc_timezone(
+ tz, tz_file=None, tz_conf="/etc/timezone", tz_local="/etc/localtime"
+):
util.write_file(tz_conf, str(tz).rstrip() + "\n")
# This ensures that the correct tz will be used for the system
if tz_local and tz_file:
@@ -980,7 +1093,7 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
def uses_systemd():
try:
- res = os.lstat('/run/systemd/system')
+ res = os.lstat("/run/systemd/system")
return stat.S_ISDIR(res.st_mode)
except Exception:
return False
diff --git a/tests/cloud_tests/testcases/bugs/__init__.py b/cloudinit/distros/almalinux.py
index c6452f9c..3dc0a342 100644
--- a/tests/cloud_tests/testcases/bugs/__init__.py
+++ b/cloudinit/distros/almalinux.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init bugs.
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
-See configs/bugs/README.md for more information
-"""
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index ca5bfe80..3d7d4891 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -6,13 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import distros, helpers, subp, util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
NETWORK_FILE_HEADER = """\
@@ -26,12 +21,11 @@ NETWORK_FILE_HEADER = """\
class Distro(distros.Distro):
- init_cmd = ['rc-service'] # init scripts
+ init_cmd = ["rc-service"] # init scripts
locale_conf_fn = "/etc/profile.d/locale.sh"
network_conf_fn = "/etc/network/interfaces"
renderer_configs = {
- "eni": {"eni_path": network_conf_fn,
- "eni_header": NETWORK_FILE_HEADER}
+ "eni": {"eni_path": network_conf_fn, "eni_header": NETWORK_FILE_HEADER}
}
def __init__(self, name, cfg, paths):
@@ -40,13 +34,13 @@ class Distro(distros.Distro):
# calls from repeatedly happening (when they
# should only happen, say, once per instance...)
self._runner = helpers.Runners(paths)
- self.default_locale = 'C.UTF-8'
- self.osfamily = 'alpine'
- cfg['ssh_svcname'] = 'sshd'
+ self.default_locale = "C.UTF-8"
+ self.osfamily = "alpine"
+ cfg["ssh_svcname"] = "sshd"
def get_locale(self):
"""The default locale for Alpine Linux is different than
- cloud-init's DataSource default.
+ cloud-init's DataSource default.
"""
return self.default_locale
@@ -71,33 +65,20 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('add', pkgs=pkglist)
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '-a')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
+ self.package_command("add", pkgs=pkglist)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf = HostnameConf("")
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
@@ -129,7 +110,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['apk']
+ cmd = ["apk"]
# Redirect output
cmd.append("--quiet")
@@ -141,25 +122,32 @@ class Distro(distros.Distro):
if command:
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ if command == "upgrade":
+ cmd.extend(["--update-cache", "--available"])
+
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
@property
def preferred_ntp_clients(self):
"""Allow distro to determine the preferred ntp client list"""
if not self._preferred_ntp_clients:
- self._preferred_ntp_clients = ['chrony', 'ntp']
+ self._preferred_ntp_clients = ["chrony", "ntp"]
return self._preferred_ntp_clients
- def shutdown_command(self, mode='poweroff', delay='now', message=None):
+ def shutdown_command(self, mode="poweroff", delay="now", message=None):
# called from cc_power_state_change.load_power_state
# Alpine has halt/poweroff/reboot, with the following specifics:
# - we use them rather than the generic "shutdown"
@@ -173,7 +161,7 @@ class Distro(distros.Distro):
# halt/poweroff/reboot commands take seconds rather than minutes.
if delay == "now":
# Alpine's commands do not understand "now".
- command += ['0']
+ command += ["0"]
else:
try:
command.append(str(int(delay) * 60))
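
A small worked example of the minutes-to-seconds conversion above (the "+5" delay form, meaning five minutes, follows the power_state convention used by cc_power_state_change):

# Sketch only: Alpine's halt/poweroff/reboot take the delay in seconds.
delay = "+5"
arg = "0" if delay == "now" else str(int(delay) * 60)
print(arg)  # -> "300"
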
@@ -185,4 +173,5 @@ class Distro(distros.Distro):
return command
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py
index 5fcec952..a3573547 100644
--- a/cloudinit/distros/amazon.py
+++ b/cloudinit/distros/amazon.py
@@ -14,7 +14,6 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
-
def update_package_sources(self):
return None
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 967be168..0bdfef83 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -4,33 +4,29 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
+import os
+from cloudinit import distros, helpers
+from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.net.renderers import RendererNotFoundError
-
from cloudinit.settings import PER_INSTANCE
-import os
-
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
+ locale_gen_fn = "/etc/locale.gen"
network_conf_dir = "/etc/netctl"
- resolve_conf_fn = "/etc/resolv.conf"
- init_cmd = ['systemctl'] # init scripts
+ init_cmd = ["systemctl"] # init scripts
renderer_configs = {
- "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
- "netplan_header": "# generated by cloud-init\n",
- "postcmds": True}
+ "netplan": {
+ "netplan_path": "/etc/netplan/50-cloud-init.yaml",
+ "netplan_header": "# generated by cloud-init\n",
+ "postcmds": True,
+ }
}
def __init__(self, name, cfg, paths):
@@ -39,83 +35,94 @@ class Distro(distros.Distro):
# calls from repeatedly happening (when they
# should only happen, say, once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'arch'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "arch"
+ cfg["ssh_svcname"] = "sshd"
def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- subp.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
+ if out_fn is not None and out_fn != "/etc/locale.conf":
+ LOG.warning(
+ "Invalid locale_configfile %s, only supported "
+ "value is /etc/locale.conf",
+ out_fn,
+ )
lines = [
util.make_header(),
- 'LANG="%s"' % (locale),
+ # Hard-coding the charset isn't ideal, but there is no other way.
+ "%s UTF-8" % (locale),
"",
]
- util.write_file(out_fn, "\n".join(lines))
+ util.write_file(self.locale_gen_fn, "\n".join(lines))
+ subp.subp(["locale-gen"], capture=False)
+ # In the future systemd can handle locale-gen stuff:
+ # https://github.com/systemd/systemd/pull/9864
+ subp.subp(["localectl", "set-locale", locale], capture=False)
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('', pkgs=pkglist)
+ self.package_command("", pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
try:
- return self._supported_write_network_config(netconfig)
+ super()._write_network_state(network_state)
except RendererNotFoundError as e:
# Fall back to old _write_network
raise NotImplementedError from e
def _write_network(self, settings):
entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
+ LOG.debug(
+ "Translated ubuntu style network settings %s into %s",
+ settings,
+ entries,
+ )
return _render_network(
- entries, resolv_conf=self.resolve_conf_fn,
+ entries,
+ resolv_conf=self.resolve_conf_fn,
conf_dir=self.network_conf_dir,
- enable_func=self._enable_interface)
+ enable_func=self._enable_interface,
+ )
def _enable_interface(self, device_name):
- cmd = ['netctl', 'reenable', device_name]
+ cmd = ["netctl", "reenable", device_name]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
def _bring_up_interface(self, device_name):
- cmd = ['netctl', 'restart', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ cmd = ["netctl", "restart", device_name]
+ LOG.debug(
+ "Attempting to run bring up interface %s using command %s",
+ device_name,
+ cmd,
+ )
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
return True
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- def _bring_up_interfaces(self, device_names):
- for d in device_names:
- if not self._bring_up_interface(d):
- return False
- return True
-
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), omode="w", mode=0o644)
+ conf = HostnameConf("")
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), omode="w", mode=0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
@@ -137,6 +144,21 @@ class Distro(distros.Distro):
return default
return hostname
+ # hostname (inetutils) isn't installed by default on Arch, so we use
+ # hostnamectl, which is installed by default (systemd).
+ def _apply_hostname(self, hostname):
+ LOG.debug(
+ "Non-persistently setting the system hostname to %s", hostname
+ )
+ try:
+ subp.subp(["hostnamectl", "--transient", "set-hostname", hostname])
+ except subp.ProcessExecutionError:
+ util.logexc(
+ LOG,
+ "Failed to non-persistently adjust the system hostname to %s",
+ hostname,
+ )
+
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
@@ -144,7 +166,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"]
+ cmd = ["pacman", "-Sy", "--quiet", "--noconfirm"]
# Redirect output
if args and isinstance(args, str):
@@ -152,22 +174,30 @@ class Distro(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
+ if command == "upgrade":
+ command = "-u"
if command:
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-y"], freq=PER_INSTANCE)
-
-
-def _render_network(entries, target="/", conf_dir="etc/netctl",
- resolv_conf="etc/resolv.conf", enable_func=None):
+ self._runner.run(
+ "update-sources", self.package_command, ["-y"], freq=PER_INSTANCE
+ )
+
+
+def _render_network(
+ entries,
+ target="/",
+ conf_dir="etc/netctl",
+ resolv_conf="etc/resolv.conf",
+ enable_func=None,
+):
"""Render the translate_network format into netctl files in target.
Paths will be rendered under target.
"""
@@ -178,29 +208,27 @@ def _render_network(entries, target="/", conf_dir="etc/netctl",
conf_dir = subp.target_path(target, conf_dir)
for (dev, info) in entries.items():
- if dev == 'lo':
+ if dev == "lo":
# no configuration should be rendered for 'lo'
continue
devs.append(dev)
net_fn = os.path.join(conf_dir, dev)
net_cfg = {
- 'Connection': 'ethernet',
- 'Interface': dev,
- 'IP': info.get('bootproto'),
- 'Address': "%s/%s" % (info.get('address'),
- info.get('netmask')),
- 'Gateway': info.get('gateway'),
- 'DNS': info.get('dns-nameservers', []),
+ "Connection": "ethernet",
+ "Interface": dev,
+ "IP": info.get("bootproto"),
+ "Address": "%s/%s" % (info.get("address"), info.get("netmask")),
+ "Gateway": info.get("gateway"),
+ "DNS": info.get("dns-nameservers", []),
}
util.write_file(net_fn, convert_netctl(net_cfg))
- if enable_func and info.get('auto'):
+ if enable_func and info.get("auto"):
enable_func(dev)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
+ if "dns-nameservers" in info:
+ nameservers.extend(info["dns-nameservers"])
if nameservers:
- util.write_file(resolv_conf,
- convert_resolv_conf(nameservers))
+ util.write_file(resolv_conf, convert_resolv_conf(nameservers))
return devs
@@ -217,17 +245,18 @@ def convert_netctl(settings):
if val is None:
val = ""
elif isinstance(val, (tuple, list)):
- val = "(" + ' '.join("'%s'" % v for v in val) + ")"
+ val = "(" + " ".join("'%s'" % v for v in val) + ")"
result.append("%s=%s\n" % (key, val))
- return ''.join(result)
+ return "".join(result)
def convert_resolv_conf(settings):
"""Returns a settings string formatted for resolv.conf."""
- result = ''
+ result = ""
if isinstance(settings, list):
for ns in settings:
- result = result + 'nameserver %s\n' % ns
+ result = result + "nameserver %s\n" % ns
return result
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index f717a667..1b4498b3 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -1,12 +1,10 @@
import platform
-from cloudinit import distros
-from cloudinit.distros import bsd_utils
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import net, subp, util
+from cloudinit.distros import bsd_utils
+
from .networking import BSDNetworking
LOG = logging.getLogger(__name__)
@@ -14,12 +12,12 @@ LOG = logging.getLogger(__name__)
class BSD(distros.Distro):
networking_cls = BSDNetworking
- hostname_conf_fn = '/etc/rc.conf'
+ hostname_conf_fn = "/etc/rc.conf"
rc_conf_fn = "/etc/rc.conf"
# This differs from the parent Distro class, which has -P for
# poweroff.
- shutdown_options_map = {'halt': '-H', 'poweroff': '-p', 'reboot': '-r'}
+ shutdown_options_map = {"halt": "-H", "poweroff": "-p", "reboot": "-r"}
# Set in BSD distro subclasses
group_add_cmd_prefix = []
@@ -35,7 +33,7 @@ class BSD(distros.Distro):
# calls from repeatedly happening (when they
# should only happen, say, once per instance...)
self._runner = helpers.Runners(paths)
- cfg['ssh_svcname'] = 'sshd'
+ cfg["ssh_svcname"] = "sshd"
self.osfamily = platform.system().lower()
def _read_system_hostname(self):
@@ -43,13 +41,13 @@ class BSD(distros.Distro):
return (self.hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- return bsd_utils.get_rc_config_value('hostname')
+ return bsd_utils.get_rc_config_value("hostname")
def _get_add_member_to_group_cmd(self, member_name, group_name):
- raise NotImplementedError('Return list cmd to add member to group')
+ raise NotImplementedError("Return list cmd to add member to group")
def _write_hostname(self, hostname, filename):
- bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf')
+ bsd_utils.set_rc_config_value("hostname", hostname, fn="/etc/rc.conf")
def create_group(self, name, members=None):
if util.is_group(name):
@@ -66,45 +64,55 @@ class BSD(distros.Distro):
members = []
for member in members:
if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning(
+ "Unable to add group member '%s' to group '%s'"
+ "; user does not exist.",
+ member,
+ name,
+ )
continue
try:
subp.subp(self._get_add_member_to_group_cmd(member, name))
LOG.info("Added user '%s' to group '%s'", member, name)
except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ util.logexc(
+ LOG, "Failed to add user '%s' to group '%s'", member, name
+ )
def generate_fallback_config(self):
- nconf = {'config': [], 'version': 1}
+ nconf = {"config": [], "version": 1}
for mac, name in net.get_interfaces_by_mac().items():
- nconf['config'].append(
- {'type': 'physical', 'name': name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ nconf["config"].append(
+ {
+ "type": "physical",
+ "name": name,
+ "mac_address": mac,
+ "subnets": [{"type": "dhcp"}],
+ }
+ )
return nconf
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
def _get_pkg_cmd_environ(self):
"""Return environment vars used in *BSD package_command operations"""
- raise NotImplementedError('BSD subclasses return a dict of env vars')
+ raise NotImplementedError("BSD subclasses return a dict of env vars")
def package_command(self, command, args=None, pkgs=None):
if pkgs is None:
pkgs = []
- if command == 'install':
+ if command == "install":
cmd = self.pkg_cmd_install_prefix
- elif command == 'remove':
+ elif command == "remove":
cmd = self.pkg_cmd_remove_prefix
- elif command == 'update':
+ elif command == "update":
if not self.pkg_cmd_update_prefix:
return
cmd = self.pkg_cmd_update_prefix
- elif command == 'upgrade':
+ elif command == "upgrade":
if not self.pkg_cmd_upgrade_prefix:
return
cmd = self.pkg_cmd_upgrade_prefix
@@ -114,20 +122,17 @@ class BSD(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
def apply_locale(self, locale, out_fn=None):
- LOG.debug('Cannot set the locale.')
+ LOG.debug("Cannot set the locale.")
def apply_network_config_names(self, netconfig):
- LOG.debug('Cannot rename network interface.')
+ LOG.debug("Cannot rename network interface.")
diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py
index 079d0d53..00cd0662 100644
--- a/cloudinit/distros/bsd_utils.py
+++ b/cloudinit/distros/bsd_utils.py
@@ -18,31 +18,31 @@ def _unquote(value):
return value
-def get_rc_config_value(key, fn='/etc/rc.conf'):
- key_prefix = '{}='.format(key)
+def get_rc_config_value(key, fn="/etc/rc.conf"):
+ key_prefix = "{}=".format(key)
for line in util.load_file(fn).splitlines():
if line.startswith(key_prefix):
- value = line.replace(key_prefix, '')
+ value = line.replace(key_prefix, "")
return _unquote(value)
-def set_rc_config_value(key, value, fn='/etc/rc.conf'):
+def set_rc_config_value(key, value, fn="/etc/rc.conf"):
lines = []
done = False
value = shlex.quote(value)
original_content = util.load_file(fn)
for line in original_content.splitlines():
- if '=' in line:
- k, v = line.split('=', 1)
+ if "=" in line:
+ k, v = line.split("=", 1)
if k == key:
v = value
done = True
- lines.append('='.join([k, v]))
+ lines.append("=".join([k, v]))
else:
lines.append(line)
if not done:
- lines.append('='.join([key, value]))
- new_content = '\n'.join(lines) + '\n'
+ lines.append("=".join([key, value]))
+ new_content = "\n".join(lines) + "\n"
if new_content != original_content:
util.write_file(fn, new_content)
diff --git a/cloudinit/distros/centos.py b/cloudinit/distros/centos.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/centos.py
+++ b/cloudinit/distros/centos.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/__init__.py b/cloudinit/distros/cloudlinux.py
index 39af88c2..3dc0a342 100644
--- a/tests/cloud_tests/testcases/examples/__init__.py
+++ b/cloudinit/distros/cloudlinux.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init examples.
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
-See configs/examples/README.md for more information
-"""
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 844aaf21..6dc1ad40 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -7,27 +7,29 @@
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
+import fcntl
import os
+import time
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
- '--option=Dpkg::options::=--force-unsafe-io',
- '--assume-yes', '--quiet')
+APT_LOCK_WAIT_TIMEOUT = 30
+APT_GET_COMMAND = (
+ "apt-get",
+ "--option=Dpkg::Options::=--force-confold",
+ "--option=Dpkg::options::=--force-unsafe-io",
+ "--assume-yes",
+ "--quiet",
+)
APT_GET_WRAPPER = {
- 'command': 'eatmydata',
- 'enabled': 'auto',
+ "command": "eatmydata",
+ "enabled": "auto",
}
NETWORK_FILE_HEADER = """\
@@ -41,19 +43,36 @@ NETWORK_FILE_HEADER = """\
NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init"
LOCALE_CONF_FN = "/etc/default/locale"
+# The frontend lock needs to be acquired first followed by the order that
+# apt uses. /var/lib/apt/lists is locked independently of that install chain,
+# and only locked during update, so you can acquire it either order.
+# Also update does not acquire the dpkg frontend lock.
+# More context:
+# https://github.com/canonical/cloud-init/pull/1034#issuecomment-986971376
+APT_LOCK_FILES = [
+ "/var/lib/dpkg/lock-frontend",
+ "/var/lib/dpkg/lock",
+ "/var/cache/apt/archives/lock",
+ "/var/lib/apt/lists/lock",
+]
+
class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init",
- "netplan": "/etc/netplan/50-cloud-init.yaml"
+ "netplan": "/etc/netplan/50-cloud-init.yaml",
}
renderer_configs = {
- "eni": {"eni_path": network_conf_fn["eni"],
- "eni_header": NETWORK_FILE_HEADER},
- "netplan": {"netplan_path": network_conf_fn["netplan"],
- "netplan_header": NETWORK_FILE_HEADER,
- "postcmds": True}
+ "eni": {
+ "eni_path": network_conf_fn["eni"],
+ "eni_header": NETWORK_FILE_HEADER,
+ },
+ "netplan": {
+ "netplan_path": network_conf_fn["netplan"],
+ "netplan_header": NETWORK_FILE_HEADER,
+ "postcmds": True,
+ },
}
def __init__(self, name, cfg, paths):
@@ -62,8 +81,8 @@ class Distro(distros.Distro):
# calls from repeatedly happening (when they
# should only happen, say, once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'debian'
- self.default_locale = 'en_US.UTF-8'
+ self.osfamily = "debian"
+ self.default_locale = "en_US.UTF-8"
self.system_locale = None
def get_locale(self):
@@ -74,25 +93,29 @@ class Distro(distros.Distro):
self.system_locale = read_system_locale()
# Return system_locale setting if valid, else use default locale
- return (self.system_locale if self.system_locale else
- self.default_locale)
+ return (
+ self.system_locale if self.system_locale else self.default_locale
+ )
- def apply_locale(self, locale, out_fn=None, keyname='LANG'):
+ def apply_locale(self, locale, out_fn=None, keyname="LANG"):
"""Apply specified locale to system, regenerate if specified locale
- differs from system default."""
+ differs from system default."""
if not out_fn:
out_fn = LOCALE_CONF_FN
if not locale:
- raise ValueError('Failed to provide locale value.')
+ raise ValueError("Failed to provide locale value.")
# Only call locale regeneration if needed
# Update system locale config with specified locale if needed
distro_locale = self.get_locale()
conf_fn_exists = os.path.exists(out_fn)
sys_locale_unset = False if self.system_locale else True
- need_regen = (locale.lower() != distro_locale.lower() or
- not conf_fn_exists or sys_locale_unset)
+ need_regen = (
+ locale.lower() != distro_locale.lower()
+ or not conf_fn_exists
+ or sys_locale_unset
+ )
need_conf = not conf_fn_exists or need_regen or sys_locale_unset
if need_regen:
@@ -100,7 +123,10 @@ class Distro(distros.Distro):
else:
LOG.debug(
"System has '%s=%s' requested '%s', skipping regeneration.",
- keyname, self.system_locale, locale)
+ keyname,
+ self.system_locale,
+ locale,
+ )
if need_conf:
update_locale_conf(locale, out_fn, keyname=keyname)
@@ -109,34 +135,24 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
_maybe_remove_legacy_eth0()
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '--all')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
+ return super()._write_network_state(network_state)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf = HostnameConf("")
+ conf.set_hostname(hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
@@ -165,18 +181,90 @@ class Distro(distros.Distro):
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+ def _apt_lock_available(self, lock_files=None):
+ """Determines if another process holds any apt locks.
+
+ If all locks are clear, return True else False.
+ """
+ if lock_files is None:
+ lock_files = APT_LOCK_FILES
+ for lock in lock_files:
+ if not os.path.exists(lock):
+ # Only wait for lock files that already exist
+ continue
+ with open(lock, "w") as handle:
+ try:
+ fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except OSError:
+ return False
+ return True
+
+ def _wait_for_apt_command(
+ self, short_cmd, subp_kwargs, timeout=APT_LOCK_WAIT_TIMEOUT
+ ):
+ """Wait for apt install to complete.
+
+ short_cmd: Name of command like "upgrade" or "install"
+ subp_kwargs: kwargs to pass to subp
+ """
+ start_time = time.time()
+ LOG.debug("Waiting for apt lock")
+ while time.time() - start_time < timeout:
+ if not self._apt_lock_available():
+ time.sleep(1)
+ continue
+ LOG.debug("apt lock available")
+ try:
+ # Allow the output of this to flow outwards (not be captured)
+ log_msg = "apt-%s [%s]" % (
+ short_cmd,
+ " ".join(subp_kwargs["args"]),
+ )
+ return util.log_time(
+ logfunc=LOG.debug,
+ msg=log_msg,
+ func=subp.subp,
+ kwargs=subp_kwargs,
+ )
+ except subp.ProcessExecutionError:
+ # Even though we have already waited for the apt lock to be
+ # available, it is possible that the lock was acquired by
+ # another process since the check. Since apt doesn't provide
+ # a meaningful error code to check and checking the error
+ # text is fragile and subject to internationalization, we
+ # can instead check the apt lock again. If the apt lock is
+ # still available, given the length of an average apt
+ # transaction, it is extremely unlikely that another process
+ # raced us when we tried to acquire it, so raise the apt
+ # error received. If the lock is unavailable, just keep waiting
+ if self._apt_lock_available():
+ raise
+ LOG.debug("Another process holds apt lock. Waiting...")
+ time.sleep(1)
+ raise TimeoutError("Could not get apt lock")
+
def package_command(self, command, args=None, pkgs=None):
+ """Run the given package command.
+
+ On Debian, this will run apt-get (unless APT_GET_COMMAND is set).
+
+ command: The command to run, like "upgrade" or "install"
+ args: Arguments passed to apt itself in addition to
+ any specified in APT_GET_COMMAND
+ pkgs: Apt packages that the command will apply to
+ """
if pkgs is None:
pkgs = []
e = os.environ.copy()
- # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html
- e['DEBIAN_FRONTEND'] = 'noninteractive'
+ # See: http://manpages.ubuntu.com/manpages/bionic/man7/debconf.7.html
+ e["DEBIAN_FRONTEND"] = "noninteractive"
wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
cmd = _get_wrapper_prefix(
- wcfg.get('command', APT_GET_WRAPPER['command']),
- wcfg.get('enabled', APT_GET_WRAPPER['enabled']))
+ wcfg.get("command", APT_GET_WRAPPER["command"]),
+ wcfg.get("enabled", APT_GET_WRAPPER["enabled"]),
+ )
cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))
@@ -187,35 +275,46 @@ class Distro(distros.Distro):
subcmd = command
if command == "upgrade":
- subcmd = self.get_option("apt_get_upgrade_subcommand",
- "dist-upgrade")
+ subcmd = self.get_option(
+ "apt_get_upgrade_subcommand", "dist-upgrade"
+ )
cmd.append(subcmd)
- pkglist = util.expand_package_list('%s=%s', pkgs)
+ pkglist = util.expand_package_list("%s=%s", pkgs)
cmd.extend(pkglist)
- # Allow the output of this to flow outwards (ie not be captured)
- util.log_time(logfunc=LOG.debug,
- msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=subp.subp,
- args=(cmd,), kwargs={'env': e, 'capture': False})
+ self._wait_for_apt_command(
+ short_cmd=command,
+ subp_kwargs={"args": cmd, "env": e, "capture": False},
+ )
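
For a rough illustration of what the defaults above assemble (assuming the stock APT_GET_COMMAND, no eatmydata wrapper, and no extra args), package_command("install", pkgs=["htop"]) waits for the apt locks and then runs approximately:

# Sketch only: the argv produced under the default configuration,
# executed with DEBIAN_FRONTEND=noninteractive in the environment.
argv = [
    "apt-get",
    "--option=Dpkg::Options::=--force-confold",
    "--option=Dpkg::options::=--force-unsafe-io",
    "--assume-yes",
    "--quiet",
    "install",
    "htop",
]
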
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
def get_primary_arch(self):
return util.get_dpkg_architecture()
+ def set_keymap(self, layout, model, variant, options):
+ # Let localectl take care of updating /etc/default/keyboard
+ distros.Distro.set_keymap(self, layout, model, variant, options)
+ # Workaround for localectl not applying new settings instantly
+ # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=926037
+ self.manage_service("restart", "console-setup")
+
def _get_wrapper_prefix(cmd, mode):
if isinstance(cmd, str):
cmd = [str(cmd)]
- if (util.is_true(mode) or
- (str(mode).lower() == "auto" and cmd[0] and
- subp.which(cmd[0]))):
+ if util.is_true(mode) or (
+ str(mode).lower() == "auto" and cmd[0] and subp.which(cmd[0])
+ ):
return cmd
else:
return []
@@ -223,13 +322,13 @@ def _get_wrapper_prefix(cmd, mode):
def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
"""Ubuntu cloud images previously included a 'eth0.cfg' that had
- hard coded content. That file would interfere with the rendered
- configuration if it was present.
+ hard coded content. That file would interfere with the rendered
+ configuration if it was present.
- if the file does not exist do nothing.
- If the file exists:
- - with known content, remove it and warn
- - with unknown content, leave it and warn
+ If the file does not exist, do nothing.
+ If the file exists:
+ - with known content, remove it and warn
+ - with unknown content, leave it and warn
"""
if not os.path.exists(path):
@@ -239,24 +338,25 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
try:
contents = util.load_file(path)
known_contents = ["auto eth0", "iface eth0 inet dhcp"]
- lines = [f.strip() for f in contents.splitlines()
- if not f.startswith("#")]
+ lines = [
+ f.strip() for f in contents.splitlines() if not f.startswith("#")
+ ]
if lines == known_contents:
util.del_file(path)
msg = "removed %s with known contents" % path
else:
- msg = (bmsg + " '%s' exists with user configured content." % path)
+ msg = bmsg + " '%s' exists with user configured content." % path
except Exception:
msg = bmsg + " %s exists, but could not be read." % path
LOG.warning(msg)
-def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
+def read_system_locale(sys_path=LOCALE_CONF_FN, keyname="LANG"):
"""Read system default locale setting, if present"""
sys_val = ""
if not sys_path:
- raise ValueError('Invalid path: %s' % sys_path)
+ raise ValueError("Invalid path: %s" % sys_path)
if os.path.exists(sys_path):
locale_content = util.load_file(sys_path)
@@ -266,16 +366,22 @@ def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
return sys_val
-def update_locale_conf(locale, sys_path, keyname='LANG'):
+def update_locale_conf(locale, sys_path, keyname="LANG"):
"""Update system locale config"""
- LOG.debug('Updating %s with locale setting %s=%s',
- sys_path, keyname, locale)
+ LOG.debug(
+ "Updating %s with locale setting %s=%s", sys_path, keyname, locale
+ )
subp.subp(
- ['update-locale', '--locale-file=' + sys_path,
- '%s=%s' % (keyname, locale)], capture=False)
+ [
+ "update-locale",
+ "--locale-file=" + sys_path,
+ "%s=%s" % (keyname, locale),
+ ],
+ capture=False,
+ )
-def regenerate_locale(locale, sys_path, keyname='LANG'):
+def regenerate_locale(locale, sys_path, keyname="LANG"):
"""
Run locale-gen for the provided locale and set the default
system variable `keyname` appropriately in the provided `sys_path`.
@@ -286,13 +392,13 @@ def regenerate_locale(locale, sys_path, keyname='LANG'):
# C
# C.UTF-8
# POSIX
- if locale.lower() in ['c', 'c.utf-8', 'posix']:
- LOG.debug('%s=%s does not require rengeneration', keyname, locale)
+ if locale.lower() in ["c", "c.utf-8", "posix"]:
+ LOG.debug("%s=%s does not require rengeneration", keyname, locale)
return
# finally, trigger regeneration
- LOG.debug('Generating locales for %s', locale)
- subp.subp(['locale-gen', locale], capture=False)
+ LOG.debug("Generating locales for %s", locale)
+ subp.subp(["locale-gen", locale], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py
new file mode 100644
index 00000000..0d02bee0
--- /dev/null
+++ b/cloudinit/distros/dragonflybsd.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2020-2021 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.freebsd
+
+
+class Distro(cloudinit.distros.freebsd.Distro):
+ home_dir = "/home"
+
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/__init__.py b/cloudinit/distros/eurolinux.py
index 0a592637..3dc0a342 100644
--- a/tests/cloud_tests/testcases/main/__init__.py
+++ b/cloudinit/distros/eurolinux.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init main features.
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
-See configs/main/README.md for more information
-"""
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index 0fe1fbca..39203225 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -14,4 +14,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index dde34d41..513abdc2 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -10,53 +10,54 @@ from io import StringIO
import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.bsd.BSD):
- usr_lib_exec = '/usr/local/lib'
- login_conf_fn = '/etc/login.conf'
- login_conf_fn_bak = '/etc/login.conf.orig'
- ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- group_add_cmd_prefix = ['pw', 'group', 'add']
+ """
+ Distro subclass for FreeBSD.
+
+ (N.B. DragonFlyBSD inherits from this class.)
+ """
+
+ usr_lib_exec = "/usr/local/lib"
+ login_conf_fn = "/etc/login.conf"
+ login_conf_fn_bak = "/etc/login.conf.orig"
+ ci_sudoers_fn = "/usr/local/etc/sudoers.d/90-cloud-init-users"
+ group_add_cmd_prefix = ["pw", "group", "add"]
pkg_cmd_install_prefix = ["pkg", "install"]
pkg_cmd_remove_prefix = ["pkg", "remove"]
pkg_cmd_update_prefix = ["pkg", "update"]
pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
-
- def _select_hostname(self, hostname, fqdn):
- # Should be FQDN if available. See rc.conf(5) in FreeBSD
- if fqdn:
- return fqdn
- return hostname
+ prefer_fqdn = True # See rc.conf(5) in FreeBSD
+ home_dir = "/usr/home"
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['pw', 'usermod', '-n', member_name, '-G', group_name]
+ return ["pw", "usermod", "-n", member_name, "-G", group_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
- pw_useradd_cmd = ['pw', 'useradd', '-n', name]
- log_pw_useradd_cmd = ['pw', 'useradd', '-n', name]
+ pw_useradd_cmd = ["pw", "useradd", "-n", name]
+ log_pw_useradd_cmd = ["pw", "useradd", "-n", name]
pw_useradd_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "shell": '-s',
- "inactive": '-E',
+ "homedir": "-d",
+ "gecos": "-c",
+ "primary_group": "-g",
+ "groups": "-G",
+ "shell": "-s",
+ "inactive": "-E",
}
pw_useradd_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
for key, val in kwargs.items():
@@ -67,14 +68,19 @@ class Distro(cloudinit.distros.bsd.BSD):
pw_useradd_cmd.append(pw_useradd_flags[key])
log_pw_useradd_cmd.append(pw_useradd_flags[key])
- if 'no_create_home' in kwargs or 'system' in kwargs:
- pw_useradd_cmd.append('-d/nonexistent')
- log_pw_useradd_cmd.append('-d/nonexistent')
+ if "no_create_home" in kwargs or "system" in kwargs:
+ pw_useradd_cmd.append("-d/nonexistent")
+ log_pw_useradd_cmd.append("-d/nonexistent")
else:
- pw_useradd_cmd.append('-d/usr/home/%s' % name)
- pw_useradd_cmd.append('-m')
- log_pw_useradd_cmd.append('-d/usr/home/%s' % name)
- log_pw_useradd_cmd.append('-m')
+ pw_useradd_cmd.append(
+ "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
+ )
+ pw_useradd_cmd.append("-m")
+ log_pw_useradd_cmd.append(
+ "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
+ )
+
+ log_pw_useradd_cmd.append("-m")
# Run the command
LOG.info("Adding user %s", name)
@@ -85,13 +91,13 @@ class Distro(cloudinit.distros.bsd.BSD):
raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
- passwd_val = kwargs.get('passwd', None)
+ passwd_val = kwargs.get("passwd", None)
if passwd_val is not None:
self.set_passwd(name, passwd_val, hashed=True)
def expire_passwd(self, user):
try:
- subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ subp.subp(["pw", "usermod", user, "-p", "01-Jan-1970"])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
@@ -103,15 +109,18 @@ class Distro(cloudinit.distros.bsd.BSD):
hash_opt = "-h"
try:
- subp.subp(['pw', 'usermod', user, hash_opt, '0'],
- data=passwd, logstring="chpasswd for %s" % user)
+ subp.subp(
+ ["pw", "usermod", user, hash_opt, "0"],
+ data=passwd,
+ logstring="chpasswd for %s" % user,
+ )
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
raise
def lock_passwd(self, name):
try:
- subp.subp(['pw', 'usermod', name, '-h', '-'])
+ subp.subp(["pw", "usermod", name, "-h", "-"])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -120,8 +129,9 @@ class Distro(cloudinit.distros.bsd.BSD):
# Adjust the locales value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
- newconf.write(re.sub(r'^default:',
- r'default:lang=%s:' % locale, line))
+ newconf.write(
+ re.sub(r"^default:", r"default:lang=%s:" % locale, line)
+ )
newconf.write("\n")
# Make a backup of login.conf.
@@ -132,15 +142,16 @@ class Distro(cloudinit.distros.bsd.BSD):
try:
LOG.debug("Running cap_mkdb for %s", locale)
- subp.subp(['cap_mkdb', self.login_conf_fn])
+ subp.subp(["cap_mkdb", self.login_conf_fn])
except subp.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
util.copy(self.login_conf_fn_bak, self.login_conf_fn)
except IOError:
- util.logexc(LOG, "Failed to restore %s backup",
- self.login_conf_fn)
+ util.logexc(
+ LOG, "Failed to restore %s backup", self.login_conf_fn
+ )
def apply_network_config_names(self, netconfig):
# This is handled by the freebsd network renderer. It writes in
@@ -152,12 +163,16 @@ class Distro(cloudinit.distros.bsd.BSD):
def _get_pkg_cmd_environ(self):
"""Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
- e['ASSUME_ALWAYS_YES'] = 'YES'
+ e["ASSUME_ALWAYS_YES"] = "YES"
return e
def update_package_sources(self):
self._runner.run(
- "update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index e9b82602..4eb76da8 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -6,26 +6,27 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = '/etc/locale.gen'
- network_conf_fn = '/etc/conf.d/net'
- resolve_conf_fn = '/etc/resolv.conf'
- hostname_conf_fn = '/etc/conf.d/hostname'
- init_cmd = ['rc-service'] # init scripts
+ locale_conf_fn = "/etc/env.d/02locale"
+ locale_gen_fn = "/etc/locale.gen"
+ network_conf_fn = "/etc/conf.d/net"
+ hostname_conf_fn = "/etc/conf.d/hostname"
+ init_cmd = ["rc-service"] # init scripts
+ default_locale = "en_US.UTF-8"
+
+ # C.UTF8 makes sense to generate, but is not selected
+ # Add /etc/locale.gen entries to this list to support more locales
+ locales = ["C.UTF8 UTF-8", "en_US.UTF-8 UTF-8"]
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -33,97 +34,121 @@ class Distro(distros.Distro):
# calls from repeatedly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'gentoo'
+ self.osfamily = "gentoo"
# Fix sshd restarts
- cfg['ssh_svcname'] = '/etc/init.d/sshd'
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- subp.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % locale,
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
+ cfg["ssh_svcname"] = "/etc/init.d/sshd"
+ if distros.uses_systemd():
+ LOG.error("Cloud-init does not support systemd with gentoo")
+
+ def apply_locale(self, _, out_fn=None):
+ """rc-only - not compatible with systemd
+
+ Locales need to be added to /etc/locale.gen and generated prior
+ to selection. Default to en_US.UTF-8 for simplicity.
+ """
+ util.write_file(self.locale_gen_fn, "\n".join(self.locales), mode=644)
+
+ # generate locales
+ subp.subp(["locale-gen"], capture=False)
+
+ # select locale
+ subp.subp(
+ ["eselect", "locale", "set", self.default_locale], capture=False
+ )
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('', pkgs=pkglist)
+ self.package_command("", pkgs=pkglist)
def _write_network(self, settings):
entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
+ LOG.debug(
+ "Translated ubuntu style network settings %s into %s",
+ settings,
+ entries,
+ )
dev_names = entries.keys()
nameservers = []
for (dev, info) in entries.items():
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if dev == 'lo':
+ if "dns-nameservers" in info:
+ nameservers.extend(info["dns-nameservers"])
+ if dev == "lo":
continue
- net_fn = self.network_conf_fn + '.' + dev
- dns_nameservers = info.get('dns-nameservers')
+ net_fn = self.network_conf_fn + "." + dev
+ dns_nameservers = info.get("dns-nameservers")
if isinstance(dns_nameservers, (list, tuple)):
- dns_nameservers = str(tuple(dns_nameservers)).replace(',', '')
+ dns_nameservers = str(tuple(dns_nameservers)).replace(",", "")
# eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}
# lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}
- results = ''
- if info.get('bootproto') == 'dhcp':
+ results = ""
+ if info.get("bootproto") == "dhcp":
results += 'config_{name}="dhcp"'.format(name=dev)
else:
results += (
'config_{name}="{ip_address} netmask {netmask}"\n'
'mac_{name}="{hwaddr}"\n'
- ).format(name=dev, ip_address=info.get('address'),
- netmask=info.get('netmask'),
- hwaddr=info.get('hwaddress'))
- results += 'routes_{name}="default via {gateway}"\n'.format(
+ ).format(
name=dev,
- gateway=info.get('gateway')
+ ip_address=info.get("address"),
+ netmask=info.get("netmask"),
+ hwaddr=info.get("hwaddress"),
)
- if info.get('dns-nameservers'):
+ results += 'routes_{name}="default via {gateway}"\n'.format(
+ name=dev, gateway=info.get("gateway")
+ )
+ if info.get("dns-nameservers"):
results += 'dns_servers_{name}="{dnsservers}"\n'.format(
- name=dev,
- dnsservers=dns_nameservers)
+ name=dev, dnsservers=dns_nameservers
+ )
util.write_file(net_fn, results)
self._create_network_symlink(dev)
- if info.get('auto'):
- cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
- 'default']
+ if info.get("auto"):
+ cmd = [
+ "rc-update",
+ "add",
+ "net.{name}".format(name=dev),
+ "default",
+ ]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s",
+ cmd,
+ err,
+ )
except subp.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed",
- cmd)
+ util.logexc(
+ LOG, "Running interface command %s failed", cmd
+ )
if nameservers:
- util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
+ util.write_file(
+ self.resolve_conf_fn, convert_resolv_conf(nameservers)
+ )
return dev_names
@staticmethod
def _create_network_symlink(interface_name):
- file_path = '/etc/init.d/net.{name}'.format(name=interface_name)
+ file_path = "/etc/init.d/net.{name}".format(name=interface_name)
if not util.is_link(file_path):
- util.sym_link('/etc/init.d/net.lo', file_path)
+ util.sym_link("/etc/init.d/net.lo", file_path)
def _bring_up_interface(self, device_name):
- cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ cmd = ["/etc/init.d/net.%s" % device_name, "restart"]
+ LOG.debug(
+ "Attempting to run bring up interface %s using command %s",
+ device_name,
+ cmd,
+ )
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
return True
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -132,40 +157,41 @@ class Distro(distros.Distro):
def _bring_up_interfaces(self, device_names):
use_all = False
for d in device_names:
- if d == 'all':
+ if d == "all":
use_all = True
if use_all:
# Grab device names from init scripts
- cmd = ['ls', '/etc/init.d/net.*']
+ cmd = ["ls", "/etc/init.d/net.*"]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- devices = [x.split('.')[2] for x in _out.split(' ')]
+ devices = [x.split(".")[2] for x in _out.split(" ")]
return distros.Distro._bring_up_interfaces(self, devices)
else:
return distros.Distro._bring_up_interfaces(self, device_names)
- def _write_hostname(self, your_hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
conf = None
try:
# Try to update the previous one
# so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
# Many distros' format is the hostname by itself, and that is the
# way HostnameConf works but gentoo expects it to be in
# hostname="the-actual-hostname"
- conf.set_hostname('hostname="%s"' % your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
+ conf.set_hostname('hostname="%s"' % hostname)
+ util.write_file(filename, str(conf), 0o644)
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.hostname_conf_fn)
@@ -195,7 +221,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = list('emerge')
+ cmd = list("emerge")
# Redirect output
cmd.append("--quiet")
@@ -207,23 +233,28 @@ class Distro(distros.Distro):
if command:
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-u", "world"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["-u", "world"],
+ freq=PER_INSTANCE,
+ )
def convert_resolv_conf(settings):
"""Returns a settings string formatted for resolv.conf."""
- result = ''
+ result = ""
if isinstance(settings, list):
for ns in settings:
- result += 'nameserver %s\n' % ns
+ result += "nameserver %s\n" % ns
return result
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/__init__.py b/cloudinit/distros/miraclelinux.py
index 6ab8114d..3dc0a342 100644
--- a/tests/cloud_tests/testcases/modules/__init__.py
+++ b/cloudinit/distros/miraclelinux.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init cc modules.
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
-See configs/modules/README.md for more information
-"""
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index edfcd99d..e37fb19b 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -68,7 +68,9 @@
# }
from cloudinit.net.network_state import (
- net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
+ mask_and_ipv4_to_bcast_addr,
+ net_prefix_to_ipv4_mask,
+)
def translate_network(settings):
@@ -86,7 +88,7 @@ def translate_network(settings):
ifaces = []
consume = {}
for (cmd, args) in entries:
- if cmd == 'iface':
+ if cmd == "iface":
if consume:
ifaces.append(consume)
consume = {}
@@ -96,19 +98,19 @@ def translate_network(settings):
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.items():
- if cmd == 'iface':
+ if cmd == "iface":
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
- if 'iface' not in info:
+ if "iface" not in info:
continue
- iface_details = info['iface'].split(None)
+ iface_details = info["iface"].split(None)
# Check if current device *may* have an ipv6 IP
use_ipv6 = False
- if 'inet6' in iface_details:
+ if "inet6" in iface_details:
use_ipv6 = True
dev_name = None
if len(iface_details) >= 1:
@@ -118,55 +120,54 @@ def translate_network(settings):
if not dev_name:
continue
iface_info = {}
- iface_info['ipv6'] = {}
+ iface_info["ipv6"] = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
+ if proto_type in ["dhcp", "static"]:
+ iface_info["bootproto"] = proto_type
# These can just be copied over
if use_ipv6:
- for k in ['address', 'gateway']:
+ for k in ["address", "gateway"]:
if k in info:
val = info[k].strip().lower()
if val:
- iface_info['ipv6'][k] = val
+ iface_info["ipv6"][k] = val
else:
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ for k in ["netmask", "address", "gateway", "broadcast"]:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# handle static ip configurations using
# ipaddress/prefix-length format
- if 'address' in iface_info:
- if 'netmask' not in iface_info:
+ if "address" in iface_info:
+ if "netmask" not in iface_info:
# check if the address has a network prefix
- addr, _, prefix = iface_info['address'].partition('/')
+ addr, _, prefix = iface_info["address"].partition("/")
if prefix:
- iface_info['netmask'] = (
- net_prefix_to_ipv4_mask(prefix))
- iface_info['address'] = addr
+ iface_info["netmask"] = net_prefix_to_ipv4_mask(prefix)
+ iface_info["address"] = addr
# if we set the netmask, we also can set the broadcast
- iface_info['broadcast'] = (
- mask_and_ipv4_to_bcast_addr(
- iface_info['netmask'], addr))
+ iface_info["broadcast"] = mask_and_ipv4_to_bcast_addr(
+ iface_info["netmask"], addr
+ )
# Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ if "dns-nameservers" in info:
+ iface_info["dns-nameservers"] = info["dns-nameservers"].split()
# Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
+ if "dns-search" in info:
+ iface_info["dns-search"] = info["dns-search"].split()
# Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
+ if "hwaddress" in info:
+ hw_info = info["hwaddress"].lower().strip()
hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ if len(hw_split) == 2 and hw_split[0].startswith("ether"):
hw_addr = hw_split[1]
if hw_addr:
- iface_info['hwaddress'] = hw_addr
+ iface_info["hwaddress"] = hw_addr
# If ipv6 is enabled, device will have multiple IPs, so we need to
# update the dictionary instead of overwriting it...
if dev_name in real_ifaces:
@@ -179,13 +180,14 @@ def translate_network(settings):
if not args:
continue
dev_name = args[0].strip().lower()
- if cmd == 'auto':
+ if cmd == "auto":
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- if cmd == 'iface' and 'inet6' in args:
- real_ifaces[dev_name]['inet6'] = True
+ real_ifaces[dev_name]["auto"] = True
+ if cmd == "iface" and "inet6" in args:
+ real_ifaces[dev_name]["inet6"] = True
return real_ifaces
+
# vi: ts=4 expandtab
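The translate_network() helper above converts Debian/Ubuntu-style /etc/network/interfaces text into a per-device dict, expanding a "/prefix" on the address into netmask and broadcast keys and flagging "auto" devices. A minimal usage sketch, assuming cloud-init is importable; the interface values are purely illustrative:

from cloudinit.distros import net_util

eni = """\
auto eth0
iface eth0 inet static
    address 192.168.1.10/24
    gateway 192.168.1.1
    dns-nameservers 192.168.1.1
"""

# Illustrative result: {'eth0': {'bootproto': 'static',
#   'address': '192.168.1.10', 'netmask': '255.255.255.0',
#   'broadcast': '192.168.1.255', 'gateway': '192.168.1.1',
#   'dns-nameservers': ['192.168.1.1'], 'auto': True, 'ipv6': {}}}
print(net_util.translate_network(eni))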
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index f1a9b182..9c38ae51 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -8,8 +8,7 @@ import platform
import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -21,42 +20,42 @@ class NetBSD(cloudinit.distros.bsd.BSD):
(N.B. OpenBSD inherits from this class.)
"""
- ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+ ci_sudoers_fn = "/usr/pkg/etc/sudoers.d/90-cloud-init-users"
group_add_cmd_prefix = ["groupadd"]
def __init__(self, name, cfg, paths):
super().__init__(name, cfg, paths)
if os.path.exists("/usr/pkg/bin/pkgin"):
- self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
- self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
- self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
- self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+ self.pkg_cmd_install_prefix = ["pkgin", "-y", "install"]
+ self.pkg_cmd_remove_prefix = ["pkgin", "-y", "remove"]
+ self.pkg_cmd_update_prefix = ["pkgin", "-y", "update"]
+ self.pkg_cmd_upgrade_prefix = ["pkgin", "-y", "full-upgrade"]
else:
- self.pkg_cmd_install_prefix = ['pkg_add', '-U']
- self.pkg_cmd_remove_prefix = ['pkg_delete']
+ self.pkg_cmd_install_prefix = ["pkg_add", "-U"]
+ self.pkg_cmd_remove_prefix = ["pkg_delete"]
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['usermod', '-G', group_name, member_name]
+ return ["usermod", "-G", group_name, member_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
- adduser_cmd = ['useradd']
- log_adduser_cmd = ['useradd']
+ adduser_cmd = ["useradd"]
+ log_adduser_cmd = ["useradd"]
adduser_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "shell": '-s',
+ "homedir": "-d",
+ "gecos": "-c",
+ "primary_group": "-g",
+ "groups": "-G",
+ "shell": "-s",
}
adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
for key, val in kwargs.items():
@@ -67,9 +66,9 @@ class NetBSD(cloudinit.distros.bsd.BSD):
adduser_cmd.append(adduser_flags[key])
log_adduser_cmd.append(adduser_flags[key])
- if 'no_create_home' not in kwargs or 'system' not in kwargs:
- adduser_cmd += ['-m']
- log_adduser_cmd += ['-m']
+ if "no_create_home" not in kwargs or "system" not in kwargs:
+ adduser_cmd += ["-m"]
+ log_adduser_cmd += ["-m"]
adduser_cmd += [name]
log_adduser_cmd += [name]
@@ -83,29 +82,28 @@ class NetBSD(cloudinit.distros.bsd.BSD):
raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
- passwd_val = kwargs.get('passwd', None)
+ passwd_val = kwargs.get("passwd", None)
if passwd_val is not None:
self.set_passwd(name, passwd_val, hashed=True)
def set_passwd(self, user, passwd, hashed=False):
if hashed:
hashed_pw = passwd
- elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+ elif not hasattr(crypt, "METHOD_BLOWFISH"):
# crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
# on NetBSD 7 and 8.
- LOG.error((
- 'Cannot set non-encrypted password for user %s. '
- 'Python >= 3.7 is required.'), user)
+ LOG.error(
+ "Cannot set non-encrypted password for user %s. "
+ "Python >= 3.7 is required.",
+ user,
+ )
return
else:
method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
- hashed_pw = crypt.crypt(
- passwd,
- crypt.mksalt(method)
- )
+ hashed_pw = crypt.crypt(passwd, crypt.mksalt(method))
try:
- subp.subp(['usermod', '-p', hashed_pw, user])
+ subp.subp(["usermod", "-p", hashed_pw, user])
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
raise
@@ -113,40 +111,42 @@ class NetBSD(cloudinit.distros.bsd.BSD):
def force_passwd_change(self, user):
try:
- subp.subp(['usermod', '-F', user])
+ subp.subp(["usermod", "-F", user])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
def lock_passwd(self, name):
try:
- subp.subp(['usermod', '-C', 'yes', name])
+ subp.subp(["usermod", "-C", "yes", name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
def unlock_passwd(self, name):
try:
- subp.subp(['usermod', '-C', 'no', name])
+ subp.subp(["usermod", "-C", "no", name])
except Exception:
util.logexc(LOG, "Failed to unlock user %s", name)
raise
def apply_locale(self, locale, out_fn=None):
- LOG.debug('Cannot set the locale.')
+ LOG.debug("Cannot set the locale.")
def apply_network_config_names(self, netconfig):
- LOG.debug('NetBSD cannot rename network interface.')
+ LOG.debug("NetBSD cannot rename network interface.")
def _get_pkg_cmd_environ(self):
"""Return env vars used in NetBSD package_command operations"""
os_release = platform.release()
os_arch = platform.machine()
e = os.environ.copy()
- e['PKG_PATH'] = (
- 'http://cdn.netbsd.org/pub/pkgsrc/'
- 'packages/NetBSD/%s/%s/All'
- ) % (os_arch, os_release)
+ e[
+ "PKG_PATH"
+ ] = "http://cdn.netbsd.org/pub/pkgsrc/packages/NetBSD/%s/%s/All" % (
+ os_arch,
+ os_release,
+ )
return e
def update_package_sources(self):
@@ -156,4 +156,5 @@ class NetBSD(cloudinit.distros.bsd.BSD):
class Distro(NetBSD):
pass
+
# vi: ts=4 expandtab
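NetBSD's set_passwd() above only hashes a plain-text password when crypt.METHOD_BLOWFISH is present (Python >= 3.7 on NetBSD 7/8). A standalone sketch of that hashing step, guarded the same way and assuming a Python that still ships the standard crypt module; the password value is illustrative only:

import crypt

plain = "s3cret"  # hypothetical plain-text password, for illustration only
if hasattr(crypt, "METHOD_BLOWFISH"):
    # Produces a Blowfish-style hash suitable for passing to `usermod -p`.
    hashed = crypt.crypt(plain, crypt.mksalt(crypt.METHOD_BLOWFISH))
    print(hashed)
else:
    print("Blowfish crypt unavailable; refusing to store a plain password")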
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index c291196a..b24b6233 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -1,10 +1,9 @@
import abc
import logging
import os
+from typing import List, Optional
-from cloudinit import subp
-from cloudinit import net, util
-
+from cloudinit import net, subp, util
LOG = logging.getLogger(__name__)
@@ -24,7 +23,7 @@ class Networking(metaclass=abc.ABCMeta):
"""
def __init__(self):
- self.blacklist_drivers = None
+ self.blacklist_drivers: Optional[List[str]] = None
def _get_current_rename_info(self) -> dict:
return net._get_current_rename_info()
@@ -73,7 +72,8 @@ class Networking(metaclass=abc.ABCMeta):
def get_interfaces_by_mac(self) -> dict:
return net.get_interfaces_by_mac(
- blacklist_drivers=self.blacklist_drivers)
+ blacklist_drivers=self.blacklist_drivers
+ )
def get_master(self, devname: DeviceName):
return net.get_master(devname)
@@ -225,7 +225,7 @@ class LinuxNetworking(Networking):
def try_set_link_up(self, devname: DeviceName) -> bool:
"""Try setting the link to up explicitly and return if it is up.
- Not guaranteed to bring the interface up. The caller is expected to
- add wait times before retrying."""
- subp.subp(['ip', 'link', 'set', devname, 'up'])
+ Not guaranteed to bring the interface up. The caller is expected to
+ add wait times before retrying."""
+ subp.subp(["ip", "link", "set", devname, "up"])
return self.is_up(devname)
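try_set_link_up() above issues `ip link set <dev> up` but, per its docstring, does not guarantee the link actually comes up; callers are expected to add waits between retries. A minimal sketch of such a caller; the retry helper itself is hypothetical and not part of cloud-init:

import time

from cloudinit.distros.networking import LinuxNetworking

def wait_for_link_up(devname, attempts=5, delay=1.0):
    """Hypothetical helper: retry try_set_link_up, pausing between tries."""
    networking = LinuxNetworking()
    for _ in range(attempts):
        if networking.try_set_link_up(devname):
            return True
        time.sleep(delay)  # give the link time to come up before retrying
    return False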
diff --git a/cloudinit/distros/openEuler.py b/cloudinit/distros/openEuler.py
new file mode 100644
index 00000000..3dc0a342
--- /dev/null
+++ b/cloudinit/distros/openEuler.py
@@ -0,0 +1,10 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index 720c9cf3..ccdb8799 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -7,28 +7,27 @@ import platform
import cloudinit.distros.netbsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.netbsd.NetBSD):
- hostname_conf_fn = '/etc/myname'
+ hostname_conf_fn = "/etc/myname"
def _read_hostname(self, filename, default=None):
return util.load_file(self.hostname_conf_fn)
def _write_hostname(self, hostname, filename):
- content = hostname + '\n'
+ content = hostname + "\n"
util.write_file(self.hostname_conf_fn, content)
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['usermod', '-G', group_name, member_name]
+ return ["usermod", "-G", group_name, member_name]
def lock_passwd(self, name):
try:
- subp.subp(['usermod', '-p', '*', name])
+ subp.subp(["usermod", "-p", "*", name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -41,11 +40,10 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
os_release = platform.release()
os_arch = platform.machine()
e = os.environ.copy()
- e['PKG_PATH'] = (
- 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
- 'packages/{os_arch}/').format(
- os_arch=os_arch, os_release=os_release
- )
+ e["PKG_PATH"] = (
+ "ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/"
+ "packages/{os_arch}/"
+ ).format(os_arch=os_arch, os_release=os_release)
return e
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 7ca0ef99..00ed1514 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -8,69 +8,61 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import distros, helpers, subp, util
from cloudinit.distros import rhel_util as rhutil
+from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
class Distro(distros.Distro):
- clock_conf_fn = '/etc/sysconfig/clock'
- hostname_conf_fn = '/etc/HOSTNAME'
- init_cmd = ['service']
- locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network/config'
- network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
- route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
- systemd_hostname_conf_fn = '/etc/hostname'
- systemd_locale_conf_fn = '/etc/locale.conf'
- tz_local_fn = '/etc/localtime'
+ clock_conf_fn = "/etc/sysconfig/clock"
+ hostname_conf_fn = "/etc/HOSTNAME"
+ init_cmd = ["service"]
+ locale_conf_fn = "/etc/sysconfig/language"
+ network_conf_fn = "/etc/sysconfig/network/config"
+ network_script_tpl = "/etc/sysconfig/network/ifcfg-%s"
+ route_conf_tpl = "/etc/sysconfig/network/ifroute-%s"
+ systemd_hostname_conf_fn = "/etc/hostname"
+ systemd_locale_conf_fn = "/etc/locale.conf"
+ tz_local_fn = "/etc/localtime"
renderer_configs = {
- 'sysconfig': {
- 'control': 'etc/sysconfig/network/config',
- 'flavor': 'suse',
- 'iface_templates': '%(base)s/network/ifcfg-%(name)s',
- 'netrules_path': (
- 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'),
- 'route_templates': {
- 'ipv4': '%(base)s/network/ifroute-%(name)s',
- 'ipv6': '%(base)s/network/ifroute-%(name)s',
- }
+ "sysconfig": {
+ "control": "etc/sysconfig/network/config",
+ "flavor": "suse",
+ "iface_templates": "%(base)s/network/ifcfg-%(name)s",
+ "netrules_path": (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules"
+ ),
+ "route_templates": {
+ "ipv4": "%(base)s/network/ifroute-%(name)s",
+ "ipv6": "%(base)s/network/ifroute-%(name)s",
+ },
}
}
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
self._runner = helpers.Runners(paths)
- self.osfamily = 'suse'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "suse"
+ cfg["ssh_svcname"] = "sshd"
if self.uses_systemd():
- self.init_cmd = ['systemctl']
- cfg['ssh_svcname'] = 'sshd.service'
+ self.init_cmd = ["systemctl"]
+ cfg["ssh_svcname"] = "sshd.service"
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
out_fn = self.systemd_locale_conf_fn
- locale_cfg = {'LANG': locale}
+ locale_cfg = {"LANG": locale}
else:
if not out_fn:
out_fn = self.locale_conf_fn
- locale_cfg = {'RC_LANG': locale}
+ locale_cfg = {"RC_LANG": locale}
rhutil.update_sysconfig_file(out_fn, locale_cfg)
def install_packages(self, pkglist):
self.package_command(
- 'install',
- args='--auto-agree-with-licenses',
- pkgs=pkglist
+ "install", args="--auto-agree-with-licenses", pkgs=pkglist
)
def package_command(self, command, args=None, pkgs=None):
@@ -78,11 +70,11 @@ class Distro(distros.Distro):
pkgs = []
# No user interaction possible, enable non-interactive mode
- cmd = ['zypper', '--non-interactive']
+ cmd = ["zypper", "--non-interactive"]
# Command is the operation, such as install
- if command == 'upgrade':
- command = 'update'
+ if command == "upgrade":
+ command = "update"
cmd.append(command)
# args are the arguments to the command, not global options
@@ -91,7 +83,7 @@ class Distro(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
@@ -107,27 +99,25 @@ class Distro(distros.Distro):
else:
# Adjust the sysconfig clock zone setting
clock_cfg = {
- 'TIMEZONE': str(tz),
+ "TIMEZONE": str(tz),
}
rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
# This ensures that the correct tz will be used for the system
util.copy(tz_file, self.tz_local_fn)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ['refresh'], freq=PER_INSTANCE)
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["refresh"],
+ freq=PER_INSTANCE,
+ )
def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = subp.subp(['hostname'])
+ (out, _err) = subp.subp(["hostname"])
if len(out):
return out
else:
@@ -157,26 +147,23 @@ class Distro(distros.Distro):
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
- def _write_hostname(self, hostname, out_fn):
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ def _write_hostname(self, hostname, filename):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
- subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(["hostnamectl", "set-hostname", str(hostname)])
else:
conf = None
try:
# Try to update the previous one
# so let's see if we can read it first.

- conf = self._read_hostname_conf(out_fn)
+ conf = self._read_hostname_conf(filename)
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
+ util.write_file(filename, str(conf), 0o644)
@property
def preferred_ntp_clients(self):
@@ -184,22 +171,28 @@ class Distro(distros.Distro):
# Allow distro to determine the preferred ntp client list
if not self._preferred_ntp_clients:
- distro_info = util.system_info()['dist']
+ distro_info = util.system_info()["dist"]
name = distro_info[0]
- major_ver = int(distro_info[1].split('.')[0])
+ major_ver = int(distro_info[1].split(".")[0])
# This is horribly complicated because of a case of
# "we do not care if versions should be increasing syndrome"
- if (
- (major_ver >= 15 and 'openSUSE' not in name) or
- (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
+ if (major_ver >= 15 and "openSUSE" not in name) or (
+ major_ver >= 15 and "openSUSE" in name and major_ver != 42
):
- self._preferred_ntp_clients = ['chrony',
- 'systemd-timesyncd', 'ntp']
+ self._preferred_ntp_clients = [
+ "chrony",
+ "systemd-timesyncd",
+ "ntp",
+ ]
else:
- self._preferred_ntp_clients = ['ntp',
- 'systemd-timesyncd', 'chrony']
+ self._preferred_ntp_clients = [
+ "ntp",
+ "systemd-timesyncd",
+ "chrony",
+ ]
return self._preferred_ntp_clients
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py
index 6b5b6dde..5bea2ae1 100644
--- a/cloudinit/distros/parsers/__init__.py
+++ b/cloudinit/distros/parsers/__init__.py
@@ -9,10 +9,11 @@ def chop_comment(text, comment_chars):
comment_locations = [text.find(c) for c in comment_chars]
comment_locations = [c for c in comment_locations if c != -1]
if not comment_locations:
- return (text, '')
+ return (text, "")
min_comment = min(comment_locations)
before_comment = text[0:min_comment]
comment = text[min_comment:]
return (before_comment, comment)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index e74c083c..61674082 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -23,11 +23,11 @@ class HostnameConf(object):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'hostname':
+ elif line_type == "hostname":
(hostname, tail) = components
contents.write("%s%s\n" % (hostname, tail))
# Ensure trailing newline
@@ -40,7 +40,7 @@ class HostnameConf(object):
def hostname(self):
self.parse()
for (line_type, components) in self._contents:
- if line_type == 'hostname':
+ if line_type == "hostname":
return components[0]
return None
@@ -51,28 +51,28 @@ class HostnameConf(object):
self.parse()
replaced = False
for (line_type, components) in self._contents:
- if line_type == 'hostname':
+ if line_type == "hostname":
components[0] = str(your_hostname)
replaced = True
if not replaced:
- self._contents.append(('hostname', [str(your_hostname), '']))
+ self._contents.append(("hostname", [str(your_hostname), ""]))
def _parse(self, contents):
entries = []
hostnames_found = set()
for line in contents.splitlines():
if not len(line.strip()):
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line.strip(), '#')
+ (head, tail) = chop_comment(line.strip(), "#")
if not len(head):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
- entries.append(('hostname', [head, tail]))
+ entries.append(("hostname", [head, tail]))
hostnames_found.add(head)
if len(hostnames_found) > 1:
- raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
+ raise IOError("Multiple hostnames (%s) found!" % (hostnames_found))
return entries
+
# vi: ts=4 expandtab
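HostnameConf round-trips /etc/hostname-style content, preserving comments and refusing files that declare more than one hostname. A small usage sketch, assuming cloud-init is importable; the file contents are illustrative:

from cloudinit.distros.parsers.hostname import HostnameConf

conf = HostnameConf("# set by the image builder\nold-name\n")
conf.set_hostname("new-name")
print(conf.hostname)  # -> new-name
print(str(conf))      # comment line kept, hostname line replaced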
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 54e4e934..e43880af 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -25,7 +25,7 @@ class HostsConf(object):
self.parse()
options = []
for (line_type, components) in self._contents:
- if line_type == 'option':
+ if line_type == "option":
(pieces, _tail) = components
if len(pieces) and pieces[0] == ip:
options.append(pieces[1:])
@@ -35,7 +35,7 @@ class HostsConf(object):
self.parse()
n_entries = []
for (line_type, components) in self._contents:
- if line_type != 'option':
+ if line_type != "option":
n_entries.append((line_type, components))
continue
else:
@@ -48,35 +48,37 @@ class HostsConf(object):
def add_entry(self, ip, canonical_hostname, *aliases):
self.parse()
- self._contents.append(('option',
- ([ip, canonical_hostname] + list(aliases), '')))
+ self._contents.append(
+ ("option", ([ip, canonical_hostname] + list(aliases), ""))
+ )
def _parse(self, contents):
entries = []
for line in contents.splitlines():
if not len(line.strip()):
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line.strip(), '#')
+ (head, tail) = chop_comment(line.strip(), "#")
if not len(head):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
- entries.append(('option', [head.split(None), tail]))
+ entries.append(("option", [head.split(None), tail]))
return entries
def __str__(self):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
+ elif line_type == "option":
(pieces, tail) = components
pieces = [str(p) for p in pieces]
pieces = "\t".join(pieces)
contents.write("%s%s\n" % (pieces, tail))
return contents.getvalue()
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/parsers/networkmanager_conf.py b/cloudinit/distros/parsers/networkmanager_conf.py
index ac51f122..4b669b0f 100644
--- a/cloudinit/distros/parsers/networkmanager_conf.py
+++ b/cloudinit/distros/parsers/networkmanager_conf.py
@@ -13,9 +13,9 @@ import configobj
class NetworkManagerConf(configobj.ConfigObj):
def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=False)
+ configobj.ConfigObj.__init__(
+ self, contents, interpolation=False, write_empty_values=False
+ )
def set_section_keypair(self, section_name, key, value):
if section_name not in self.sections:
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 62929d03..0ef4e147 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -6,9 +6,9 @@
from io import StringIO
-from cloudinit.distros.parsers import chop_comment
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers import chop_comment
LOG = logging.getLogger(__name__)
@@ -26,12 +26,12 @@ class ResolvConf(object):
@property
def nameservers(self):
self.parse()
- return self._retr_option('nameserver')
+ return self._retr_option("nameserver")
@property
def local_domain(self):
self.parse()
- dm = self._retr_option('domain')
+ dm = self._retr_option("domain")
if dm:
return dm[0]
return None
@@ -39,7 +39,7 @@ class ResolvConf(object):
@property
def search_domains(self):
self.parse()
- current_sds = self._retr_option('search')
+ current_sds = self._retr_option("search")
flat_sds = []
for sdlist in current_sds:
for sd in sdlist.split(None):
@@ -51,11 +51,11 @@ class ResolvConf(object):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("\n")
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
+ elif line_type == "option":
(cfg_opt, cfg_value, comment_tail) = components
line = "%s %s" % (cfg_opt, cfg_value)
if len(comment_tail):
@@ -66,7 +66,7 @@ class ResolvConf(object):
def _retr_option(self, opt_name):
found = []
for (line_type, components) in self._contents:
- if line_type == 'option':
+ if line_type == "option":
(cfg_opt, cfg_value, _comment_tail) = components
if cfg_opt == opt_name:
found.append(cfg_value)
@@ -74,27 +74,29 @@ class ResolvConf(object):
def add_nameserver(self, ns):
self.parse()
- current_ns = self._retr_option('nameserver')
+ current_ns = self._retr_option("nameserver")
new_ns = list(current_ns)
new_ns.append(str(ns))
new_ns = util.uniq_list(new_ns)
if len(new_ns) == len(current_ns):
return current_ns
if len(current_ns) >= 3:
- LOG.warning("ignoring nameserver %r: adding would "
- "exceed the maximum of "
- "'3' name servers (see resolv.conf(5))", ns)
+ LOG.warning(
+ "ignoring nameserver %r: adding would "
+ "exceed the maximum of "
+ "'3' name servers (see resolv.conf(5))",
+ ns,
+ )
return current_ns[:3]
- self._remove_option('nameserver')
+ self._remove_option("nameserver")
for n in new_ns:
- self._contents.append(('option', ['nameserver', n, '']))
+ self._contents.append(("option", ["nameserver", n, ""]))
return new_ns
def _remove_option(self, opt_name):
-
def remove_opt(item):
line_type, components = item
- if line_type != 'option':
+ if line_type != "option":
return False
(cfg_opt, _cfg_value, _comment_tail) = components
if cfg_opt != opt_name:
@@ -116,23 +118,26 @@ class ResolvConf(object):
return new_sds
if len(flat_sds) >= 6:
# Hard restriction on only 6 search domains
- raise ValueError(("Adding %r would go beyond the "
- "'6' maximum search domains") % (search_domain))
+ raise ValueError(
+ "Adding %r would go beyond the '6' maximum search domains"
+ % (search_domain)
+ )
s_list = " ".join(new_sds)
if len(s_list) > 256:
# Some hard limit on 256 chars total
- raise ValueError(("Adding %r would go beyond the "
- "256 maximum search list character limit")
- % (search_domain))
- self._remove_option('search')
- self._contents.append(('option', ['search', s_list, '']))
+ raise ValueError(
+ "Adding %r would go beyond the "
+ "256 maximum search list character limit" % (search_domain)
+ )
+ self._remove_option("search")
+ self._contents.append(("option", ["search", s_list, ""]))
return flat_sds
@local_domain.setter
def local_domain(self, domain):
self.parse()
- self._remove_option('domain')
- self._contents.append(('option', ['domain', str(domain), '']))
+ self._remove_option("domain")
+ self._contents.append(("option", ["domain", str(domain), ""]))
return domain
def _parse(self, contents):
@@ -140,24 +145,30 @@ class ResolvConf(object):
for (i, line) in enumerate(contents.splitlines()):
sline = line.strip()
if not sline:
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line, ';#')
+ (head, tail) = chop_comment(line, ";#")
if not len(head.strip()):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
if not tail:
- tail = ''
+ tail = ""
try:
(cfg_opt, cfg_values) = head.split(None, 1)
except (IndexError, ValueError) as e:
raise IOError(
"Incorrectly formatted resolv.conf line %s" % (i + 1)
) from e
- if cfg_opt not in ['nameserver', 'domain',
- 'search', 'sortlist', 'options']:
+ if cfg_opt not in [
+ "nameserver",
+ "domain",
+ "search",
+ "sortlist",
+ "options",
+ ]:
raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
entries.append(("option", [cfg_opt, cfg_values, tail]))
return entries
+
# vi: ts=4 expandtab
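ResolvConf enforces resolv.conf(5) limits when mutating a parsed file: at most three nameservers, six search domains, and a 256-character search list. A short usage sketch, assuming cloud-init is importable; the addresses and domains are illustrative:

from cloudinit.distros.parsers.resolv_conf import ResolvConf

rc = ResolvConf("nameserver 10.0.0.1\nsearch example.internal\n")
rc.add_nameserver("10.0.0.2")          # appended; a fourth would be dropped with a warning
rc.add_search_domain("corp.example")   # a seventh would raise ValueError
print(str(rc))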
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index dee4c551..4132734c 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -20,7 +20,7 @@ import configobj
# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
# or look at the 'param_expand()' function in the subst.c file in the bash
# source tarball...
-SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*'
+SHELL_VAR_RULE = r"[a-zA-Z_]+[a-zA-Z0-9_]*"
SHELL_VAR_REGEXES = [
# Basic variables
re.compile(r"\$" + SHELL_VAR_RULE),
@@ -48,10 +48,11 @@ class SysConf(configobj.ConfigObj):
``configobj.ConfigObj.__init__`` (i.e. "a filename, file like object,
or list of lines").
"""
+
def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=True)
+ configobj.ConfigObj.__init__(
+ self, contents, interpolation=False, write_empty_values=True
+ )
def __str__(self):
contents = self.write()
@@ -66,11 +67,13 @@ class SysConf(configobj.ConfigObj):
if not isinstance(value, str):
raise ValueError('Value "%s" is not a string' % (value))
if len(value) == 0:
- return ''
+ return ""
quot_func = None
if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
if len(value) == 1:
- quot_func = (lambda x: self._get_single_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_single_quote(x) % x
+ ) # noqa: E731
else:
# Quote whitespace if it isn't the start + end of a shell command
if value.strip().startswith("$(") and value.strip().endswith(")"):
@@ -82,11 +85,13 @@ class SysConf(configobj.ConfigObj):
# leave it alone since the pipes.quote function likes
# to use single quotes which won't get expanded...
if re.search(r"[\n\"']", value):
- quot_func = (lambda x:
- self._get_triple_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_triple_quote(x) % x
+ ) # noqa: E731
else:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_single_quote(x) % x
+ ) # noqa: E731
else:
quot_func = pipes.quote
if not quot_func:
@@ -99,10 +104,13 @@ class SysConf(configobj.ConfigObj):
val = self._decode_element(self._quote(this_entry))
key = self._decode_element(self._quote(entry))
cmnt = self._decode_element(comment)
- return '%s%s%s%s%s' % (indent_string,
- key,
- self._a_to_u('='),
- val,
- cmnt)
+ return "%s%s%s%s%s" % (
+ indent_string,
+ key,
+ self._a_to_u("="),
+ val,
+ cmnt,
+ )
+
# vi: ts=4 expandtab
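SysConf is a thin configobj wrapper that, on write, shell-quotes values containing whitespace or quote characters so the output remains a valid sysconfig file. A minimal sketch, assuming cloud-init is importable; keys and values are illustrative:

from cloudinit.distros.parsers.sys_conf import SysConf

conf = SysConf(["NETWORKING=yes"])
conf["HOSTNAME"] = "node-1.example.internal"
conf["GREETING"] = "hello world"   # contains whitespace, so it is quoted on output
print(str(conf))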
diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py
new file mode 100644
index 00000000..14cefe90
--- /dev/null
+++ b/cloudinit/distros/photon.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import distros, helpers
+from cloudinit import log as logging
+from cloudinit import net, subp, util
+from cloudinit.distros import rhel_util as rhutil
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ systemd_hostname_conf_fn = "/etc/hostname"
+ network_conf_dir = "/etc/systemd/network/"
+ systemd_locale_conf_fn = "/etc/locale.conf"
+ resolve_conf_fn = "/etc/systemd/resolved.conf"
+
+ renderer_configs = {
+ "networkd": {
+ "resolv_conf_fn": resolve_conf_fn,
+ "network_conf_dir": network_conf_dir,
+ }
+ }
+
+ # Should be fqdn if we can use it
+ prefer_fqdn = True
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = "photon"
+ self.init_cmd = ["systemctl"]
+
+ def exec_cmd(self, cmd, capture=True):
+ LOG.debug("Attempting to run: %s", cmd)
+ try:
+ (out, err) = subp.subp(cmd, capture=capture)
+ if err:
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
+ return True, out, err
+ return False, out, err
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, "Command %s failed", cmd)
+ return True, None, None
+
+ def generate_fallback_config(self):
+ key = "disable_fallback_netcfg"
+ disable_fallback_netcfg = self._cfg.get(key, True)
+ LOG.debug("%s value is: %s", key, disable_fallback_netcfg)
+
+ if not disable_fallback_netcfg:
+ return net.generate_fallback_config()
+
+ LOG.info(
+ "Skipping generate_fallback_config. Rely on PhotonOS default "
+ "network config"
+ )
+ return None
+
+ def apply_locale(self, locale, out_fn=None):
+ # This has a dependency on glibc-i18n; users need to manually install it
+ # and enable the option in cloud.cfg
+ if not out_fn:
+ out_fn = self.systemd_locale_conf_fn
+
+ locale_cfg = {
+ "LANG": locale,
+ }
+
+ rhutil.update_sysconfig_file(out_fn, locale_cfg)
+
+ # rhutil will modify /etc/locale.conf
+ # For locale change to take effect, reboot is needed or we can restart
+ # systemd-localed. This is the equivalent of localectl
+ cmd = ["systemctl", "restart", "systemd-localed"]
+ self.exec_cmd(cmd)
+
+ def install_packages(self, pkglist):
+ # self.update_package_sources()
+ self.package_command("install", pkgs=pkglist)
+
+ def _write_hostname(self, hostname, filename):
+ if filename and filename.endswith("/previous-hostname"):
+ util.write_file(filename, hostname)
+ else:
+ ret, _out, err = self.exec_cmd(
+ ["hostnamectl", "set-hostname", str(hostname)]
+ )
+ if ret:
+ LOG.warning(
+ (
+ "Error while setting hostname: %s\nGiven hostname: %s",
+ err,
+ hostname,
+ )
+ )
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.systemd_hostname_conf_fn)
+ return (self.systemd_hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ if filename and filename.endswith("/previous-hostname"):
+ return util.load_file(filename).strip()
+
+ _ret, out, _err = self.exec_cmd(["hostname", "-f"])
+ return out.strip() if out else default
+
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def package_command(self, command, args=None, pkgs=None):
+ if not pkgs:
+ pkgs = []
+
+ cmd = ["tdnf", "-y"]
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ cmd.append(command)
+
+ pkglist = util.expand_package_list("%s-%s", pkgs)
+ cmd.extend(pkglist)
+
+ ret, _out, err = self.exec_cmd(cmd)
+ if ret:
+ LOG.error("Error while installing packages: %s", err)
+
+ def update_package_sources(self):
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["makecache"],
+ freq=PER_INSTANCE,
+ )
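The new photon Distro routes every shell call through exec_cmd(), which returns a (failed, out, err) tuple whose first element is True on failure (including unexpected stderr output). A standalone sketch of that convention, with the standard subprocess module standing in for cloudinit.subp; the command is illustrative:

import subprocess

def exec_cmd(cmd, capture=True):
    """Sketch of the photon exec_cmd convention: returns (failed, out, err)."""
    try:
        proc = subprocess.run(cmd, capture_output=capture, text=True, check=True)
        if proc.stderr:
            # Treat unexpected stderr output as a (soft) failure, as photon does.
            return True, proc.stdout, proc.stderr
        return False, proc.stdout, proc.stderr
    except (OSError, subprocess.CalledProcessError):
        return True, None, None

failed, out, _err = exec_cmd(["hostname", "-f"])
if not failed and out:
    print(out.strip())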
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index c72f7c17..84744ece 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -8,12 +8,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -22,48 +19,48 @@ LOG = logging.getLogger(__name__)
def _make_sysconfig_bool(val):
if val:
- return 'yes'
+ return "yes"
else:
- return 'no'
+ return "no"
class Distro(distros.Distro):
# See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa
clock_conf_fn = "/etc/sysconfig/clock"
- locale_conf_fn = '/etc/sysconfig/i18n'
- systemd_locale_conf_fn = '/etc/locale.conf'
+ locale_conf_fn = "/etc/sysconfig/i18n"
+ systemd_locale_conf_fn = "/etc/locale.conf"
network_conf_fn = "/etc/sysconfig/network"
hostname_conf_fn = "/etc/sysconfig/network"
systemd_hostname_conf_fn = "/etc/hostname"
- network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
- resolve_conf_fn = "/etc/resolv.conf"
+ network_script_tpl = "/etc/sysconfig/network-scripts/ifcfg-%s"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
renderer_configs = {
- 'sysconfig': {
- 'control': 'etc/sysconfig/network',
- 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
- 'route_templates': {
- 'ipv4': '%(base)s/network-scripts/route-%(name)s',
- 'ipv6': '%(base)s/network-scripts/route6-%(name)s'
- }
+ "sysconfig": {
+ "control": "etc/sysconfig/network",
+ "iface_templates": "%(base)s/network-scripts/ifcfg-%(name)s",
+ "route_templates": {
+ "ipv4": "%(base)s/network-scripts/route-%(name)s",
+ "ipv6": "%(base)s/network-scripts/route6-%(name)s",
+ },
}
}
+ # Should be fqdn if we can use it
+ # See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/deployment_guide/ch-sysconfig # noqa: E501
+ prefer_fqdn = True
+
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
# This will be used to restrict certain
# calls from repeatedly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'redhat'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "redhat"
+ cfg["ssh_svcname"] = "sshd"
def install_packages(self, pkglist):
- self.package_command('install', pkgs=pkglist)
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
+ self.package_command("install", pkgs=pkglist)
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
@@ -74,29 +71,22 @@ class Distro(distros.Distro):
if not out_fn:
out_fn = self.locale_conf_fn
locale_cfg = {
- 'LANG': locale,
+ "LANG": locale,
}
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
- def _write_hostname(self, hostname, out_fn):
+ def _write_hostname(self, hostname, filename):
# systemd will never update previous-hostname for us, so
# we need to do it ourselves
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
+ util.write_file(filename, hostname)
elif self.uses_systemd():
- subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(["hostnamectl", "set-hostname", str(hostname)])
else:
host_cfg = {
- 'HOSTNAME': hostname,
+ "HOSTNAME": hostname,
}
- rhel_util.update_sysconfig_file(out_fn, host_cfg)
-
- def _select_hostname(self, hostname, fqdn):
- # Should be fqdn if we can use it
- # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa
- if fqdn:
- return fqdn
- return hostname
+ rhel_util.update_sysconfig_file(filename, host_cfg)
def _read_system_hostname(self):
if self.uses_systemd():
@@ -106,27 +96,21 @@ class Distro(distros.Distro):
return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = subp.subp(['hostname'])
+ (out, _err) = subp.subp(["hostname"])
if len(out):
return out
else:
return default
else:
(_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if 'HOSTNAME' in contents:
- return contents['HOSTNAME']
+ if "HOSTNAME" in contents:
+ return contents["HOSTNAME"]
else:
return default
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
if self.uses_systemd():
@@ -137,7 +121,7 @@ class Distro(distros.Distro):
else:
# Adjust the sysconfig clock zone setting
clock_cfg = {
- 'ZONE': str(tz),
+ "ZONE": str(tz),
}
rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
# This ensures that the correct tz will be used for the system
@@ -147,18 +131,18 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- if subp.which('dnf'):
- LOG.debug('Using DNF for package management')
- cmd = ['dnf']
+ if subp.which("dnf"):
+ LOG.debug("Using DNF for package management")
+ cmd = ["dnf"]
else:
- LOG.debug('Using YUM for package management')
+ LOG.debug("Using YUM for package management")
# the '-t' argument makes yum tolerant of errors on the command
# line with regard to packages.
#
# For example: if you request to install foo, bar and baz and baz
# is installed; yum won't error out complaining that baz is already
# installed.
- cmd = ['yum', '-t']
+ cmd = ["yum", "-t"]
# Determines whether or not yum prompts for confirmation
# of critical actions. We don't want to prompt...
cmd.append("-y")
@@ -170,14 +154,19 @@ class Distro(distros.Distro):
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["makecache"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["makecache"],
+ freq=PER_INSTANCE,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index d71394b4..c96f93b5 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -8,10 +8,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros.parsers.sys_conf import SysConf
-
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers.sys_conf import SysConf
LOG = logging.getLogger(__name__)
@@ -49,4 +48,5 @@ def read_sysconfig_file(fn):
contents = []
return (exists, SysConf(contents))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/rocky.py b/cloudinit/distros/rocky.py
new file mode 100644
index 00000000..3dc0a342
--- /dev/null
+++ b/cloudinit/distros/rocky.py
@@ -0,0 +1,10 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index f3bfb9c2..484214e7 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -10,4 +10,5 @@ from cloudinit.distros import opensuse
class Distro(opensuse.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
deleted file mode 100644
index db534654..00000000
--- a/cloudinit/distros/tests/test_init.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (C) 2020 Canonical Ltd.
-#
-# Author: Daniel Watkins <oddbloke@ubuntu.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-"""Tests for cloudinit/distros/__init__.py"""
-
-from unittest import mock
-
-import pytest
-
-from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS
-
-
-# Define a set of characters we would expect to be replaced
-INVALID_URL_CHARS = [
- chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS
-]
-for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
- # Remove from the set characters that either separate hostname parts (":",
- # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
- # unable to parse URLs ("[", "]").
- INVALID_URL_CHARS.remove(separator)
-
-
-class TestGetPackageMirrorInfo:
- """
- Tests for cloudinit.distros._get_package_mirror_info.
-
- These supplement the tests in tests/unittests/test_distros/test_generic.py
- which are more focused on testing a single production-like configuration.
- These tests are more focused on specific aspects of the unit under test.
- """
-
- @pytest.mark.parametrize('mirror_info,expected', [
- # Empty info gives empty return
- ({}, {}),
- # failsafe values used if present
- ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}},
- {'primary': 'http://value', 'security': 'http://other'}),
- # search values used if present
- ({'search': {'primary': ['http://value'],
- 'security': ['http://other']}},
- {'primary': ['http://value'], 'security': ['http://other']}),
- # failsafe values used if search value not present
- ({'search': {'primary': ['http://value']},
- 'failsafe': {'security': 'http://other'}},
- {'primary': ['http://value'], 'security': 'http://other'})
- ])
- def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
- """
- Test the interaction between search and failsafe inputs
-
- (This doesn't test the case where the mirror_filter removes all search
- options; test_failsafe_used_if_all_search_results_filtered_out covers
- that.)
- """
- assert expected == _get_package_mirror_info(mirror_info,
- mirror_filter=lambda x: x)
-
- def test_failsafe_used_if_all_search_results_filtered_out(self):
- """Test the failsafe option used if all search options eliminated."""
- mirror_info = {
- 'search': {'primary': ['http://value']},
- 'failsafe': {'primary': 'http://other'}
- }
- assert {'primary': 'http://other'} == _get_package_mirror_info(
- mirror_info, mirror_filter=lambda x: False)
-
- @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [
- (True, 'ec2')
- ])
- @pytest.mark.parametrize('availability_zone,region,patterns,expected', (
- # Test ec2_region alone
- ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'],
- ['http://ec2-fk-fake-1/ubuntu']),
- # Test availability_zone alone
- ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'],
- ['http://az-fk-fake-1f/ubuntu']),
- # Test region alone
- (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'],
- ['http://rg-fk-fake-1/ubuntu']),
- # Test that ec2_region is not available for non-matching AZs
- ('fake-fake-1f', None,
- ['http://EC2-%(ec2_region)s/ubuntu',
- 'http://AZ-%(availability_zone)s/ubuntu'],
- ['http://az-fake-fake-1f/ubuntu']),
- # Test that template order maintained
- (None, 'fake-region',
- ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'],
- ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']),
- # Test that non-ASCII hostnames are IDNA encoded;
- # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
- (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'],
- ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']),
- # Test that non-ASCII hostnames with a port are IDNA encoded;
- # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
- (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'],
- ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']),
- # Test that non-ASCII non-hostname parts of URLs are unchanged
- (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'],
- ['http://www.example.com/ТεЅТ̣/ubuntu']),
- # Test that IPv4 addresses are unchanged
- (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'],
- ['http://192.168.1.1:8080/fk-fake-1/ubuntu']),
- # Test that IPv6 addresses are unchanged
- (None, 'fk-fake-1',
- ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'],
- ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']),
- # Test that unparseable URLs are filtered out of the mirror list
- (None, 'inv[lid',
- ['http://%(region)s.in.hostname/should/be/filtered',
- 'http://but.not.in.the.path/%(region)s'],
- ['http://but.not.in.the.path/inv[lid']),
- (None, '-some-region-',
- ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'],
- ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']),
- ) + tuple(
- # Dynamically generate a test case for each non-LDH
- # (Letters/Digits/Hyphen) ASCII character, testing that it is
- # substituted with a hyphen
- (None, 'fk{0}fake{0}1'.format(invalid_char),
- ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu'])
- for invalid_char in INVALID_URL_CHARS
- ))
- def test_valid_substitution(self,
- allow_ec2_mirror,
- platform_type,
- availability_zone,
- region,
- patterns,
- expected):
- """Test substitution works as expected."""
- flag_path = "cloudinit.distros." \
- "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
-
- m_data_source = mock.Mock(
- availability_zone=availability_zone,
- region=region,
- platform_type=platform_type
- )
- mirror_info = {'search': {'primary': patterns}}
-
- with mock.patch(flag_path, allow_ec2_mirror):
- ret = _get_package_mirror_info(
- mirror_info,
- data_source=m_data_source,
- mirror_filter=lambda x: x
- )
- print(allow_ec2_mirror)
- print(platform_type)
- print(availability_zone)
- print(region)
- print(patterns)
- print(expected)
- assert {'primary': expected} == ret
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 2a1f93d9..ec6470a9 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -9,41 +9,44 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros import debian
-from cloudinit.distros import PREFERRED_NTP_CLIENTS
-from cloudinit import util
-
import copy
+from cloudinit import util
+from cloudinit.distros import PREFERRED_NTP_CLIENTS, debian
-class Distro(debian.Distro):
+class Distro(debian.Distro):
def __init__(self, name, cfg, paths):
super(Distro, self).__init__(name, cfg, paths)
# Ubuntu specific network cfg locations
self.network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
- "netplan": "/etc/netplan/50-cloud-init.yaml"
+ "netplan": "/etc/netplan/50-cloud-init.yaml",
}
self.renderer_configs = {
- "eni": {"eni_path": self.network_conf_fn["eni"],
- "eni_header": debian.NETWORK_FILE_HEADER},
- "netplan": {"netplan_path": self.network_conf_fn["netplan"],
- "netplan_header": debian.NETWORK_FILE_HEADER,
- "postcmds": True}
+ "eni": {
+ "eni_path": self.network_conf_fn["eni"],
+ "eni_header": debian.NETWORK_FILE_HEADER,
+ },
+ "netplan": {
+ "netplan_path": self.network_conf_fn["netplan"],
+ "netplan_header": debian.NETWORK_FILE_HEADER,
+ "postcmds": True,
+ },
}
@property
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
if not self._preferred_ntp_clients:
- (_name, _version, codename) = util.system_info()['dist']
+ (_name, _version, codename) = util.system_info()["dist"]
# Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
if codename == "xenial" and not util.system_is_snappy():
- self._preferred_ntp_clients = ['ntp']
+ self._preferred_ntp_clients = ["ntp"]
else:
- self._preferred_ntp_clients = (
- copy.deepcopy(PREFERRED_NTP_CLIENTS))
+ self._preferred_ntp_clients = copy.deepcopy(
+ PREFERRED_NTP_CLIENTS
+ )
return self._preferred_ntp_clients
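
The reformatted property above selects the NTP client list by release codename; a minimal standalone sketch of that selection, assuming cloud-init's util helpers are importable (illustrative only, not the Distro class itself):

import copy

from cloudinit import util
from cloudinit.distros import PREFERRED_NTP_CLIENTS


def preferred_ntp_clients():
    # Xenial cloud-init only installed ntp; Ubuntu Core ships timesyncd.
    (_name, _version, codename) = util.system_info()["dist"]
    if codename == "xenial" and not util.system_is_snappy():
        return ["ntp"]
    return copy.deepcopy(PREFERRED_NTP_CLIENTS)
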
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 08446a95..72766392 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -10,92 +10,80 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
+from cloudinit import type_utils, util
LOG = logging.getLogger(__name__)
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# group names, or a list of group names
-# or a python dictionary of group names
-# to a list of members of that group.
+# Normalizes an input group configuration which can be:
+# a comma separated string, a list, or a dictionary
#
-# The output is a dictionary of group
-# names => members of that group which
-# is the standard form used in the rest
-# of cloud-init
+# Returns a dictionary of group names => members of that group, which is the
+# standard form used in the rest of cloud-init
def _normalize_groups(grp_cfg):
if isinstance(grp_cfg, str):
grp_cfg = grp_cfg.strip().split(",")
+
if isinstance(grp_cfg, list):
c_grp_cfg = {}
for i in grp_cfg:
if isinstance(i, dict):
for k, v in i.items():
- if k not in c_grp_cfg:
- if isinstance(v, list):
- c_grp_cfg[k] = list(v)
- elif isinstance(v, str):
- c_grp_cfg[k] = [v]
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
+ if not isinstance(v, (list, str)):
+ raise TypeError(
+ "Bad group member type %s"
+ % (type_utils.obj_name(v))
+ )
+
+ if isinstance(v, list):
+ c_grp_cfg.setdefault(k, []).extend(v)
else:
- if isinstance(v, list):
- c_grp_cfg[k].extend(v)
- elif isinstance(v, str):
- c_grp_cfg[k].append(v)
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
+ c_grp_cfg.setdefault(k, []).append(v)
elif isinstance(i, str):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
- raise TypeError("Unknown group name type %s" %
- type_utils.obj_name(i))
+ raise TypeError(
+ "Unknown group name type %s" % (type_utils.obj_name(i))
+ )
grp_cfg = c_grp_cfg
+
groups = {}
if isinstance(grp_cfg, dict):
- for (grp_name, grp_members) in grp_cfg.items():
+ for grp_name, grp_members in grp_cfg.items():
groups[grp_name] = util.uniq_merge_sorted(grp_members)
else:
- raise TypeError(("Group config must be list, dict "
- " or string types only and not %s") %
- type_utils.obj_name(grp_cfg))
+ raise TypeError(
+ "Group config must be list, dict or string type only but found %s"
+ % (type_utils.obj_name(grp_cfg))
+ )
return groups
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# user names, or a list of string user names
-# or a list of dictionaries with components
-# that define the user config + 'name' (if
-# a 'name' field does not exist then the
-# default user is assumed to 'own' that
-# configuration.
+# Normalizes an input user configuration which can be: a string, a list, or a
+# dictionary of entries with components that define the user config + 'name'
+# (if a 'name' field does not exist then the default user is assumed to 'own'
+# that configuration).
#
-# The output is a dictionary of user
-# names => user config which is the standard
-# form used in the rest of cloud-init. Note
-# the default user will have a special config
-# entry 'default' which will be marked as true
-# all other users will be marked as false.
+# Returns a dictionary of user names => user config which is the standard form
+# used in the rest of cloud-init. Note the default user will have a special
+# config entry 'default' which will be marked true and all other users will be
+# marked false.
def _normalize_users(u_cfg, def_user_cfg=None):
if isinstance(u_cfg, dict):
ad_ucfg = []
- for (k, v) in u_cfg.items():
+ for k, v in u_cfg.items():
if isinstance(v, (bool, int, float, str)):
if util.is_true(v):
ad_ucfg.append(str(k))
elif isinstance(v, dict):
- v['name'] = k
+ v["name"] = k
ad_ucfg.append(v)
else:
- raise TypeError(("Unmappable user value type %s"
- " for key %s") % (type_utils.obj_name(v), k))
+ raise TypeError(
+ "Unmappable user value type %s for key %s"
+ % (type_utils.obj_name(v), k)
+ )
u_cfg = ad_ucfg
elif isinstance(u_cfg, str):
u_cfg = util.uniq_merge_sorted(u_cfg)
@@ -107,181 +95,157 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if u and u not in users:
users[u] = {}
elif isinstance(user_config, dict):
- if 'name' in user_config:
- n = user_config.pop('name')
- prev_config = users.get(n) or {}
- users[n] = util.mergemanydict([prev_config,
- user_config])
- else:
- # Assume the default user then
- prev_config = users.get('default') or {}
- users['default'] = util.mergemanydict([prev_config,
- user_config])
+ n = user_config.pop("name", "default")
+ prev_config = users.get(n) or {}
+ users[n] = util.mergemanydict([prev_config, user_config])
else:
- raise TypeError(("User config must be dictionary/list "
- " or string types only and not %s") %
- type_utils.obj_name(user_config))
+ raise TypeError(
+ "User config must be dictionary/list or string "
+ " types only and not %s" % (type_utils.obj_name(user_config))
+ )
# Ensure user options are in the right python friendly format
if users:
c_users = {}
- for (uname, uconfig) in users.items():
+ for uname, uconfig in users.items():
c_uconfig = {}
- for (k, v) in uconfig.items():
- k = k.replace('-', '_').strip()
+ for k, v in uconfig.items():
+ k = k.replace("-", "_").strip()
if k:
c_uconfig[k] = v
c_users[uname] = c_uconfig
users = c_users
- # Fixup the default user into the real
- # default user name and replace it...
+ # Resolve the 'default' entry to the actual default user name and replace it.
def_user = None
- if users and 'default' in users:
- def_config = users.pop('default')
+ if users and "default" in users:
+ def_config = users.pop("default")
if def_user_cfg:
- # Pickup what the default 'real name' is
- # and any groups that are provided by the
- # default config
+ # Pick up what the default 'real name' is and any groups that are
+ # provided by the default config
def_user_cfg = def_user_cfg.copy()
- def_user = def_user_cfg.pop('name')
- def_groups = def_user_cfg.pop('groups', [])
- # Pickup any config + groups for that user name
- # that we may have previously extracted
+ def_user = def_user_cfg.pop("name")
+ def_groups = def_user_cfg.pop("groups", [])
+ # Pick any config + groups for the user name that we may have
+ # extracted previously
parsed_config = users.pop(def_user, {})
- parsed_groups = parsed_config.get('groups', [])
- # Now merge our extracted groups with
- # anything the default config provided
+ parsed_groups = parsed_config.get("groups", [])
+ # Now merge the extracted groups with the default config provided
users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ",".join(users_groups)
- # The real config for the default user is the
- # combination of the default user config provided
- # by the distro, the default user config provided
- # by the above merging for the user 'default' and
- # then the parsed config from the user's 'real name'
- # which does not have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg,
- def_config,
- parsed_config])
-
- # Ensure that only the default user that we
- # found (if any) is actually marked as being
- # the default user
- if users:
- for (uname, uconfig) in users.items():
- if def_user and uname == def_user:
- uconfig['default'] = True
- else:
- uconfig['default'] = False
+ parsed_config["groups"] = ",".join(users_groups)
+ # The real config for the default user is the combination of the
+ # default user config provided by the distro, the default user
+ # config provided by the above merging for the user 'default' and
+ # then the parsed config from the user's 'real name' which does not
+ # have to be 'default' (but could be)
+ users[def_user] = util.mergemanydict(
+ [def_user_cfg, def_config, parsed_config]
+ )
+
+ # Ensure that only the default user that we found (if any) is actually
+ # marked as the default user
+ for uname, uconfig in users.items():
+ uconfig["default"] = uname == def_user if def_user else False
return users
-# Normalizes a set of user/users and group
-# dictionary configuration into a useable
-# format that the rest of cloud-init can
-# understand using the default user
-# provided by the input distrobution (if any)
-# to allow for mapping of the 'default' user.
+# Normalizes a set of user/users and group dictionary configuration into a
+# usable format that the rest of cloud-init can understand, using the default
+# user provided by the input distribution (if any) to allow mapping of the
+# 'default' user.
#
# Output is a dictionary of group names -> [member] (list)
# and a dictionary of user names -> user configuration (dict)
#
-# If 'user' exists it will override
-# the 'users'[0] entry (if a list) otherwise it will
-# just become an entry in the returned dictionary (no override)
+# If 'user' exists, it will override the 'users'[0] entry (if a list);
+# otherwise it will just become an entry in the returned dictionary
+# (no override).
def normalize_users_groups(cfg, distro):
if not cfg:
cfg = {}
- users = {}
- groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
-
# Handle the previous style of doing this where the first user
# overrides the concept of the default user if provided in the user: XYZ
# format.
old_user = {}
- if 'user' in cfg and cfg['user']:
- old_user = cfg['user']
- # Translate it into the format that is more useful
- # going forward
+ if "user" in cfg and cfg["user"]:
+ old_user = cfg["user"]
+ # Translate it into a format that will be more useful going forward
if isinstance(old_user, str):
- old_user = {
- 'name': old_user,
- }
- if not isinstance(old_user, dict):
- LOG.warning(("Format for 'user' key must be a string or dictionary"
- " and not %s"), type_utils.obj_name(old_user))
+ old_user = {"name": old_user}
+ elif not isinstance(old_user, dict):
+ LOG.warning(
+ "Format for 'user' key must be a string or dictionary"
+ " and not %s",
+ type_utils.obj_name(old_user),
+ )
old_user = {}
- # If no old user format, then assume the distro
- # provides what the 'default' user maps to, but notice
- # that if this is provided, we won't automatically inject
- # a 'default' user into the users list, while if a old user
- # format is provided we will.
+ # If no old user format, then assume the distro provides what the 'default'
+ # user maps to, but notice that if this is provided, we won't automatically
+ # inject a 'default' user into the users list, while if an old user format
+ # is provided we will.
distro_user_config = {}
try:
distro_user_config = distro.get_default_user()
except NotImplementedError:
- LOG.warning(("Distro has not implemented default user "
- "access. No distribution provided default user"
- " will be normalized."))
-
- # Merge the old user (which may just be an empty dict when not
- # present with the distro provided default user configuration so
- # that the old user style picks up all the distribution specific
- # attributes (if any)
+ LOG.warning(
+ "Distro has not implemented default user access. No "
+ "distribution provided default user will be normalized."
+ )
+
+ # Merge the old user (which may just be an empty dict when not present)
+ # with the distro provided default user configuration so that the old user
+ # style picks up all the distribution specific attributes (if any)
default_user_config = util.mergemanydict([old_user, distro_user_config])
- base_users = cfg.get('users', [])
+ base_users = cfg.get("users", [])
if not isinstance(base_users, (list, dict, str)):
- LOG.warning(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
- type_utils.obj_name(base_users))
+ LOG.warning(
+ "Format for 'users' key must be a comma separated string"
+ " or a dictionary or a list but found %s",
+ type_utils.obj_name(base_users),
+ )
base_users = []
if old_user:
- # Ensure that when user: is provided that this user
- # always gets added (as the default user)
+ # When 'user:' is provided, make it the default user
if isinstance(base_users, list):
- # Just add it on at the end...
- base_users.append({'name': 'default'})
+ base_users.append({"name": "default"})
elif isinstance(base_users, dict):
- base_users['default'] = dict(base_users).get('default', True)
+ base_users["default"] = dict(base_users).get("default", True)
elif isinstance(base_users, str):
- # Just append it on to be re-parsed later
base_users += ",default"
+ groups = {}
+ if "groups" in cfg:
+ groups = _normalize_groups(cfg["groups"])
+
users = _normalize_users(base_users, default_user_config)
return (users, groups)
-# Given a user dictionary config it will
-# extract the default user name and user config
-# from that list and return that tuple or
-# return (None, None) if no default user is
-# found in the given input
+# Given a user dictionary config, extract the default user name and user config
+# and return them or return (None, None) if no default user is found
def extract_default(users, default_name=None, default_config=None):
if not users:
- users = {}
+ return (default_name, default_config)
def safe_find(entry):
config = entry[1]
- if not config or 'default' not in config:
+ if not config or "default" not in config:
return False
- else:
- return config['default']
+ return config["default"]
- tmp_users = users.items()
- tmp_users = dict(filter(safe_find, tmp_users))
+ tmp_users = dict(filter(safe_find, users.items()))
if not tmp_users:
return (default_name, default_config)
- else:
- name = list(tmp_users)[0]
- config = tmp_users[name]
- config.pop('default', None)
- return (name, config)
+
+ name = list(tmp_users)[0]
+ config = tmp_users[name]
+ config.pop("default", None)
+ return (name, config)
+
# vi: ts=4 expandtab
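
A short usage sketch of the group normalization reworked above; it assumes the cloudinit package is importable and calls the internal helper purely to illustrate the documented input/output shapes:

from cloudinit.distros import ug_util

# Comma separated string -> dict of group name -> members
print(ug_util._normalize_groups("admin,docker"))
# {'admin': [], 'docker': []}

# List mixing plain names and name -> members dictionaries
print(ug_util._normalize_groups([{"admin": ["alice", "bob"]}, "docker"]))
# {'admin': ['alice', 'bob'], 'docker': []}
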
diff --git a/cloudinit/distros/virtuozzo.py b/cloudinit/distros/virtuozzo.py
new file mode 100644
index 00000000..3dc0a342
--- /dev/null
+++ b/cloudinit/distros/virtuozzo.py
@@ -0,0 +1,10 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py
index f0e69a5a..3a999d41 100644
--- a/cloudinit/dmi.py
+++ b/cloudinit/dmi.py
@@ -1,17 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+from collections import namedtuple
+
from cloudinit import log as logging
from cloudinit import subp
from cloudinit.util import is_container, is_FreeBSD
-from collections import namedtuple
-import os
-
LOG = logging.getLogger(__name__)
# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"
-kdmi = namedtuple('KernelNames', ['linux', 'freebsd'])
+kdmi = namedtuple("KernelNames", ["linux", "freebsd"])
kdmi.__new__.defaults__ = (None, None)
# FreeBSD's kenv(1) and Linux /sys/class/dmi/id/* both use different names from
@@ -20,23 +20,23 @@ kdmi.__new__.defaults__ = (None, None)
# This is our canonical translation table. If we add more tools on other
# platforms to find dmidecode's values, their keys need to be put in here.
DMIDECODE_TO_KERNEL = {
- 'baseboard-asset-tag': kdmi('board_asset_tag', 'smbios.planar.tag'),
- 'baseboard-manufacturer': kdmi('board_vendor', 'smbios.planar.maker'),
- 'baseboard-product-name': kdmi('board_name', 'smbios.planar.product'),
- 'baseboard-serial-number': kdmi('board_serial', 'smbios.planar.serial'),
- 'baseboard-version': kdmi('board_version', 'smbios.planar.version'),
- 'bios-release-date': kdmi('bios_date', 'smbios.bios.reldate'),
- 'bios-vendor': kdmi('bios_vendor', 'smbios.bios.vendor'),
- 'bios-version': kdmi('bios_version', 'smbios.bios.version'),
- 'chassis-asset-tag': kdmi('chassis_asset_tag', 'smbios.chassis.tag'),
- 'chassis-manufacturer': kdmi('chassis_vendor', 'smbios.chassis.maker'),
- 'chassis-serial-number': kdmi('chassis_serial', 'smbios.chassis.serial'),
- 'chassis-version': kdmi('chassis_version', 'smbios.chassis.version'),
- 'system-manufacturer': kdmi('sys_vendor', 'smbios.system.maker'),
- 'system-product-name': kdmi('product_name', 'smbios.system.product'),
- 'system-serial-number': kdmi('product_serial', 'smbios.system.serial'),
- 'system-uuid': kdmi('product_uuid', 'smbios.system.uuid'),
- 'system-version': kdmi('product_version', 'smbios.system.version'),
+ "baseboard-asset-tag": kdmi("board_asset_tag", "smbios.planar.tag"),
+ "baseboard-manufacturer": kdmi("board_vendor", "smbios.planar.maker"),
+ "baseboard-product-name": kdmi("board_name", "smbios.planar.product"),
+ "baseboard-serial-number": kdmi("board_serial", "smbios.planar.serial"),
+ "baseboard-version": kdmi("board_version", "smbios.planar.version"),
+ "bios-release-date": kdmi("bios_date", "smbios.bios.reldate"),
+ "bios-vendor": kdmi("bios_vendor", "smbios.bios.vendor"),
+ "bios-version": kdmi("bios_version", "smbios.bios.version"),
+ "chassis-asset-tag": kdmi("chassis_asset_tag", "smbios.chassis.tag"),
+ "chassis-manufacturer": kdmi("chassis_vendor", "smbios.chassis.maker"),
+ "chassis-serial-number": kdmi("chassis_serial", "smbios.chassis.serial"),
+ "chassis-version": kdmi("chassis_version", "smbios.chassis.version"),
+ "system-manufacturer": kdmi("sys_vendor", "smbios.system.maker"),
+ "system-product-name": kdmi("product_name", "smbios.system.product"),
+ "system-serial-number": kdmi("product_serial", "smbios.system.serial"),
+ "system-uuid": kdmi("product_uuid", "smbios.system.uuid"),
+ "system-version": kdmi("product_version", "smbios.system.version"),
}
@@ -62,14 +62,18 @@ def _read_dmi_syspath(key):
# uninitialized dmi values show as all \xff and /sys appends a '\n'.
# in that event, return empty string.
- if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
+ if key_data == b"\xff" * (len(key_data) - 1) + b"\n":
key_data = b""
try:
- return key_data.decode('utf8').strip()
+ return key_data.decode("utf8").strip()
except UnicodeDecodeError as e:
- LOG.error("utf-8 decode of content (%s) in %s failed: %s",
- dmi_key_path, key_data, e)
+ LOG.error(
+ "utf-8 decode of content (%s) in %s failed: %s",
+ dmi_key_path,
+ key_data,
+ e,
+ )
return None
@@ -91,7 +95,7 @@ def _read_kenv(key):
LOG.debug("kenv returned '%s' for '%s'", result, kmap.freebsd)
return result
except subp.ProcessExecutionError as e:
- LOG.debug('failed kenv cmd: %s\n%s', cmd, e)
+ LOG.debug("failed kenv cmd: %s\n%s", cmd, e)
return None
return None
@@ -111,7 +115,7 @@ def _call_dmidecode(key, dmidecode_path):
return ""
return result
except subp.ProcessExecutionError as e:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
+ LOG.debug("failed dmidecode cmd: %s\n%s", cmd, e)
return None
@@ -144,20 +148,20 @@ def read_dmi_data(key):
return syspath_value
def is_x86(arch):
- return (arch == 'x86_64' or (arch[0] == 'i' and arch[2:] == '86'))
+ return arch == "x86_64" or (arch[0] == "i" and arch[2:] == "86")
# running dmidecode can be problematic on some arches (LP: #1243287)
uname_arch = os.uname()[4]
- if not (is_x86(uname_arch) or uname_arch in ('aarch64', 'amd64')):
+ if not (is_x86(uname_arch) or uname_arch in ("aarch64", "amd64")):
LOG.debug("dmidata is not supported on %s", uname_arch)
return None
- dmidecode_path = subp.which('dmidecode')
+ dmidecode_path = subp.which("dmidecode")
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
- LOG.warning("did not find either path %s or dmidecode command",
- DMI_SYS_PATH)
+ LOG.debug("did not find either path %s or dmidecode command", DMI_SYS_PATH)
return None
+
# vi: ts=4 expandtab
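
A hedged sketch of the sysfs half of the lookup above: a dmidecode-style key is translated to its kernel name (per DMIDECODE_TO_KERNEL) and read from /sys/class/dmi/id, treating the all-0xff "uninitialized" case as empty. The dmidecode fallback and architecture checks are omitted here.

import os

DMI_SYS_PATH = "/sys/class/dmi/id"


def read_dmi_syspath(kernel_name):
    # e.g. "system-manufacturer" maps to kernel name "sys_vendor"
    path = os.path.join(DMI_SYS_PATH, kernel_name)
    if not os.path.isfile(path):
        return None
    with open(path, "rb") as f:
        key_data = f.read()
    if key_data == b"\xff" * (len(key_data) - 1) + b"\n":
        return ""
    return key_data.decode("utf8", errors="replace").strip()


print(read_dmi_syspath("sys_vendor"))
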
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 34acfe84..d4019557 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -10,8 +10,7 @@ import functools
import json
from cloudinit import log as logging
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import url_helper, util
LOG = logging.getLogger(__name__)
SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
@@ -30,7 +29,7 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
- return ''
+ return ""
try:
blob = util.decode_binary(blob)
except UnicodeDecodeError:
@@ -40,8 +39,11 @@ class MetadataLeafDecoder(object):
# Assume it's json, unless it fails parsing...
return json.loads(blob)
except (ValueError, TypeError) as e:
- LOG.warning("Field %s looked like a json object, but it"
- " was not: %s", field, e)
+ LOG.warning(
+ "Field %s looked like a json object, but it was not: %s",
+ field,
+ e,
+ )
if blob.find("\n") != -1:
return blob.splitlines()
return blob
@@ -85,7 +87,7 @@ class MetadataMaterializer(object):
if not field or not field_name:
continue
# Don't materialize credentials
- if field_name == 'security-credentials':
+ if field_name == "security-credentials":
continue
if has_children(field):
if field_name not in children:
@@ -127,8 +129,7 @@ class MetadataMaterializer(object):
joined.update(child_contents)
for field in leaf_contents.keys():
if field in joined:
- LOG.warning("Duplicate key found in results from %s",
- base_url)
+ LOG.warning("Duplicate key found in results from %s", base_url)
else:
joined[field] = leaf_contents[field]
return joined
@@ -139,25 +140,36 @@ def skip_retry_on_codes(status_codes, _request_args, cause):
return cause.code not in status_codes
-def get_instance_userdata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- headers_cb=None, headers_redact=None,
- exception_cb=None):
+def get_instance_userdata(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
ud_url = url_helper.combine_url(metadata_address, api_version)
- ud_url = url_helper.combine_url(ud_url, 'user-data')
- user_data = ''
+ ud_url = url_helper.combine_url(ud_url, "user-data")
+ user_data = ""
try:
if not exception_cb:
# It is ok for userdata to not exist (thats why we are stopping if
# NOT_FOUND occurs) and just in that case returning an empty
# string.
- exception_cb = functools.partial(skip_retry_on_codes,
- SKIP_USERDATA_CODES)
+ exception_cb = functools.partial(
+ skip_retry_on_codes, SKIP_USERDATA_CODES
+ )
response = url_helper.read_file_or_url(
- ud_url, ssl_details=ssl_details, timeout=timeout,
- retries=retries, exception_cb=exception_cb, headers_cb=headers_cb,
- headers_redact=headers_redact)
+ ud_url,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ exception_cb=exception_cb,
+ headers_cb=headers_cb,
+ headers_redact=headers_redact,
+ )
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -167,27 +179,37 @@ def get_instance_userdata(api_version='latest',
return user_data
-def _get_instance_metadata(tree, api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
+def _get_instance_metadata(
+ tree,
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(
- url_helper.read_file_or_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries, headers_cb=headers_cb,
+ url_helper.read_file_or_url,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ headers_cb=headers_cb,
headers_redact=headers_redact,
- exception_cb=exception_cb)
+ exception_cb=exception_cb,
+ )
def mcaller(url):
return caller(url).contents
try:
response = caller(md_url)
- materializer = MetadataMaterializer(response.contents,
- md_url, mcaller,
- leaf_decoder=leaf_decoder)
+ materializer = MetadataMaterializer(
+ response.contents, md_url, mcaller, leaf_decoder=leaf_decoder
+ )
md = materializer.materialize()
if not isinstance(md, (dict)):
md = {}
@@ -197,35 +219,56 @@ def _get_instance_metadata(tree, api_version='latest',
return {}
-def get_instance_metadata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
+def get_instance_metadata(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
# Note, 'meta-data' explicitly has trailing /.
# this is required for CloudStack (LP: #1356855)
- return _get_instance_metadata(tree='meta-data/', api_version=api_version,
- metadata_address=metadata_address,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder,
- headers_redact=headers_redact,
- headers_cb=headers_cb,
- exception_cb=exception_cb)
-
-
-def get_instance_identity(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
- return _get_instance_metadata(tree='dynamic/instance-identity',
- api_version=api_version,
- metadata_address=metadata_address,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder,
- headers_redact=headers_redact,
- headers_cb=headers_cb,
- exception_cb=exception_cb)
+ return _get_instance_metadata(
+ tree="meta-data/",
+ api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb,
+ )
+
+
+def get_instance_identity(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
+ return _get_instance_metadata(
+ tree="dynamic/instance-identity",
+ api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb,
+ )
+
+
# vi: ts=4 expandtab
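
A usage sketch for the reflowed IMDS helpers; it assumes cloud-init is importable and that an EC2-style metadata service is reachable at the default 169.254.169.254 address:

from cloudinit import ec2_utils

# Returns an empty string when no user-data exists (404s are not retried).
user_data = ec2_utils.get_instance_userdata(api_version="latest", retries=2)

# Materializes the meta-data/ tree into a nested dict.
metadata = ec2_utils.get_instance_metadata(api_version="latest", retries=2)
print(metadata.get("instance-id"))
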
diff --git a/cloudinit/event.py b/cloudinit/event.py
index f7b311fb..eaf8bd0b 100644
--- a/cloudinit/event.py
+++ b/cloudinit/event.py
@@ -1,17 +1,75 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
"""Classes and functions related to event handling."""
+from enum import Enum
+from typing import Dict, Set
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
-# Event types which can generate maintenance requests for cloud-init.
-class EventType(object):
- BOOT = "System boot"
- BOOT_NEW_INSTANCE = "New instance first boot"
+class EventScope(Enum):
+ # NETWORK is currently the only scope, but we want to leave room to
+ # grow other scopes (e.g., STORAGE) without having to make breaking
+ # changes to the user config
+ NETWORK = "network"
- # TODO: Cloud-init will grow support for the follow event types:
- # UDEV
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+class EventType(Enum):
+ """Event types which can generate maintenance requests for cloud-init."""
+
+ # Cloud-init should grow support for the following event types:
+ # HOTPLUG
# METADATA_CHANGE
# USER_REQUEST
+ BOOT = "boot"
+ BOOT_NEW_INSTANCE = "boot-new-instance"
+ BOOT_LEGACY = "boot-legacy"
+ HOTPLUG = "hotplug"
+
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]:
+ """Convert userdata into update config format defined on datasource.
+
+ Userdata is in the form of (e.g.):
+ {'network': {'when': ['boot']}}
+
+ DataSource config is in the form of:
+ {EventScope.NETWORK: {EventType.BOOT}}
+
+ Take the first and return the second
+ """
+ update_config = {}
+ for scope, scope_list in user_config.items():
+ try:
+ new_scope = EventScope(scope)
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ continue
+ try:
+ new_values = [EventType(x) for x in scope_list["when"]]
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ new_values = []
+ update_config[new_scope] = set(new_values)
+
+ return update_config
+
# vi: ts=4 expandtab
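
A small usage sketch of the conversion described in the docstring above (assuming cloud-init is importable):

from cloudinit.event import EventScope, EventType, userdata_to_events

user_cfg = {"network": {"when": ["boot", "hotplug"]}}
print(userdata_to_events(user_cfg))
# -> {EventScope.NETWORK: {EventType.BOOT, EventType.HOTPLUG}}
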
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5c8bcffb..5aeb0a17 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -23,7 +23,7 @@ class Filter(object):
self.allow_none = allow_none
def _select(self, message):
- msg_idx = message.get('Launch-Index', None)
+ msg_idx = message.get("Launch-Index", None)
if self.allow_none and msg_idx is None:
return True
msg_idx = util.safe_int(msg_idx)
@@ -47,9 +47,12 @@ class Filter(object):
new_msgs.append(m)
else:
discarded += 1
- LOG.debug(("Discarding %s multipart messages "
- "which do not match launch index %s"),
- discarded, self.wanted_idx)
+ LOG.debug(
+ "Discarding %s multipart messages "
+ "which do not match launch index %s",
+ discarded,
+ self.wanted_idx,
+ )
new_message = copy.copy(message)
new_message.set_payload(new_msgs)
new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
@@ -62,4 +65,5 @@ class Filter(object):
return root_message
return self._do_filter(root_message)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 3780326c..8daa5e37 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -7,19 +7,28 @@
"""gpg.py - Collection of gpg key related functions"""
+import time
+
from cloudinit import log as logging
from cloudinit import subp
-import time
-
LOG = logging.getLogger(__name__)
+GPG_LIST = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+]
+
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
try:
- (armour, _) = subp.subp(["gpg", "--export", "--armour", key],
- capture=True)
+ (armour, _) = subp.subp(
+ ["gpg", "--export", "--armour", key], capture=True
+ )
except subp.ProcessExecutionError as error:
# debug, since it happens for any key not on the system initially
LOG.debug('Failed to export armoured key "%s": %s', key, error)
@@ -27,6 +36,33 @@ def export_armour(key):
return armour
+def dearmor(key):
+ """Dearmor gpg key, dearmored key gets returned
+
+ note: man gpg(1) makes no mention of an --armour spelling, only --armor
+ """
+ return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0]
+
+
+def list(key_file, human_output=False):
+ """List keys from a keyring with fingerprints. Default to a stable machine
+ parseable format.
+
+ @param key_file: a string containing a filepath to a key
+ @param human_output: return output intended for human parsing
+ """
+ cmd = []
+ cmd.extend(GPG_LIST)
+ if not human_output:
+ cmd.append("--with-colons")
+
+ cmd.append(key_file)
+ (stdout, stderr) = subp.subp(cmd, capture=True)
+ if stderr:
+ LOG.warning('Failed to export armoured key "%s": %s', key_file, stderr)
+ return stdout
+
+
def recv_key(key, keyserver, retries=(1, 1)):
"""Receive gpg key from the specified keyserver.
@@ -52,8 +88,12 @@ def recv_key(key, keyserver, retries=(1, 1)):
trynum += 1
try:
subp.subp(cmd, capture=True)
- LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
- key, keyserver, trynum)
+ LOG.debug(
+ "Imported key '%s' from keyserver '%s' on try %d",
+ key,
+ keyserver,
+ trynum,
+ )
return
except subp.ProcessExecutionError as e:
error = e
@@ -61,25 +101,28 @@ def recv_key(key, keyserver, retries=(1, 1)):
naplen = next(sleeps)
LOG.debug(
"Import failed with exit code %d, will try again in %ss",
- error.exit_code, naplen)
+ error.exit_code,
+ naplen,
+ )
time.sleep(naplen)
except StopIteration as e:
raise ValueError(
- ("Failed to import key '%s' from keyserver '%s' "
- "after %d tries: %s") % (key, keyserver, trynum, error)
+ "Failed to import key '%s' from keyserver '%s' "
+ "after %d tries: %s" % (key, keyserver, trynum, error)
) from e
def delete_key(key):
"""Delete the specified key from the local gpg ring"""
try:
- subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
- capture=True)
+ subp.subp(
+ ["gpg", "--batch", "--yes", "--delete-keys", key], capture=True
+ )
except subp.ProcessExecutionError as error:
LOG.warning('Failed delete key "%s": %s', key, error)
-def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
+def getkeybyid(keyid, keyserver="keyserver.ubuntu.com"):
"""get gpg keyid from keyserver"""
armour = export_armour(keyid)
if not armour:
@@ -87,7 +130,7 @@ def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
recv_key(keyid, keyserver=keyserver)
armour = export_armour(keyid)
except ValueError:
- LOG.exception('Failed to obtain gpg key %s', keyid)
+ LOG.exception("Failed to obtain gpg key %s", keyid)
raise
finally:
# delete just imported key to leave environment as it was before
@@ -95,4 +138,5 @@ def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
return armour
+
# vi: ts=4 expandtab
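
A hedged usage sketch for the new keyring helpers added above; it requires the gpg binary on PATH, and the file paths below are illustrative, not cloud-init defaults:

from cloudinit import gpg

keyring = "/usr/share/keyrings/example-archive-keyring.gpg"  # illustrative path
colons = gpg.list(keyring)                      # stable, machine-parseable (--with-colons)
readable = gpg.list(keyring, human_output=True)

with open("example-key.asc") as f:              # illustrative ASCII-armoured key
    dearmored = gpg.dearmor(f.read())           # bytes suitable for a .gpg keyring file
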
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index a409ff8a..7d8a9208 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -13,9 +13,8 @@ import os
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
+from cloudinit import type_utils, util
+from cloudinit.settings import FREQUENCIES, PER_ALWAYS, PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -24,7 +23,7 @@ LOG = logging.getLogger(__name__)
NOT_MULTIPART_TYPE = "text/x-not-multipart"
# When none is assigned this gets used
-OCTET_TYPE = 'application/octet-stream'
+OCTET_TYPE = "application/octet-stream"
# Special content types that signal the start and end of processing
CONTENT_END = "__end__"
@@ -34,32 +33,39 @@ CONTENT_SIGNALS = [CONTENT_START, CONTENT_END]
# Used when a part-handler type is encountered
# to allow for registration of new types.
PART_CONTENT_TYPES = ["text/part-handler"]
-PART_HANDLER_FN_TMPL = 'part-handler-%03d'
+PART_HANDLER_FN_TMPL = "part-handler-%03d"
# For parts without filenames
-PART_FN_TPL = 'part-%03d'
+PART_FN_TPL = "part-%03d"
# Different file beginnings to their content type
INCLUSION_TYPES_MAP = {
- '#include': 'text/x-include-url',
- '#include-once': 'text/x-include-once-url',
- '#!': 'text/x-shellscript',
- '#cloud-config': 'text/cloud-config',
- '#upstart-job': 'text/upstart-job',
- '#part-handler': 'text/part-handler',
- '#cloud-boothook': 'text/cloud-boothook',
- '#cloud-config-archive': 'text/cloud-config-archive',
- '#cloud-config-jsonp': 'text/cloud-config-jsonp',
- '## template: jinja': 'text/jinja2',
+ "#include": "text/x-include-url",
+ "#include-once": "text/x-include-once-url",
+ "#!": "text/x-shellscript",
+ "#cloud-config": "text/cloud-config",
+ "#upstart-job": "text/upstart-job",
+ "#part-handler": "text/part-handler",
+ "#cloud-boothook": "text/cloud-boothook",
+ "#cloud-config-archive": "text/cloud-config-archive",
+ "#cloud-config-jsonp": "text/cloud-config-jsonp",
+ "## template: jinja": "text/jinja2",
+ # Note: for the next 3 entries, the prefix doesn't matter because these
+ # are for types that can only be used as part of a MIME message. However,
+ # including these entries suppresses warnings during `cloudinit devel
+ # make-mime`, which otherwise would require `--force`.
+ "text/x-shellscript-per-boot": "text/x-shellscript-per-boot",
+ "text/x-shellscript-per-instance": "text/x-shellscript-per-instance",
+ "text/x-shellscript-per-once": "text/x-shellscript-per-once",
}
# Sorted longest first
-INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
- key=(lambda e: 0 - len(e)))
+INCLUSION_SRCH = sorted(
+ list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))
+)
class Handler(metaclass=abc.ABCMeta):
-
def __init__(self, frequency, version=2):
self.handler_version = version
self.frequency = frequency
@@ -69,11 +75,13 @@ class Handler(metaclass=abc.ABCMeta):
def list_types(self):
# Each subclass must define the supported content prefixes it handles.
- if not hasattr(self, 'prefixes'):
- raise NotImplementedError('Missing prefixes subclass attribute')
+ if not hasattr(self, "prefixes"):
+ raise NotImplementedError("Missing prefixes subclass attribute")
else:
- return [INCLUSION_TYPES_MAP[prefix]
- for prefix in getattr(self, 'prefixes')]
+ return [
+ INCLUSION_TYPES_MAP[prefix]
+ for prefix in getattr(self, "prefixes")
+ ]
@abc.abstractmethod
def handle_part(self, *args, **kwargs):
@@ -82,8 +90,10 @@ class Handler(metaclass=abc.ABCMeta):
def run_part(mod, data, filename, payload, frequency, headers):
mod_freq = mod.frequency
- if not (mod_freq == PER_ALWAYS or
- (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
+ if not (
+ mod_freq == PER_ALWAYS
+ or (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)
+ ):
return
# Sanity checks on version (should be an int convertable)
try:
@@ -91,33 +101,45 @@ def run_part(mod, data, filename, payload, frequency, headers):
mod_ver = int(mod_ver)
except (TypeError, ValueError, AttributeError):
mod_ver = 1
- content_type = headers['Content-Type']
+ content_type = headers["Content-Type"]
try:
- LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
- mod, content_type, filename, mod_ver, frequency)
+ LOG.debug(
+ "Calling handler %s (%s, %s, %s) with frequency %s",
+ mod,
+ content_type,
+ filename,
+ mod_ver,
+ frequency,
+ )
if mod_ver == 3:
# Treat as v. 3 which does get a frequency + headers
- mod.handle_part(data, content_type, filename,
- payload, frequency, headers)
+ mod.handle_part(
+ data, content_type, filename, payload, frequency, headers
+ )
elif mod_ver == 2:
# Treat as v. 2 which does get a frequency
- mod.handle_part(data, content_type, filename,
- payload, frequency)
+ mod.handle_part(data, content_type, filename, payload, frequency)
elif mod_ver == 1:
# Treat as v. 1 which gets no frequency
mod.handle_part(data, content_type, filename, payload)
else:
raise ValueError("Unknown module version %s" % (mod_ver))
except Exception:
- util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
- "frequency %s", mod, content_type, filename, mod_ver,
- frequency)
+ util.logexc(
+ LOG,
+ "Failed calling handler %s (%s, %s, %s) with frequency %s",
+ mod,
+ content_type,
+ filename,
+ mod_ver,
+ frequency,
+ )
def call_begin(mod, data, frequency):
# Create a fake header set
headers = {
- 'Content-Type': CONTENT_START,
+ "Content-Type": CONTENT_START,
}
run_part(mod, data, None, None, frequency, headers)
@@ -125,31 +147,35 @@ def call_begin(mod, data, frequency):
def call_end(mod, data, frequency):
# Create a fake header set
headers = {
- 'Content-Type': CONTENT_END,
+ "Content-Type": CONTENT_END,
}
run_part(mod, data, None, None, frequency, headers)
def walker_handle_handler(pdata, _ctype, _filename, payload):
- curcount = pdata['handlercount']
+ curcount = pdata["handlercount"]
modname = PART_HANDLER_FN_TMPL % (curcount)
- frequency = pdata['frequency']
- modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
+ frequency = pdata["frequency"]
+ modfname = os.path.join(pdata["handlerdir"], "%s" % (modname))
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
# TODO(harlowja): Check if path exists??
util.write_file(modfname, payload, 0o600)
- handlers = pdata['handlers']
+ handlers = pdata["handlers"]
try:
mod = fixup_handler(importer.import_module(modname))
- call_begin(mod, pdata['data'], frequency)
+ call_begin(mod, pdata["data"], frequency)
# Only register and increment after the above have worked, so we don't
# register if it fails starting.
handlers.register(mod, initialized=True)
- pdata['handlercount'] = curcount + 1
+ pdata["handlercount"] = curcount + 1
except Exception:
- util.logexc(LOG, "Failed at registering python file: %s (part "
- "handler %s)", modfname, curcount)
+ util.logexc(
+ LOG,
+ "Failed at registering python file: %s (part handler %s)",
+ modfname,
+ curcount,
+ )
def _extract_first_or_bytes(blob, size):
@@ -161,7 +187,7 @@ def _extract_first_or_bytes(blob, size):
else:
# We want to avoid decoding the whole blob (it might be huge)
# By taking 4*size bytes we guarantee to decode size utf8 chars
- start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
+ start = blob[: 4 * size].decode(errors="ignore").split("\n", 1)[0]
if len(start) >= size:
start = start[:size]
except UnicodeDecodeError:
@@ -176,7 +202,7 @@ def _escape_string(text):
except (LookupError, TypeError):
try:
# Unicode (and Python 3's str) doesn't support string_escape...
- return text.encode('unicode_escape')
+ return text.encode("unicode_escape")
except TypeError:
# Give up...
pass
@@ -189,28 +215,40 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
- content_type = headers['Content-Type']
- if content_type in data.get('excluded'):
+ content_type = headers["Content-Type"]
+ if content_type in data.get("excluded"):
LOG.debug('content_type "%s" is excluded', content_type)
return
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return
- handlers = data['handlers']
+ handlers = data["handlers"]
if content_type in handlers:
- run_part(handlers[content_type], data['data'], filename,
- payload, data['frequency'], headers)
+ run_part(
+ handlers[content_type],
+ data["data"],
+ filename,
+ payload,
+ data["frequency"],
+ headers,
+ )
elif payload:
# Extract the first line or 24 bytes for displaying in the log
start = _extract_first_or_bytes(payload, 24)
details = "'%s...'" % (_escape_string(start))
if content_type == NOT_MULTIPART_TYPE:
- LOG.warning("Unhandled non-multipart (%s) userdata: %s",
- content_type, details)
+ LOG.warning(
+ "Unhandled non-multipart (%s) userdata: %s",
+ content_type,
+ details,
+ )
else:
- LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
- content_type, details)
+ LOG.warning(
+ "Unhandled unknown content-type (%s) userdata: %s",
+ content_type,
+ details,
+ )
else:
LOG.debug("Empty payload of type %s", content_type)
@@ -221,7 +259,7 @@ def walk(msg, callback, data):
partnum = 0
for part in msg.walk():
# multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
+ if part.get_content_maintype() == "multipart":
continue
ctype = part.get_content_type()
@@ -234,7 +272,7 @@ def walk(msg, callback, data):
headers = dict(part)
LOG.debug(headers)
- headers['Content-Type'] = ctype
+ headers["Content-Type"] = ctype
payload = util.fully_decoded_payload(part)
callback(data, filename, payload, headers)
partnum = partnum + 1
@@ -243,8 +281,8 @@ def walk(msg, callback, data):
def fixup_handler(mod, def_freq=PER_INSTANCE):
if not hasattr(mod, "handler_version"):
setattr(mod, "handler_version", 1)
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
+ if not hasattr(mod, "frequency"):
+ setattr(mod, "frequency", def_freq)
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
@@ -263,4 +301,5 @@ def type_from_starts_with(payload, default=None):
return INCLUSION_TYPES_MAP[text]
return default
+
# vi: ts=4 expandtab
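
A small sketch of how a user-data part is typed from its leading prefix via INCLUSION_TYPES_MAP (longest prefix wins, per INCLUSION_SRCH above), assuming cloud-init is importable:

from cloudinit import handlers

print(handlers.type_from_starts_with("#cloud-config\npackages: [htop]"))
# text/cloud-config
print(handlers.type_from_starts_with("#!/bin/sh\necho hello"))
# text/x-shellscript
print(handlers.type_from_starts_with("plain text", default=handlers.NOT_MULTIPART_TYPE))
# text/x-not-multipart
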
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index c6205097..602800ed 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -12,10 +12,8 @@ import os
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import subp, util
+from cloudinit.settings import PER_ALWAYS
LOG = logging.getLogger(__name__)
@@ -23,7 +21,7 @@ LOG = logging.getLogger(__name__)
class BootHookPartHandler(handlers.Handler):
# The content prefixes this handler understands.
- prefixes = ['#cloud-boothook']
+ prefixes = ["#cloud-boothook"]
def __init__(self, paths, datasource, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
@@ -35,8 +33,9 @@ class BootHookPartHandler(handlers.Handler):
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
filepath = os.path.join(self.boothook_dir, filename)
- contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=self.prefixes[0])
+ contents = util.strip_prefix_suffix(
+ util.dos2unix(payload), prefix=self.prefixes[0]
+ )
util.write_file(filepath, contents.lstrip(), 0o700)
return filepath
@@ -48,12 +47,14 @@ class BootHookPartHandler(handlers.Handler):
try:
env = os.environ.copy()
if self.instance_id is not None:
- env['INSTANCE_ID'] = str(self.instance_id)
+ env["INSTANCE_ID"] = str(self.instance_id)
subp.subp([filepath], env=env)
except subp.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
- util.logexc(LOG, "Boothooks unknown error when running %s",
- filepath)
+ util.logexc(
+ LOG, "Boothooks unknown error when running %s", filepath
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 2a307364..8070c6cb 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -12,15 +12,12 @@ import jsonpatch
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import util
-from cloudinit import safeyaml
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import mergers, safeyaml, util
+from cloudinit.settings import PER_ALWAYS
LOG = logging.getLogger(__name__)
-MERGE_HEADER = 'Merge-Type'
+MERGE_HEADER = "Merge-Type"
# Due to the way the loading of yaml configuration was done previously,
# where previously each cloud config part was appended to a larger yaml
@@ -39,7 +36,7 @@ MERGE_HEADER = 'Merge-Type'
# a: 22
#
# This gets loaded into yaml with final result {'a': 22}
-DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
+DEF_MERGERS = mergers.string_extract_mergers("dict(replace)+list()+str()")
CLOUD_PREFIX = "#cloud-config"
JSONP_PREFIX = "#cloud-config-jsonp"
@@ -53,7 +50,7 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
- if 'cloud_config_path' in _kwargs:
+ if "cloud_config_path" in _kwargs:
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
@@ -66,14 +63,14 @@ class CloudConfigPartHandler(handlers.Handler):
file_lines.append("# from %s files" % (len(self.file_names)))
for fn in self.file_names:
if not fn:
- fn = '?'
+ fn = "?"
file_lines.append("# %s" % (fn))
file_lines.append("")
if self.cloud_buf is not None:
# Something was actually gathered....
lines = [
CLOUD_PREFIX,
- '',
+ "",
]
lines.extend(file_lines)
lines.append(safeyaml.dumps(self.cloud_buf))
@@ -82,9 +79,9 @@ class CloudConfigPartHandler(handlers.Handler):
util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
def _extract_mergers(self, payload, headers):
- merge_header_headers = ''
- for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
- tmp_h = headers.get(h, '')
+ merge_header_headers = ""
+ for h in [MERGE_HEADER, "X-%s" % (MERGE_HEADER)]:
+ tmp_h = headers.get(h, "")
if tmp_h:
merge_header_headers = tmp_h
break
@@ -92,6 +89,9 @@ class CloudConfigPartHandler(handlers.Handler):
# or the merge type from the headers or default to our own set
# if neither exists (or is empty) from the later.
payload_yaml = util.load_yaml(payload)
+ if payload_yaml is None:
+ raise ValueError("empty cloud config")
+
mergers_yaml = mergers.dict_extract_mergers(payload_yaml)
mergers_header = mergers.string_extract_mergers(merge_header_headers)
all_mergers = []
@@ -142,8 +142,16 @@ class CloudConfigPartHandler(handlers.Handler):
for i in ("\n", "\r", "\t"):
filename = filename.replace(i, " ")
self.file_names.append(filename.strip())
+ except ValueError as err:
+ LOG.warning(
+ "Failed at merging in cloud config part from %s: %s",
+ filename,
+ err,
+ )
except Exception:
- util.logexc(LOG, "Failed at merging in cloud config part from %s",
- filename)
+ util.logexc(
+ LOG, "Failed at merging in cloud config part from %s", filename
+ )
+
# vi: ts=4 expandtab
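
The default merger string above can also be supplied per part via a Merge-Type (or X-Merge-Type) header; a hedged sketch of parsing such a string into merger specs, assuming cloud-init is importable:

from cloudinit import mergers

# Same default ruleset as DEF_MERGERS above.
specs = mergers.string_extract_mergers("dict(replace)+list()+str()")
print(specs)  # parsed merger specs, later consumed when combining cloud-config parts
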
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index aadfbf86..1f9caa64 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -1,63 +1,75 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from errno import EACCES
+import copy
import os
import re
+from errno import EACCES
+from typing import Optional
try:
from jinja2.exceptions import UndefinedError as JUndefinedError
+ from jinja2.lexer import operator_re
except ImportError:
# No jinja2 dependency
JUndefinedError = Exception
+ operator_re = re.compile(r"[-.]")
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.sources import INSTANCE_JSON_FILE
-from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
-from cloudinit.util import b64d, load_file, load_json, json_dumps
-
from cloudinit.settings import PER_ALWAYS
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.templater import MISSING_JINJA_PREFIX, render_string
+from cloudinit.util import b64d, json_dumps, load_file, load_json
LOG = logging.getLogger(__name__)
class JinjaTemplatePartHandler(handlers.Handler):
- prefixes = ['## template: jinja']
+ prefixes = ["## template: jinja"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.paths = paths
self.sub_handlers = {}
- for handler in _kwargs.get('sub_handlers', []):
+ for handler in _kwargs.get("sub_handlers", []):
for ctype in handler.list_types():
self.sub_handlers[ctype] = handler
def handle_part(self, data, ctype, filename, payload, frequency, headers):
if ctype in handlers.CONTENT_SIGNALS:
return
- jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ jinja_json_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
rendered_payload = render_jinja_payload_from_file(
- payload, filename, jinja_json_file)
+ payload, filename, jinja_json_file
+ )
if not rendered_payload:
return
subtype = handlers.type_from_starts_with(rendered_payload)
sub_handler = self.sub_handlers.get(subtype)
if not sub_handler:
LOG.warning(
- 'Ignoring jinja template for %s. Could not find supported'
- ' sub-handler for type %s', filename, subtype)
+ "Ignoring jinja template for %s. Could not find supported"
+ " sub-handler for type %s",
+ filename,
+ subtype,
+ )
return
if sub_handler.handler_version == 3:
sub_handler.handle_part(
- data, ctype, filename, rendered_payload, frequency, headers)
+ data, ctype, filename, rendered_payload, frequency, headers
+ )
elif sub_handler.handler_version == 2:
sub_handler.handle_part(
- data, ctype, filename, rendered_payload, frequency)
+ data, ctype, filename, rendered_payload, frequency
+ )
def render_jinja_payload_from_file(
- payload, payload_fn, instance_data_file, debug=False):
+ payload, payload_fn, instance_data_file, debug=False
+):
"""Render a jinja template payload sourcing variables from jinja_vars_path.
@param payload: String of jinja template content. Should begin with
@@ -75,19 +87,21 @@ def render_jinja_payload_from_file(
rendered_payload = None
if not os.path.exists(instance_data_file):
raise RuntimeError(
- 'Cannot render jinja template vars. Instance data not yet'
- ' present at %s' % instance_data_file)
+ "Cannot render jinja template vars. Instance data not yet"
+ " present at %s" % instance_data_file
+ )
try:
instance_data = load_json(load_file(instance_data_file))
except (IOError, OSError) as e:
if e.errno == EACCES:
raise RuntimeError(
- 'Cannot render jinja template vars. No read permission on'
+ "Cannot render jinja template vars. No read permission on"
" '%s'. Try sudo" % instance_data_file
) from e
rendered_payload = render_jinja_payload(
- payload, payload_fn, instance_data, debug)
+ payload, payload_fn, instance_data, debug
+ )
if not rendered_payload:
return None
return rendered_payload
@@ -96,51 +110,87 @@ def render_jinja_payload_from_file(
def render_jinja_payload(payload, payload_fn, instance_data, debug=False):
instance_jinja_vars = convert_jinja_instance_data(
instance_data,
- decode_paths=instance_data.get('base64-encoded-keys', []))
+ decode_paths=instance_data.get("base64-encoded-keys", []),
+ include_key_aliases=True,
+ )
if debug:
- LOG.debug('Converted jinja variables\n%s',
- json_dumps(instance_jinja_vars))
+ LOG.debug(
+ "Converted jinja variables\n%s", json_dumps(instance_jinja_vars)
+ )
try:
rendered_payload = render_string(payload, instance_jinja_vars)
except (TypeError, JUndefinedError) as e:
- LOG.warning(
- 'Ignoring jinja template for %s: %s', payload_fn, str(e))
+ LOG.warning("Ignoring jinja template for %s: %s", payload_fn, str(e))
return None
warnings = [
- "'%s'" % var.replace(MISSING_JINJA_PREFIX, '')
+ "'%s'" % var.replace(MISSING_JINJA_PREFIX, "")
for var in re.findall(
- r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)]
+ r"%s[^\s]+" % MISSING_JINJA_PREFIX, rendered_payload
+ )
+ ]
if warnings:
LOG.warning(
"Could not render jinja template variables in file '%s': %s",
- payload_fn, ', '.join(warnings))
+ payload_fn,
+ ", ".join(warnings),
+ )
return rendered_payload
-def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()):
+def get_jinja_variable_alias(orig_name: str) -> Optional[str]:
+ """Return a jinja variable alias, replacing any operators with underscores.
+
+ Provide underscore-delimited key aliases to simplify dot-notation
+ attribute references for keys which contain operators "." or "-".
+ This provides for simpler short-hand jinja attribute notation
+ allowing one to avoid quoting keys which contain operators.
+ {{ ds.v1_0.config.user_network_config }} instead of
+ {{ ds['v1.0'].config["user.network-config"] }}.
+
+ :param orig_name: String representing a jinja variable name to scrub/alias.
+
+ :return: A string with any jinja operators replaced if needed. Otherwise,
+ none if no alias required.
+ """
+ alias_name = re.sub(operator_re, "_", orig_name)
+ if alias_name != orig_name:
+ return alias_name
+ return None
+
+
+def convert_jinja_instance_data(
+ data, prefix="", sep="/", decode_paths=(), include_key_aliases=False
+):
"""Process instance-data.json dict for use in jinja templates.
Replace hyphens with underscores for jinja templates and decode any
base64_encoded_keys.
"""
result = {}
- decode_paths = [path.replace('-', '_') for path in decode_paths]
+ decode_paths = [path.replace("-", "_") for path in decode_paths]
for key, value in sorted(data.items()):
- if '-' in key:
- # Standardize keys for use in #cloud-config/shell templates
- key = key.replace('-', '_')
- key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key
+ key_path = "{0}{1}{2}".format(prefix, sep, key) if prefix else key
if key_path in decode_paths:
value = b64d(value)
if isinstance(value, dict):
result[key] = convert_jinja_instance_data(
- value, key_path, sep=sep, decode_paths=decode_paths)
- if re.match(r'v\d+', key):
+ value,
+ key_path,
+ sep=sep,
+ decode_paths=decode_paths,
+ include_key_aliases=include_key_aliases,
+ )
+ if re.match(r"v\d+$", key):
# Copy values to top-level aliases
for subkey, subvalue in result[key].items():
- result[subkey] = subvalue
+ result[subkey] = copy.deepcopy(subvalue)
else:
result[key] = value
+ if include_key_aliases:
+ alias_name = get_jinja_variable_alias(key)
+ if alias_name:
+ result[alias_name] = copy.deepcopy(result[key])
return result
+
# vi: ts=4 expandtab
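# Aside (illustrative sketch, not part of the patch): get_jinja_variable_alias() above
# adds underscore aliases so templates can write ds.v1_0.user_network_config instead of
# ds['v1.0']['user.network-config']. A stand-alone sketch of that aliasing, using the
# simple fallback pattern from this hunk (jinja2's own operator_re is broader):
import copy
import re

operator_re = re.compile(r"[-.]")

def alias_keys(data):
    """Return a copy of data with underscore aliases for keys containing '.' or '-'."""
    result = {}
    for key, value in data.items():
        if isinstance(value, dict):
            value = alias_keys(value)
        result[key] = value
        alias = re.sub(operator_re, "_", key)
        if alias != key:
            result[alias] = copy.deepcopy(value)
    return result

print(alias_keys({"v1.0": {"user.network-config": "..."}}))
# {'v1.0': {'user.network-config': '...', 'user_network_config': '...'},
#  'v1_0': {'user.network-config': '...', 'user_network_config': '...'}}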
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 9917f551..44061838 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -10,21 +10,19 @@
import os
-from cloudinit import handlers
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import handlers, util
+from cloudinit.settings import PER_ALWAYS
class ShellScriptPartHandler(handlers.Handler):
- prefixes = ['#!']
+ prefixes = ["#!"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
- self.script_dir = paths.get_ipath_cur('scripts')
- if 'script_path' in _kwargs:
- self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
+ self.script_dir = paths.get_ipath_cur("scripts")
+ if "script_path" in _kwargs:
+ self.script_dir = paths.get_ipath_cur(_kwargs["script_path"])
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
@@ -36,4 +34,5 @@ class ShellScriptPartHandler(handlers.Handler):
path = os.path.join(self.script_dir, filename)
util.write_file(path, payload, 0o700)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/shell_script_by_frequency.py b/cloudinit/handlers/shell_script_by_frequency.py
new file mode 100644
index 00000000..923cca57
--- /dev/null
+++ b/cloudinit/handlers/shell_script_by_frequency.py
@@ -0,0 +1,62 @@
+import os
+
+from cloudinit import log, util
+from cloudinit.handlers import Handler
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE
+
+LOG = log.getLogger(__name__)
+
+# cloudinit/settings.py defines PER_*** frequency constants. It makes sense to
+# use them here, instead of hardcodes, and map them to the 'per-***' frequency-
+# specific folders in /v/l/c/scripts. It might make sense to expose this at a
+# higher level or in a more general module -- eg maybe in cloudinit/settings.py
+# itself -- but for now it's here.
+path_map = {
+ PER_ALWAYS: "per-boot",
+ PER_INSTANCE: "per-instance",
+ PER_ONCE: "per-once",
+}
+
+
+def get_mime_type_by_frequency(freq):
+ mime_type = f"text/x-shellscript-{path_map[freq]}"
+ return mime_type
+
+
+def get_script_folder_by_frequency(freq, scripts_dir):
+ """Return the frequency-specific subfolder for a given frequency constant
+ and parent folder."""
+ freqPath = path_map[freq]
+ folder = os.path.join(scripts_dir, freqPath)
+ return folder
+
+
+def write_script_by_frequency(script_path, payload, frequency, scripts_dir):
+ """Given a filename, a payload, a frequency, and a scripts folder, write
+ the payload to the correct frequency-specific path"""
+ filename = os.path.basename(script_path)
+ filename = util.clean_filename(filename)
+ folder = get_script_folder_by_frequency(frequency, scripts_dir)
+ path = os.path.join(folder, filename)
+ payload = util.dos2unix(payload)
+ util.write_file(path, payload, 0o700)
+
+
+class ShellScriptByFreqPartHandler(Handler):
+ """Common base class for the frequency-specific script handlers."""
+
+ def __init__(self, script_frequency, paths, **_kwargs):
+ Handler.__init__(self, PER_ALWAYS)
+ self.prefixes = [get_mime_type_by_frequency(script_frequency)]
+ self.script_frequency = script_frequency
+ self.scripts_dir = paths.get_cpath("scripts")
+ if "script_path" in _kwargs:
+ self.scripts_dir = paths.get_cpath(_kwargs["script_path"])
+
+ def handle_part(self, data, ctype, script_path, payload, frequency):
+ if script_path is not None:
+ filename = os.path.basename(script_path)
+ filename = util.clean_filename(filename)
+ write_script_by_frequency(
+ script_path, payload, self.script_frequency, self.scripts_dir
+ )
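# Aside (illustrative sketch, not part of the patch): the new handler keys everything
# off the frequency-constant -> 'per-*' folder mapping above. A stand-alone sketch of
# how the MIME type and target folder are derived (constant values and the default
# scripts directory below are placeholders; the real ones live in cloudinit/settings.py
# and the Paths object):
import os

PER_ALWAYS, PER_INSTANCE, PER_ONCE = "always", "instance", "once"
path_map = {PER_ALWAYS: "per-boot", PER_INSTANCE: "per-instance", PER_ONCE: "per-once"}

def mime_and_folder(freq, scripts_dir="/var/lib/cloud/scripts"):
    return ("text/x-shellscript-" + path_map[freq],
            os.path.join(scripts_dir, path_map[freq]))

print(mime_and_folder(PER_ALWAYS))
# ('text/x-shellscript-per-boot', '/var/lib/cloud/scripts/per-boot')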
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index a9d29537..4bc95f97 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -13,17 +13,15 @@ import re
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.settings import (PER_INSTANCE)
+from cloudinit import subp, util
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class UpstartJobPartHandler(handlers.Handler):
- prefixes = ['#upstart-job']
+ prefixes = ["#upstart-job"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_INSTANCE)
@@ -43,7 +41,7 @@ class UpstartJobPartHandler(handlers.Handler):
filename = util.clean_filename(filename)
(_name, ext) = os.path.splitext(filename)
if not ext:
- ext = ''
+ ext = ""
ext = ext.lower()
if ext != ".conf":
filename = filename + ".conf"
@@ -78,9 +76,10 @@ def _has_suitable_upstart():
if not os.path.exists("/usr/bin/dpkg-query"):
return False
try:
- (dpkg_ver, _err) = subp.subp(["dpkg-query",
- "--showformat=${Version}",
- "--show", "upstart"], rcs=[0, 1])
+ (dpkg_ver, _err) = subp.subp(
+ ["dpkg-query", "--showformat=${Version}", "--show", "upstart"],
+ rcs=[0, 1],
+ )
except Exception:
util.logexc(LOG, "dpkg-query failed")
return False
@@ -93,8 +92,9 @@ def _has_suitable_upstart():
if e.exit_code == 1:
pass
else:
- util.logexc(LOG, "dpkg --compare-versions failed [%s]",
- e.exit_code)
+ util.logexc(
+ LOG, "dpkg --compare-versions failed [%s]", e.exit_code
+ )
except Exception:
util.logexc(LOG, "dpkg --compare-versions failed")
return False
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 9752ad28..c2c9e584 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -8,19 +8,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from time import time
-
import contextlib
import os
-from configparser import NoSectionError, NoOptionError, RawConfigParser
+from configparser import NoOptionError, NoSectionError, RawConfigParser
from io import StringIO
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CFG_ENV_NAME)
+from time import time
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
+from cloudinit import persistence, type_utils, util
+from cloudinit.settings import CFG_ENV_NAME, PER_ALWAYS, PER_INSTANCE, PER_ONCE
LOG = logging.getLogger(__name__)
@@ -91,8 +87,9 @@ class FileSemaphores(object):
try:
util.del_dir(self.sem_path)
except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore directory %s",
- self.sem_path)
+ util.logexc(
+ LOG, "Failed deleting semaphore directory %s", self.sem_path
+ )
def _acquire(self, name, freq):
# Check again if its been already gotten
@@ -124,11 +121,14 @@ class FileSemaphores(object):
# this case could happen if the migrator module hadn't run yet
# but the item had run before we did canon_sem_name.
if cname != name and os.path.exists(self._get_path(name, freq)):
- LOG.warning("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. "
- "It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator",
- name, cname)
+ LOG.warning(
+ "%s has run without canonicalized name [%s].\n"
+ "likely the migrator has not yet run. "
+ "It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator",
+ name,
+ cname,
+ )
return True
return False
@@ -187,9 +187,14 @@ class Runners(object):
class ConfigMerger(object):
- def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None,
- include_vendor=True):
+ def __init__(
+ self,
+ paths=None,
+ datasource=None,
+ additional_fns=None,
+ base_cfg=None,
+ include_vendor=True,
+ ):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
@@ -206,8 +211,11 @@ class ConfigMerger(object):
if ds_cfg and isinstance(ds_cfg, (dict)):
d_cfgs.append(ds_cfg)
except Exception:
- util.logexc(LOG, "Failed loading of datasource config object "
- "from %s", self._ds)
+ util.logexc(
+ LOG,
+ "Failed loading of datasource config object from %s",
+ self._ds,
+ )
return d_cfgs
def _get_env_configs(self):
@@ -217,8 +225,7 @@ class ConfigMerger(object):
try:
e_cfgs.append(util.read_conf(e_fn))
except Exception:
- util.logexc(LOG, 'Failed loading of env. config from %s',
- e_fn)
+ util.logexc(LOG, "Failed loading of env. config from %s", e_fn)
return e_cfgs
def _get_instance_configs(self):
@@ -228,9 +235,13 @@ class ConfigMerger(object):
if not self._paths:
return i_cfgs
- cc_paths = ['cloud_config']
+ cc_paths = ["cloud_config"]
if self._include_vendor:
- cc_paths.append('vendor_cloud_config')
+ # the order is important here: we want vendor2
+ # (dynamic vendor data from OpenStack)
+ # to override vendor (static data from OpenStack)
+ cc_paths.append("vendor2_cloud_config")
+ cc_paths.append("vendor_cloud_config")
for cc_p in cc_paths:
cc_fn = self._paths.get_ipath_cur(cc_p)
@@ -239,11 +250,14 @@ class ConfigMerger(object):
i_cfgs.append(util.read_conf(cc_fn))
except PermissionError:
LOG.debug(
- 'Skipped loading cloud-config from %s due to'
- ' non-root.', cc_fn)
+ "Skipped loading cloud-config from %s due to"
+ " non-root.",
+ cc_fn,
+ )
except Exception:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
+ util.logexc(
+ LOG, "Failed loading of cloud-config from %s", cc_fn
+ )
return i_cfgs
def _read_cfg(self):
@@ -259,8 +273,9 @@ class ConfigMerger(object):
try:
cfgs.append(util.read_conf(c_fn))
except Exception:
- util.logexc(LOG, "Failed loading of configuration from %s",
- c_fn)
+ util.logexc(
+ LOG, "Failed loading of configuration from %s", c_fn
+ )
cfgs.extend(self._get_env_configs())
cfgs.extend(self._get_instance_configs())
@@ -278,7 +293,6 @@ class ConfigMerger(object):
class ContentHandlers(object):
-
def __init__(self):
self.registered = {}
self.initialized = []
@@ -313,19 +327,21 @@ class ContentHandlers(object):
return list(self.registered.items())
-class Paths(object):
+class Paths(persistence.CloudInitPickleMixin):
+ _ci_pkl_version = 1
+
def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
- self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
- self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
- self.instance_link = os.path.join(self.cloud_dir, 'instance')
+ self.cloud_dir = path_cfgs.get("cloud_dir", "/var/lib/cloud")
+ self.run_dir = path_cfgs.get("run_dir", "/run/cloud-init")
+ self.instance_link = os.path.join(self.cloud_dir, "instance")
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.upstart_conf_d = path_cfgs.get('upstart_dir')
- self.seed_dir = os.path.join(self.cloud_dir, 'seed')
+ self.upstart_conf_d = path_cfgs.get("upstart_dir")
+ self.seed_dir = os.path.join(self.cloud_dir, "seed")
# This one isn't joined, since it should just be read-only
- template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
- self.template_tpl = os.path.join(template_dir, '%s.tmpl')
+ template_dir = path_cfgs.get("templates_dir", "/etc/cloud/templates/")
+ self.template_tpl = os.path.join(template_dir, "%s.tmpl")
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
@@ -337,9 +353,12 @@ class Paths(object):
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
"vendor_cloud_config": "vendor-cloud-config.txt",
+ "vendor2_cloud_config": "vendor2-cloud-config.txt",
"data": "data",
"vendordata_raw": "vendor-data.txt",
+ "vendordata2_raw": "vendor-data2.txt",
"vendordata": "vendor-data.txt.i",
+ "vendordata2": "vendor-data2.txt.i",
"instance_id": ".instance-id",
"manual_clean_marker": "manual-clean",
"warnings": "warnings",
@@ -347,6 +366,18 @@ class Paths(object):
# Set when a datasource becomes active
self.datasource = ds
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, "run_dir"):
+ # On older versions of cloud-init the Paths class did not

+ # have the run_dir attribute. This is problematic because
+ # when loading the pickle object on newer versions of cloud-init
+ # we will rely on this attribute. To fix that, we are now
+ # manually adding that attribute here.
+ self.run_dir = Paths(
+ path_cfgs=self.cfgs, ds=self.datasource
+ ).run_dir
+
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
return self._get_path(self.instance_link, name)
@@ -364,8 +395,8 @@ class Paths(object):
iid = self.datasource.get_instance_id()
if iid is None:
return None
- path_safe_iid = str(iid).replace(os.sep, '_')
- ipath = os.path.join(self.cloud_dir, 'instances', path_safe_iid)
+ path_safe_iid = str(iid).replace(os.sep, "_")
+ ipath = os.path.join(self.cloud_dir, "instances", path_safe_iid)
add_on = self.lookups.get(name)
if add_on:
ipath = os.path.join(ipath, add_on)
@@ -377,8 +408,10 @@ class Paths(object):
def get_ipath(self, name=None):
ipath = self._get_ipath(name)
if not ipath:
- LOG.warning(("No per instance data available, "
- "is there an datasource/iid set?"))
+ LOG.warning(
+ "No per instance data available, "
+ "is there a datasource/iid set?"
+ )
return None
else:
return ipath
@@ -401,6 +434,7 @@ class Paths(object):
# you can avoid catching exceptions that you typically don't
# care about...
+
class DefaultingConfigParser(RawConfigParser):
DEF_INT = 0
DEF_FLOAT = 0.0
@@ -418,7 +452,7 @@ class DefaultingConfigParser(RawConfigParser):
return value
def set(self, section, option, value=None):
- if not self.has_section(section) and section.lower() != 'default':
+ if not self.has_section(section) and section.lower() != "default":
self.add_section(section)
RawConfigParser.set(self, section, option, value)
@@ -442,13 +476,14 @@ class DefaultingConfigParser(RawConfigParser):
return RawConfigParser.getint(self, section, option)
def stringify(self, header=None):
- contents = ''
+ contents = ""
outputstream = StringIO()
self.write(outputstream)
outputstream.flush()
contents = outputstream.getvalue()
if header:
- contents = '\n'.join([header, contents, ''])
+ contents = "\n".join([header, contents, ""])
return contents
+
# vi: ts=4 expandtab
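# Aside (illustrative sketch, not part of the patch): Paths._unpickle() above back-fills
# attributes that pickles written by older cloud-init versions lack. The same idea with
# plain pickle and __setstate__ (the class and paths below are simplified stand-ins):
import pickle

class Paths:
    def __init__(self):
        self.cloud_dir = "/var/lib/cloud"
        self.run_dir = "/run/cloud-init"

    def __setstate__(self, state):
        self.__dict__.update(state)
        if "run_dir" not in state:  # object was pickled before run_dir existed
            self.run_dir = "/run/cloud-init"

stale = Paths()
del stale.run_dir                   # simulate an old pickle that lacks run_dir
restored = pickle.loads(pickle.dumps(stale))
print(restored.run_dir)             # back-filled to '/run/cloud-init'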
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
index f1194fbe..2bc210dd 100644
--- a/cloudinit/importer.py
+++ b/cloudinit/importer.py
@@ -9,6 +9,25 @@
# This file is part of cloud-init. See LICENSE file for license information.
import sys
+import typing
+
+# annotations add value for development, but don't break old versions
+# pyver: 3.6 -> 3.8
+# pylint: disable=E1101
+if sys.version_info >= (3, 8):
+
+ class MetaSchema(typing.TypedDict):
+ name: str
+ id: str
+ title: str
+ description: str
+ distros: typing.List[str]
+ examples: typing.List[str]
+ frequency: str
+
+else:
+ MetaSchema = dict
+# pylint: enable=E1101
def import_module(module_name):
@@ -16,7 +35,8 @@ def import_module(module_name):
return sys.modules[module_name]
-def find_module(base_name, search_paths, required_attrs=None):
+def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
+ """Finds and imports specified modules"""
if not required_attrs:
required_attrs = []
# NOTE(harlowja): translate the search paths to include the base name.
@@ -26,7 +46,7 @@ def find_module(base_name, search_paths, required_attrs=None):
if path:
real_path.extend(path.split("."))
real_path.append(base_name)
- full_path = '.'.join(real_path)
+ full_path = ".".join(real_path)
lookup_paths.append(full_path)
found_paths = []
for full_path in lookup_paths:
@@ -45,4 +65,5 @@ def find_module(base_name, search_paths, required_attrs=None):
found_paths.append(full_path)
return (found_paths, lookup_paths)
+
# vi: ts=4 expandtab
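# Aside (illustrative sketch, not part of the patch): the MetaSchema definition above is
# gated on the interpreter version so the module still imports on Python 3.6/3.7, where
# typing.TypedDict does not exist. A stand-alone sketch of the same pattern:
import sys
import typing

if sys.version_info >= (3, 8):
    class Coords(typing.TypedDict):  # hypothetical example type
        x: int
        y: int
else:
    Coords = dict                    # older interpreters fall back to a plain dict

point: Coords = {"x": 1, "y": 2}
print(point)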
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 2e5df042..f40201bb 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,7 +8,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import collections
+import collections.abc
import io
import logging
import logging.config
@@ -28,7 +28,7 @@ DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Default basic format
-DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
+DEF_CON_FORMAT = "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s"
# Always format logging timestamps as UTC time
logging.Formatter.converter = time.gmtime
@@ -39,8 +39,8 @@ def setupBasicLogging(level=DEBUG, formatter=None):
formatter = logging.Formatter(DEF_CON_FORMAT)
root = logging.getLogger()
for handler in root.handlers:
- if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'):
- if handler.stream.name == '<stderr>':
+ if hasattr(handler, "stream") and hasattr(handler.stream, "name"):
+ if handler.stream.name == "<stderr>":
handler.setLevel(level)
return
# Didn't have an existing stderr handler; create a new handler
@@ -69,18 +69,18 @@ def setupLogging(cfg=None):
cfg = {}
log_cfgs = []
- log_cfg = cfg.get('logcfg')
+ log_cfg = cfg.get("logcfg")
if log_cfg and isinstance(log_cfg, str):
# If there is a 'logcfg' entry in the config,
# respect it, it is the old keyname
log_cfgs.append(str(log_cfg))
elif "log_cfgs" in cfg:
- for a_cfg in cfg['log_cfgs']:
+ for a_cfg in cfg["log_cfgs"]:
if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
- elif isinstance(a_cfg, (collections.Iterable)):
+ elif isinstance(a_cfg, (collections.abc.Iterable)):
cfg_str = [str(c) for c in a_cfg]
- log_cfgs.append('\n'.join(cfg_str))
+ log_cfgs.append("\n".join(cfg_str))
else:
log_cfgs.append(str(a_cfg))
@@ -109,16 +109,17 @@ def setupLogging(cfg=None):
pass
# If it didn't work, at least setup a basic logger (if desired)
- basic_enabled = cfg.get('log_basic', True)
+ basic_enabled = cfg.get("log_basic", True)
- sys.stderr.write(("WARN: no logging configured!"
- " (tried %s configs)\n") % (am_tried))
+ sys.stderr.write(
+ "WARN: no logging configured! (tried %s configs)\n" % (am_tried)
+ )
if basic_enabled:
sys.stderr.write("Setting up basic logging...\n")
setupBasicLogging()
-def getLogger(name='cloudinit'):
+def getLogger(name="cloudinit"):
return logging.getLogger(name)
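# Aside (illustrative sketch, not part of the patch): the setupLogging() change above
# swaps collections.Iterable for collections.abc.Iterable; the old alias was removed in
# Python 3.10, so the isinstance() check would otherwise raise AttributeError there:
import collections.abc

print(isinstance(["cfg-a", "cfg-b"], collections.abc.Iterable))  # True
print(isinstance("plain string", collections.abc.Iterable))      # True - strings iterate too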
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 7fa493a6..a7a6a47f 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -6,14 +6,13 @@
import re
-from cloudinit import importer
-from cloudinit import type_utils
+from cloudinit import importer, type_utils
NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")
DEF_MERGE_TYPE = "list()+dict()+str()"
-MERGER_PREFIX = 'm_'
-MERGER_ATTR = 'Merger'
+MERGER_PREFIX = "m_"
+MERGER_ATTR = "Merger"
class UnknownMerger(object):
@@ -53,7 +52,7 @@ class LookupMerger(UnknownMerger):
self._lookups = lookups
def __str__(self):
- return 'LookupMerger: (%s)' % (len(self._lookups))
+ return "LookupMerger: (%s)" % (len(self._lookups))
# For items which can not be merged by the parent this object
# will lookup in a internally maintained set of objects and
@@ -69,25 +68,26 @@ class LookupMerger(UnknownMerger):
meth = getattr(merger, meth_wanted)
break
if not meth:
- return UnknownMerger._handle_unknown(self, meth_wanted,
- value, merge_with)
+ return UnknownMerger._handle_unknown(
+ self, meth_wanted, value, merge_with
+ )
return meth(value, merge_with)
def dict_extract_mergers(config):
parsed_mergers = []
- raw_mergers = config.pop('merge_how', None)
+ raw_mergers = config.pop("merge_how", None)
if raw_mergers is None:
- raw_mergers = config.pop('merge_type', None)
+ raw_mergers = config.pop("merge_type", None)
if raw_mergers is None:
return parsed_mergers
if isinstance(raw_mergers, str):
return string_extract_mergers(raw_mergers)
for m in raw_mergers:
if isinstance(m, (dict)):
- name = m['name']
+ name = m["name"]
name = name.replace("-", "_").strip()
- opts = m['settings']
+ opts = m["settings"]
else:
name = m[0]
if len(m) >= 2:
@@ -110,8 +110,9 @@ def string_extract_mergers(merge_how):
continue
match = NAME_MTCH.match(m_name)
if not match:
- msg = ("Matcher identifer '%s' is not in the right format" %
- (m_name))
+ msg = "Matcher identifier '%s' is not in the right format" % (
+ m_name
+ )
raise ValueError(msg)
(m_name, m_ops) = match.groups()
m_ops = m_ops.strip().split(",")
@@ -129,14 +130,15 @@ def construct(parsed_mergers):
for (m_name, m_ops) in parsed_mergers:
if not m_name.startswith(MERGER_PREFIX):
m_name = MERGER_PREFIX + str(m_name)
- merger_locs, looked_locs = importer.find_module(m_name,
- [__name__],
- [MERGER_ATTR])
+ merger_locs, looked_locs = importer.find_module(
+ m_name, [__name__], [MERGER_ATTR]
+ )
if not merger_locs:
- msg = ("Could not find merger module named '%s' "
- "with attribute '%s' (searched %s)") % (m_name,
- MERGER_ATTR,
- looked_locs)
+ msg = (
+ "Could not find merger module named '%s' "
+ "with attribute '%s' (searched %s)"
+ % (m_name, MERGER_ATTR, looked_locs)
+ )
raise ImportError(msg)
else:
mod = importer.import_module(merger_locs[0])
@@ -149,4 +151,5 @@ def construct(parsed_mergers):
mergers.append(attr(root, opts))
return root
+
# vi: ts=4 expandtab
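# Aside (illustrative sketch, not part of the patch): string_extract_mergers() above
# turns a spec such as the default "list()+dict()+str()" into (name, options) pairs.
# A simplified stand-alone parse using the NAME_MTCH pattern from this file:
import re

NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")

def parse(merge_how):
    parsed = []
    for part in merge_how.split("+"):
        name, ops = NAME_MTCH.match(part).groups()
        parsed.append((name, [o for o in ops.split(",") if o]))
    return parsed

print(parse("list(append)+dict(no_replace,recurse_list)+str()"))
# [('list', ['append']), ('dict', ['no_replace', 'recurse_list']), ('str', [])]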
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 93472f13..274ccafc 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -4,8 +4,11 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-DEF_MERGE_TYPE = 'no_replace'
-MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
+DEF_MERGE_TYPE = "no_replace"
+MERGE_TYPES = (
+ "replace",
+ DEF_MERGE_TYPE,
+)
def _has_any(what, *keys):
@@ -25,21 +28,27 @@ class Merger(object):
self._method = m
break
# Affect how recursive merging is done on other primitives.
- self._recurse_str = 'recurse_str' in opts
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
- self._allow_delete = 'allow_delete' in opts
+ self._recurse_str = "recurse_str" in opts
+ self._recurse_array = _has_any(opts, "recurse_array", "recurse_list")
+ self._allow_delete = "allow_delete" in opts
# Backwards compat require this to be on.
self._recurse_dict = True
def __str__(self):
- s = ('DictMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)')
- s = s % (self._method, self._recurse_str,
- self._recurse_dict, self._recurse_array, self._allow_delete)
+ s = (
+ "DictMerger: (method=%s,recurse_str=%s,"
+ "recurse_dict=%s,recurse_array=%s,allow_delete=%s)"
+ )
+ s = s % (
+ self._method,
+ self._recurse_str,
+ self._recurse_dict,
+ self._recurse_array,
+ self._allow_delete,
+ )
return s
def _do_dict_replace(self, value, merge_with, do_replace):
-
def merge_same_key(old_v, new_v):
if do_replace:
return new_v
@@ -65,12 +74,13 @@ class Merger(object):
def _on_dict(self, value, merge_with):
if not isinstance(merge_with, (dict)):
return value
- if self._method == 'replace':
+ if self._method == "replace":
merged = self._do_dict_replace(dict(value), merge_with, True)
- elif self._method == 'no_replace':
+ elif self._method == "no_replace":
merged = self._do_dict_replace(dict(value), merge_with, False)
else:
raise NotImplementedError("Unknown merge type %s" % (self._method))
return merged
+
# vi: ts=4 expandtab
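# Aside (illustrative sketch, not part of the patch): the 'replace' and 'no_replace'
# modes handled above differ only in what wins on a key collision. A flat sketch of
# that rule (the real merger also recurses into nested values):
def merge_dicts(value, merge_with, do_replace):
    merged = dict(value)
    for key, new_v in merge_with.items():
        merged[key] = new_v if (key not in value or do_replace) else value[key]
    return merged

base, extra = {"a": 1, "b": 2}, {"b": 99, "c": 3}
print(merge_dicts(base, extra, do_replace=True))   # {'a': 1, 'b': 99, 'c': 3}
print(merge_dicts(base, extra, do_replace=False))  # {'a': 1, 'b': 2, 'c': 3}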
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 19f32771..9dfae8cd 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -4,8 +4,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-DEF_MERGE_TYPE = 'replace'
-MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
+DEF_MERGE_TYPE = "replace"
+MERGE_TYPES = ("append", "prepend", DEF_MERGE_TYPE, "no_replace")
def _has_any(what, *keys):
@@ -25,38 +25,44 @@ class Merger(object):
self._method = m
break
# Affect how recursive merging is done on other primitives
- self._recurse_str = _has_any(opts, 'recurse_str')
- self._recurse_dict = _has_any(opts, 'recurse_dict')
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
+ self._recurse_str = _has_any(opts, "recurse_str")
+ self._recurse_dict = _has_any(opts, "recurse_dict")
+ self._recurse_array = _has_any(opts, "recurse_array", "recurse_list")
def __str__(self):
- return ('ListMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s)') % (self._method,
- self._recurse_str,
- self._recurse_dict,
- self._recurse_array)
+ return (
+ "ListMerger: (method=%s,recurse_str=%s,"
+ "recurse_dict=%s,recurse_array=%s)"
+ % (
+ self._method,
+ self._recurse_str,
+ self._recurse_dict,
+ self._recurse_array,
+ )
+ )
def _on_tuple(self, value, merge_with):
return tuple(self._on_list(list(value), merge_with))
def _on_list(self, value, merge_with):
- if (self._method == 'replace' and
- not isinstance(merge_with, (tuple, list))):
+ if self._method == "replace" and not isinstance(
+ merge_with, (tuple, list)
+ ):
return merge_with
# Ok we now know that what we are merging with is a list or tuple.
merged_list = []
- if self._method == 'prepend':
+ if self._method == "prepend":
merged_list.extend(merge_with)
merged_list.extend(value)
return merged_list
- elif self._method == 'append':
+ elif self._method == "append":
merged_list.extend(value)
merged_list.extend(merge_with)
return merged_list
def merge_same_index(old_v, new_v):
- if self._method == 'no_replace':
+ if self._method == "no_replace":
# Leave it be...
return old_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
@@ -74,4 +80,5 @@ class Merger(object):
merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
return merged_list
+
# vi: ts=4 expandtab
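# Aside (illustrative sketch, not part of the patch): for list merging above, 'prepend'
# puts the incoming list first and 'append' puts it last. A stand-alone sketch of just
# those two modes (replace/no_replace and the per-index recursion are omitted):
def merge_lists(value, merge_with, method):
    if method == "prepend":
        return list(merge_with) + list(value)
    if method == "append":
        return list(value) + list(merge_with)
    raise ValueError("sketch covers only append/prepend")

print(merge_lists([1, 2], [3, 4], "append"))   # [1, 2, 3, 4]
print(merge_lists([1, 2], [3, 4], "prepend"))  # [3, 4, 1, 2]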
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index 539e3e29..a96bae5e 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -7,10 +7,10 @@
class Merger(object):
def __init__(self, _merger, opts):
- self._append = 'append' in opts
+ self._append = "append" in opts
def __str__(self):
- return 'StringMerger: (append=%s)' % (self._append)
+ return "StringMerger: (append=%s)" % (self._append)
# On encountering a unicode object to merge value with
# we will for now just proxy into the string method to let it handle it.
@@ -27,4 +27,5 @@ class Merger(object):
return merge_with
return value + merge_with
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index de65e7af..3270e1f7 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,30 +6,46 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
+import functools
import ipaddress
import logging
import os
import re
+from typing import Any, Dict
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp, util
+from cloudinit.net.network_state import ipv4_mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
SYS_CLASS_NET = "/sys/class/net/"
-DEFAULT_PRIMARY_INTERFACE = 'eth0'
-
-
-def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+DEFAULT_PRIMARY_INTERFACE = "eth0"
+OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [
+ "ovs-vsctl",
+ "--format",
+ "csv",
+ "--no-headings",
+ "--timeout",
+ "10",
+ "--columns",
+ "name",
+ "find",
+ "interface",
+ "type=internal",
+]
+
+
+def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
"""Sorting for Humans: natural sort order. Can be use as the key to sort
functions.
This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0'] instead of the simple
python way which will produce ['ens0', 'ens10', 'ens12', 'ens3', 'ens8',
'eth0']."""
- return [int(text) if text.isdigit() else text.lower()
- for text in re.split(_nsre, s)]
+ return [
+ int(text) if text.isdigit() else text.lower()
+ for text in re.split(_nsre, s)
+ ]
def get_sys_class_path():
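# Aside (illustrative sketch, not part of the patch): natural_sort_key() above splits
# names on digit runs so numeric parts compare as integers. A stand-alone check of the
# ordering promised by its docstring:
import re

def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
    return [int(t) if t.isdigit() else t.lower() for t in re.split(_nsre, s)]

print(sorted(["eth0", "ens3", "ens10", "ens12", "ens8", "ens0"], key=natural_sort_key))
# ['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0']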
@@ -41,14 +57,19 @@ def sys_dev_path(devname, path=""):
return get_sys_class_path() + devname + "/" + path
-def read_sys_net(devname, path, translate=None,
- on_enoent=None, on_keyerror=None,
- on_einval=None):
+def read_sys_net(
+ devname,
+ path,
+ translate=None,
+ on_enoent=None,
+ on_keyerror=None,
+ on_einval=None,
+):
dev_path = sys_dev_path(devname, path)
try:
contents = util.load_file(dev_path)
except (OSError, IOError) as e:
- e_errno = getattr(e, 'errno', None)
+ e_errno = getattr(e, "errno", None)
if e_errno in (errno.ENOENT, errno.ENOTDIR):
if on_enoent is not None:
return on_enoent(e)
@@ -65,19 +86,26 @@ def read_sys_net(devname, path, translate=None,
if on_keyerror is not None:
return on_keyerror(e)
else:
- LOG.debug("Found unexpected (not translatable) value"
- " '%s' in '%s", contents, dev_path)
+ LOG.debug(
+ "Found unexpected (not translatable) value '%s' in '%s'",
+ contents,
+ dev_path,
+ )
raise
def read_sys_net_safe(iface, field, translate=None):
def on_excp_false(e):
return False
- return read_sys_net(iface, field,
- on_keyerror=on_excp_false,
- on_enoent=on_excp_false,
- on_einval=on_excp_false,
- translate=translate)
+
+ return read_sys_net(
+ iface,
+ field,
+ on_keyerror=on_excp_false,
+ on_enoent=on_excp_false,
+ on_einval=on_excp_false,
+ translate=translate,
+ )
def read_sys_net_int(iface, field):
@@ -94,7 +122,7 @@ def is_up(devname):
# The linux kernel says to consider devices in 'unknown'
# operstate as up for the purposes of network configuration. See
# Documentation/networking/operstates.txt in the kernel source.
- translate = {'up': True, 'unknown': True, 'down': False}
+ translate = {"up": True, "unknown": True, "down": False}
return read_sys_net_safe(devname, "operstate", translate=translate)
@@ -121,7 +149,7 @@ def master_is_bridge_or_bond(devname):
return False
bonding_path = os.path.join(master_path, "bonding")
bridge_path = os.path.join(master_path, "bridge")
- return (os.path.exists(bonding_path) or os.path.exists(bridge_path))
+ return os.path.exists(bonding_path) or os.path.exists(bridge_path)
def master_is_openvswitch(devname):
@@ -133,24 +161,71 @@ def master_is_openvswitch(devname):
return os.path.exists(ovs_path)
+@functools.lru_cache(maxsize=None)
+def openvswitch_is_installed() -> bool:
+ """Return a bool indicating if Open vSwitch is installed in the system."""
+ ret = bool(subp.which("ovs-vsctl"))
+ if not ret:
+ LOG.debug(
+ "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces"
+ )
+ return ret
+
+
+@functools.lru_cache(maxsize=None)
+def get_ovs_internal_interfaces() -> list:
+ """Return a list of the names of OVS internal interfaces on the system.
+
+ These will all be strings, and are used to exclude OVS-specific interfaces
+ from cloud-init's network configuration handling.
+ """
+ try:
+ out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
+ except subp.ProcessExecutionError as exc:
+ if "database connection failed" in exc.stderr:
+ LOG.info(
+ "Open vSwitch is not yet up; no interfaces will be detected as"
+ " OVS-internal"
+ )
+ return []
+ raise
+ else:
+ return out.splitlines()
+
+
+def is_openvswitch_internal_interface(devname: str) -> bool:
+ """Returns True if this is an OVS internal interface.
+
+ If OVS is not installed or not yet running, this will return False.
+ """
+ if not openvswitch_is_installed():
+ return False
+ ovs_bridges = get_ovs_internal_interfaces()
+ if devname in ovs_bridges:
+ LOG.debug("Detected %s as an OVS interface", devname)
+ return True
+ return False
+
+
def is_netfailover(devname, driver=None):
- """ netfailover driver uses 3 nics, master, primary and standby.
- this returns True if the device is either the primary or standby
- as these devices are to be ignored.
+ """netfailover driver uses 3 nics, master, primary and standby.
+ this returns True if the device is either the primary or standby
+ as these devices are to be ignored.
"""
if driver is None:
driver = device_driver(devname)
- if is_netfail_primary(devname, driver) or is_netfail_standby(devname,
- driver):
+ if is_netfail_primary(devname, driver) or is_netfail_standby(
+ devname, driver
+ ):
return True
return False
def get_dev_features(devname):
- """ Returns a str from reading /sys/class/net/<devname>/device/features."""
- features = ''
+ """Returns a str from reading /sys/class/net/<devname>/device/features."""
+ features = ""
try:
- features = read_sys_net(devname, 'device/features')
+ features = read_sys_net(devname, "device/features")
except Exception:
pass
return features
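# Aside (illustrative sketch, not part of the patch): both OVS helpers above are
# wrapped in functools.lru_cache so the ovs-vsctl lookup runs at most once per process.
# A stand-alone sketch of that memoisation with the subprocess call faked out:
import functools

calls = {"count": 0}

@functools.lru_cache(maxsize=None)
def get_ovs_internal_interfaces():
    calls["count"] += 1               # stands in for the ovs-vsctl invocation
    return ("br-int", "ovs-system")   # hypothetical interface names

for dev in ("eth0", "br-int", "eth1"):
    print(dev, dev in get_ovs_internal_interfaces())
print("lookups performed:", calls["count"])  # 1 - result is cached after the first call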
@@ -170,13 +245,13 @@ def has_netfail_standby_feature(devname):
def is_netfail_master(devname, driver=None):
- """ A device is a "netfail master" device if:
+ """A device is a "netfail master" device if:
- - The device does NOT have the 'master' sysfs attribute
- - The device driver is 'virtio_net'
- - The device has the standby feature bit set
+ - The device does NOT have the 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
if get_master(devname) is not None:
return False
@@ -194,17 +269,17 @@ def is_netfail_master(devname, driver=None):
def is_netfail_primary(devname, driver=None):
- """ A device is a "netfail primary" device if:
+ """A device is a "netfail primary" device if:
- - the device has a 'master' sysfs file
- - the device driver is not 'virtio_net'
- - the 'master' sysfs file points to device with virtio_net driver
- - the 'master' device has the 'standby' feature bit set
+ - the device has a 'master' sysfs file
+ - the device driver is not 'virtio_net'
+ - the 'master' sysfs file points to device with virtio_net driver
+ - the 'master' device has the 'standby' feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
# /sys/class/net/<devname>/master -> ../../<master devname>
- master_sysfs_path = sys_dev_path(devname, path='master')
+ master_sysfs_path = sys_dev_path(devname, path="master")
if not os.path.exists(master_sysfs_path):
return False
@@ -227,13 +302,13 @@ def is_netfail_primary(devname, driver=None):
def is_netfail_standby(devname, driver=None):
- """ A device is a "netfail standby" device if:
+ """A device is a "netfail standby" device if:
- - The device has a 'master' sysfs attribute
- - The device driver is 'virtio_net'
- - The device has the standby feature bit set
+ - The device has a 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
if get_master(devname) is None:
return False
@@ -253,21 +328,21 @@ def is_netfail_standby(devname, driver=None):
def is_renamed(devname):
"""
/* interface name assignment types (sysfs name_assign_type attribute) */
- #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
- #define NET_NAME_ENUM 1 /* enumerated by kernel */
- #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
- #define NET_NAME_USER 3 /* provided by user-space */
- #define NET_NAME_RENAMED 4 /* renamed by user-space */
+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
+ #define NET_NAME_USER 3 /* provided by user-space */
+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
"""
- name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
- if name_assign_type and name_assign_type in ['3', '4']:
+ name_assign_type = read_sys_net_safe(devname, "name_assign_type")
+ if name_assign_type and name_assign_type in ["3", "4"]:
return True
return False
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
- return 'DEVTYPE=vlan' in uevent.splitlines()
+ return "DEVTYPE=vlan" in uevent.splitlines()
def device_driver(devname):
@@ -291,7 +366,7 @@ def device_devid(devname):
def get_devicelist():
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return list(get_interfaces_by_mac().values())
try:
@@ -311,12 +386,12 @@ class ParserError(Exception):
def is_disabled_cfg(cfg):
if not cfg or not isinstance(cfg, dict):
return False
- return cfg.get('config') == "disabled"
+ return cfg.get("config") == "disabled"
def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
elif util.is_NetBSD() or util.is_OpenBSD():
return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
@@ -325,9 +400,9 @@ def find_fallback_nic(blacklist_drivers=None):
def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
- values = list(sorted(
- get_interfaces_by_mac().values(),
- key=natural_sort_key))
+ values = list(
+ sorted(get_interfaces_by_mac().values(), key=natural_sort_key)
+ )
if values:
return values[0]
@@ -341,7 +416,7 @@ def find_fallback_nic_on_freebsd(blacklist_drivers=None):
we'll use the first interface from ``ifconfig -l -u ether``
"""
- stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether'])
+ stdout, _stderr = subp.subp(["ifconfig", "-l", "-u", "ether"])
values = stdout.split()
if values:
return values[0]
@@ -358,22 +433,31 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
if not blacklist_drivers:
blacklist_drivers = []
- if 'net.ifnames=0' in util.get_cmdline():
- LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
+ if "net.ifnames=0" in util.get_cmdline():
+ LOG.debug("Stable ifnames disabled by net.ifnames=0 in /proc/cmdline")
else:
- unstable = [device for device in get_devicelist()
- if device != 'lo' and not is_renamed(device)]
+ unstable = [
+ device
+ for device in get_devicelist()
+ if device != "lo" and not is_renamed(device)
+ ]
if len(unstable):
- LOG.debug('Found unstable nic names: %s; calling udevadm settle',
- unstable)
- msg = 'Waiting for udev events to settle'
+ LOG.debug(
+ "Found unstable nic names: %s; calling udevadm settle",
+ unstable,
+ )
+ msg = "Waiting for udev events to settle"
util.log_time(LOG.debug, msg, func=util.udevadm_settle)
# get list of interfaces that could have connections
- invalid_interfaces = set(['lo'])
- potential_interfaces = set([device for device in get_devicelist()
- if device_driver(device) not in
- blacklist_drivers])
+ invalid_interfaces = set(["lo"])
+ potential_interfaces = set(
+ [
+ device
+ for device in get_devicelist()
+ if device_driver(device) not in blacklist_drivers
+ ]
+ )
potential_interfaces = potential_interfaces.difference(invalid_interfaces)
# sort into interfaces with carrier, interfaces which could have carrier,
# and ignore interfaces that are definitely disconnected
@@ -391,19 +475,19 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
if is_netfailover(interface):
# ignore netfailover primary/standby interfaces
continue
- carrier = read_sys_net_int(interface, 'carrier')
+ carrier = read_sys_net_int(interface, "carrier")
if carrier:
connected.append(interface)
continue
# check if nic is dormant or down, as this may make a nic appear to
# not have a carrier even though it could acquire one when brought
# online by dhclient
- dormant = read_sys_net_int(interface, 'dormant')
+ dormant = read_sys_net_int(interface, "dormant")
if dormant:
possibly_connected.append(interface)
continue
- operstate = read_sys_net_safe(interface, 'operstate')
- if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
+ operstate = read_sys_net_safe(interface, "operstate")
+ if operstate in ["dormant", "down", "lowerlayerdown", "unknown"]:
possibly_connected.append(interface)
continue
@@ -423,7 +507,7 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
# pick the first that has a mac-address
for name in names:
- if read_sys_net_safe(name, 'address'):
+ if read_sys_net_safe(name, "address"):
return name
return None
@@ -440,32 +524,32 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None):
# netfail cannot use mac for matching, they have duplicate macs
if is_netfail_master(target_name):
- match = {'name': target_name}
+ match = {"name": target_name}
else:
match = {
- 'macaddress': read_sys_net_safe(target_name, 'address').lower()}
- cfg = {'dhcp4': True, 'set-name': target_name, 'match': match}
+ "macaddress": read_sys_net_safe(target_name, "address").lower()
+ }
+ cfg = {"dhcp4": True, "set-name": target_name, "match": match}
if config_driver:
driver = device_driver(target_name)
if driver:
- cfg['match']['driver'] = driver
- nconf = {'ethernets': {target_name: cfg}, 'version': 2}
+ cfg["match"]["driver"] = driver
+ nconf = {"ethernets": {target_name: cfg}, "version": 2}
return nconf
def extract_physdevs(netcfg):
-
def _version_1(netcfg):
physdevs = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
+ for ent in netcfg.get("config", {}):
+ if ent.get("type") != "physical":
continue
- mac = ent.get('mac_address')
+ mac = ent.get("mac_address")
if not mac:
continue
- name = ent.get('name')
- driver = ent.get('params', {}).get('driver')
- device_id = ent.get('params', {}).get('device_id')
+ name = ent.get("name")
+ driver = ent.get("params", {}).get("driver")
+ device_id = ent.get("params", {}).get("device_id")
if not driver:
driver = device_driver(name)
if not device_id:
@@ -475,17 +559,17 @@ def extract_physdevs(netcfg):
def _version_2(netcfg):
physdevs = []
- for ent in netcfg.get('ethernets', {}).values():
+ for ent in netcfg.get("ethernets", {}).values():
# only rename if configured to do so
- name = ent.get('set-name')
+ name = ent.get("set-name")
if not name:
continue
# cloud-init requires macaddress for renaming
- mac = ent.get('match', {}).get('macaddress')
+ mac = ent.get("match", {}).get("macaddress")
if not mac:
continue
- driver = ent.get('match', {}).get('driver')
- device_id = ent.get('match', {}).get('device_id')
+ driver = ent.get("match", {}).get("driver")
+ device_id = ent.get("match", {}).get("device_id")
if not driver:
driver = device_driver(name)
if not device_id:
@@ -493,13 +577,13 @@ def extract_physdevs(netcfg):
physdevs.append([mac, name, driver, device_id])
return physdevs
- version = netcfg.get('version')
+ version = netcfg.get("version")
if version == 1:
return _version_1(netcfg)
elif version == 2:
return _version_2(netcfg)
- raise RuntimeError('Unknown network config version: %s' % version)
+ raise RuntimeError("Unknown network config version: %s" % version)
def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
@@ -516,7 +600,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
_rename_interfaces(extract_physdevs(netcfg))
except RuntimeError as e:
raise RuntimeError(
- 'Failed to apply network config names: %s' % e
+ "Failed to apply network config names: %s" % e
) from e
@@ -558,33 +642,37 @@ def _get_current_rename_info(check_downable=True):
cur_info = {}
for (name, mac, driver, device_id) in get_interfaces():
cur_info[name] = {
- 'downable': None,
- 'device_id': device_id,
- 'driver': driver,
- 'mac': mac.lower(),
- 'name': name,
- 'up': is_up(name),
+ "downable": None,
+ "device_id": device_id,
+ "driver": driver,
+ "mac": mac.lower(),
+ "name": name,
+ "up": is_up(name),
}
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent',
- 'scope', 'global'], capture=True)
- ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True)
+ ipv6, _err = subp.subp(
+ ["ip", "-6", "addr", "show", "permanent", "scope", "global"],
+ capture=True,
+ )
+ ipv4, _err = subp.subp(["ip", "-4", "addr", "show"], capture=True)
nics_with_addresses = set()
for bytes_out in (ipv6, ipv4):
nics_with_addresses.update(nmatch.findall(bytes_out))
for d in cur_info.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
+ d["downable"] = (
+ d["up"] is False or d["name"] not in nics_with_addresses
+ )
return cur_info
-def _rename_interfaces(renames, strict_present=True, strict_busy=True,
- current_info=None):
+def _rename_interfaces(
+ renames, strict_present=True, strict_busy=True, current_info=None
+):
if not len(renames):
LOG.debug("no interfaces to rename")
@@ -596,14 +684,15 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur_info = {}
for name, data in current_info.items():
cur = data.copy()
- if cur.get('mac'):
- cur['mac'] = cur['mac'].lower()
- cur['name'] = name
+ if cur.get("mac"):
+ cur["mac"] = cur["mac"].lower()
+ cur["name"] = name
cur_info[name] = cur
+ LOG.debug("Detected interfaces %s", cur_info)
+
def update_byname(bymac):
- return dict((data['name'], data)
- for data in cur_info.values())
+ return dict((data["name"], data) for data in cur_info.values())
def rename(cur, new):
subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
@@ -624,25 +713,31 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
def entry_match(data, mac, driver, device_id):
"""match if set and in data"""
if mac and driver and device_id:
- return (data['mac'] == mac and
- data['driver'] == driver and
- data['device_id'] == device_id)
+ return (
+ data["mac"] == mac
+ and data["driver"] == driver
+ and data["device_id"] == device_id
+ )
elif mac and driver:
- return (data['mac'] == mac and
- data['driver'] == driver)
+ return data["mac"] == mac and data["driver"] == driver
elif mac:
- return (data['mac'] == mac)
+ return data["mac"] == mac
return False
def find_entry(mac, driver, device_id):
- match = [data for data in cur_info.values()
- if entry_match(data, mac, driver, device_id)]
+ match = [
+ data
+ for data in cur_info.values()
+ if entry_match(data, mac, driver, device_id)
+ ]
if len(match):
if len(match) > 1:
- msg = ('Failed to match a single device. Matched devices "%s"'
- ' with search values "(mac:%s driver:%s device_id:%s)"'
- % (match, mac, driver, device_id))
+ msg = (
+ 'Failed to match a single device. Matched devices "%s"'
+ ' with search values "(mac:%s driver:%s device_id:%s)"'
+ % (match, mac, driver, device_id)
+ )
raise ValueError(msg)
return match[0]
@@ -657,10 +752,11 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if strict_present:
errors.append(
"[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
+ ", not available." % (mac, new_name)
+ )
continue
- cur_name = cur.get('name')
+ cur_name = cur.get("name")
if cur_name == new_name:
# nothing to do
continue
@@ -669,24 +765,25 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if strict_present:
errors.append(
"[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
+ ", not available." % (mac, new_name)
+ )
continue
- if cur['up']:
+ if cur["up"]:
msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
+ if not cur["downable"]:
if strict_busy:
errors.append(msg % (mac, cur_name, new_name))
continue
- cur['up'] = False
+ cur["up"] = False
cur_ops.append(("down", mac, new_name, (cur_name,)))
ups.append(("up", mac, new_name, (new_name,)))
if new_name in cur_byname:
target = cur_byname[new_name]
- if target['up']:
+ if target["up"]:
msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
+ if not target["downable"]:
if strict_busy:
errors.append(msg % (mac, cur_name, new_name))
continue
@@ -699,17 +796,17 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
tmp_name = tmpname_fmt % tmpi
cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
+ target["name"] = tmp_name
cur_byname = update_byname(cur_info)
- if target['up']:
+ if target["up"]:
ups.append(("up", mac, new_name, (tmp_name,)))
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
+ cur_ops.append(("rename", mac, new_name, (cur["name"], new_name)))
+ cur["name"] = new_name
cur_byname = update_byname(cur_info)
ops += cur_ops
- opmap = {'rename': rename, 'down': down, 'up': up}
+ opmap = {"rename": rename, "down": down, "up": up}
if len(ops) + len(ups) == 0:
if len(errors):
@@ -724,11 +821,12 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
opmap.get(op)(*params)
except Exception as e:
errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
+ "[unknown] Error performing %s%s for %s, %s: %s"
+ % (op, params, mac, new_name, e)
+ )
if len(errors):
- raise Exception('\n'.join(errors))
+ raise Exception("\n".join(errors))
def get_interface_mac(ifname):
@@ -747,7 +845,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
representation of the address will be returned.
"""
# Type 32 is Infiniband.
- if read_sys_net_safe(ifname, 'type') == '32':
+ if read_sys_net_safe(ifname, "type") == "32":
mac = get_interface_mac(ifname)
if mac and ethernet_format:
# Use bytes 13-15 and 18-20 of the hardware address.
@@ -756,28 +854,32 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return get_interfaces_by_mac_on_freebsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
elif util.is_NetBSD():
return get_interfaces_by_mac_on_netbsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
elif util.is_OpenBSD():
return get_interfaces_by_mac_on_openbsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
else:
return get_interfaces_by_mac_on_linux(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
- (out, _) = subp.subp(['ifconfig', '-a', 'ether'])
+ (out, _) = subp.subp(["ifconfig", "-a", "ether"])
# flatten each interface block in a single line
def flatten(out):
- curr_block = ''
- for line in out.split('\n'):
- if line.startswith('\t'):
+ curr_block = ""
+ for line in out.split("\n"):
+ if line.startswith("\t"):
curr_block += line
else:
if curr_block:
@@ -789,10 +891,11 @@ def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
def find_mac(flat_list):
for block in flat_list:
m = re.search(
- r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*",
- block)
+ r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*", block
+ )
if m:
- yield (m.group('mac'), m.group('ifname'))
+ yield (m.group("mac"), m.group("ifname"))
+
results = {mac: ifname for mac, ifname in find_mac(flatten(out))}
return results
@@ -803,13 +906,13 @@ def get_interfaces_by_mac_on_netbsd(blacklist_drivers=None) -> dict():
r"(?P<ifname>\w+).*address:\s"
r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
)
- (out, _) = subp.subp(['ifconfig', '-a'])
- if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ (out, _) = subp.subp(["ifconfig", "-a"])
+ if_lines = re.sub(r"\n\s+", " ", out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
if m:
fields = m.groupdict()
- ret[fields['mac']] = fields['ifname']
+ ret[fields["mac"]] = fields["ifname"]
return ret
@@ -817,14 +920,15 @@ def get_interfaces_by_mac_on_openbsd(blacklist_drivers=None) -> dict():
ret = {}
re_field_match = (
r"(?P<ifname>\w+).*lladdr\s"
- r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
- (out, _) = subp.subp(['ifconfig', '-a'])
- if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
+ (out, _) = subp.subp(["ifconfig", "-a"])
+ if_lines = re.sub(r"\n\s+", " ", out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
if m:
fields = m.groupdict()
- ret[fields['mac']] = fields['ifname']
+ ret[fields["mac"]] = fields["ifname"]
return ret
@@ -834,11 +938,13 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
Bridges and any devices that have a 'stolen' mac are excluded."""
ret = {}
for name, mac, _driver, _devid in get_interfaces(
- blacklist_drivers=blacklist_drivers):
+ blacklist_drivers=blacklist_drivers
+ ):
if mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[mac], mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[mac], mac)
+ )
ret[mac] = name
# Try to get an Infiniband hardware address (in 6 byte Ethernet format)
# for the interface.
@@ -846,8 +952,9 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
if ib_mac:
if ib_mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[ib_mac], ib_mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[ib_mac], ib_mac)
+ )
ret[ib_mac] = name
return ret
@@ -861,7 +968,7 @@ def get_interfaces(blacklist_drivers=None) -> list:
blacklist_drivers = []
devs = get_devicelist()
# 16 somewhat arbitrarily chosen. Normally a mac is 6 '00:' tokens.
- zero_mac = ':'.join(('00',) * 16)
+ zero_mac = ":".join(("00",) * 16)
for name in devs:
if not interface_has_own_mac(name):
continue
@@ -872,8 +979,9 @@ def get_interfaces(blacklist_drivers=None) -> list:
if is_bond(name):
continue
if get_master(name) is not None:
- if (not master_is_bridge_or_bond(name) and
- not master_is_openvswitch(name)):
+ if not master_is_bridge_or_bond(
+ name
+ ) and not master_is_openvswitch(name):
continue
if is_netfailover(name):
continue
@@ -882,7 +990,9 @@ def get_interfaces(blacklist_drivers=None) -> list:
if not mac:
continue
# skip nics that have no mac (00:00....)
- if name != 'lo' and mac == zero_mac[:len(mac)]:
+ if name != "lo" and mac == zero_mac[: len(mac)]:
+ continue
+ if is_openvswitch_internal_interface(name):
continue
# skip nics that have drivers blacklisted
driver = device_driver(name)
@@ -901,24 +1011,43 @@ def get_ib_hwaddrs_by_interface():
if ib_mac:
if ib_mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[ib_mac], ib_mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[ib_mac], ib_mac)
+ )
ret[name] = ib_mac
return ret
-def has_url_connectivity(url):
- """Return true when the instance has access to the provided URL
+def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
+ """Return true when the instance has access to the provided URL.
    Logs a warning if url is not in the expected format.
+
+ url_data is a dictionary of kwargs to send to readurl. E.g.:
+
+ has_url_connectivity({
+ "url": "http://example.invalid",
+ "headers": {"some": "header"},
+ "timeout": 10
+ })
"""
- if not any([url.startswith('http://'), url.startswith('https://')]):
+ if "url" not in url_data:
+ LOG.warning(
+ "Ignoring connectivity check. No 'url' to check in %s", url_data
+ )
+ return False
+ url = url_data["url"]
+ if not any([url.startswith("http://"), url.startswith("https://")]):
LOG.warning(
"Ignoring connectivity check. Expected URL beginning with http*://"
- " received '%s'", url)
+ " received '%s'",
+ url,
+ )
return False
+ if "timeout" not in url_data:
+ url_data["timeout"] = 5
try:
- readurl(url, timeout=5)
+ readurl(**url_data)
except UrlError:
return False
return True
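A minimal sketch of calling the reworked has_url_connectivity() with the new url_data dict; the IMDS-style URL, headers and timeout below are illustrative, and any keys beyond "url" are simply forwarded to readurl():
    from cloudinit.net import has_url_connectivity
    reachable = has_url_connectivity(
        {
            "url": "http://169.254.169.254/latest/meta-data/",
            "headers": {"Metadata": "true"},
            "timeout": 5,
        }
    )
    # False when "url" is missing, the scheme is not http(s), or readurl fails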
@@ -961,14 +1090,22 @@ class EphemeralIPv4Network(object):
No operations are performed if the provided interface already has the
specified configuration.
- This can be verified with the connectivity_url.
+ This can be verified with the connectivity_url_data.
If unconnected, bring up the interface with valid ip, prefix and broadcast.
 If router is provided, set up a default route for that interface. Upon
context exit, clean up the interface leaving no configuration behind.
"""
- def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None,
- connectivity_url=None, static_routes=None):
+ def __init__(
+ self,
+ interface,
+ ip,
+ prefix_or_mask,
+ broadcast,
+ router=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ static_routes=None,
+ ):
"""Setup context manager and validate call signature.
@param interface: Name of the network interface to bring up.
@@ -977,22 +1114,25 @@ class EphemeralIPv4Network(object):
prefix.
@param broadcast: Broadcast address for the IPv4 network.
@param router: Optionally the default gateway IP.
- @param connectivity_url: Optionally, a URL to verify if a usable
+ @param connectivity_url_data: Optionally, a URL to verify if a usable
connection already exists.
@param static_routes: Optionally a list of static routes from DHCP
"""
if not all([interface, ip, prefix_or_mask, broadcast]):
raise ValueError(
- 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format(
- interface, ip, prefix_or_mask, broadcast))
+ "Cannot init network on {0} with {1}/{2} and bcast {3}".format(
+ interface, ip, prefix_or_mask, broadcast
+ )
+ )
try:
- self.prefix = mask_to_net_prefix(prefix_or_mask)
+ self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask)
except ValueError as e:
raise ValueError(
- 'Cannot setup network: {0}'.format(e)
+ "Cannot setup network, invalid prefix or "
+ "netmask: {0}".format(e)
) from e
- self.connectivity_url = connectivity_url
+ self.connectivity_url_data = connectivity_url_data
self.interface = interface
self.ip = ip
self.broadcast = broadcast
@@ -1002,11 +1142,13 @@ class EphemeralIPv4Network(object):
def __enter__(self):
"""Perform ephemeral network setup if interface is not connected."""
- if self.connectivity_url:
- if has_url_connectivity(self.connectivity_url):
+ if self.connectivity_url_data:
+ if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
- 'Skip ephemeral network setup, instance has connectivity'
- ' to %s', self.connectivity_url)
+ "Skip ephemeral network setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data["url"],
+ )
return
self._bringup_device()
@@ -1035,74 +1177,169 @@ class EphemeralIPv4Network(object):
def _delete_address(self, address, prefix):
"""Perform the ip command to remove the specified address."""
subp.subp(
- ['ip', '-family', 'inet', 'addr', 'del',
- '%s/%s' % (address, prefix), 'dev', self.interface],
- capture=True)
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "%s/%s" % (address, prefix),
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
def _bringup_device(self):
"""Perform the ip comands to fully setup the device."""
- cidr = '{0}/{1}'.format(self.ip, self.prefix)
+ cidr = "{0}/{1}".format(self.ip, self.prefix)
LOG.debug(
- 'Attempting setup of ephemeral network on %s with %s brd %s',
- self.interface, cidr, self.broadcast)
+ "Attempting setup of ephemeral network on %s with %s brd %s",
+ self.interface,
+ cidr,
+ self.broadcast,
+ )
try:
subp.subp(
- ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
- self.broadcast, 'dev', self.interface],
- capture=True, update_env={'LANG': 'C'})
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ cidr,
+ "broadcast",
+ self.broadcast,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
except subp.ProcessExecutionError as e:
if "File exists" not in e.stderr:
raise
LOG.debug(
- 'Skip ephemeral network setup, %s already has address %s',
- self.interface, self.ip)
+ "Skip ephemeral network setup, %s already has address %s",
+ self.interface,
+ self.ip,
+ )
else:
# Address creation success, bring up device and queue cleanup
subp.subp(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
- 'up'], capture=True)
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "up",
+ ],
+ capture=True,
+ )
self.cleanup_cmds.append(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
- 'down'])
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "down",
+ ]
+ )
self.cleanup_cmds.append(
- ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
- self.interface])
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ cidr,
+ "dev",
+ self.interface,
+ ]
+ )
def _bringup_static_routes(self):
# static_routes = [("169.254.169.254/32", "130.56.248.255"),
# ("0.0.0.0/0", "130.56.240.1")]
for net_address, gateway in self.static_routes:
via_arg = []
- if gateway != "0.0.0.0/0":
- via_arg = ['via', gateway]
+ if gateway != "0.0.0.0":
+ via_arg = ["via", gateway]
subp.subp(
- ['ip', '-4', 'route', 'add', net_address] + via_arg +
- ['dev', self.interface], capture=True)
+ ["ip", "-4", "route", "append", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ capture=True,
+ )
self.cleanup_cmds.insert(
- 0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
- ['dev', self.interface])
+ 0,
+ ["ip", "-4", "route", "del", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ )
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
- out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
- if 'default' in out:
+ out, _ = subp.subp(["ip", "route", "show", "0.0.0.0/0"], capture=True)
+ if "default" in out:
LOG.debug(
- 'Skip ephemeral route setup. %s already has default route: %s',
- self.interface, out.strip())
+ "Skip ephemeral route setup. %s already has default route: %s",
+ self.interface,
+ out.strip(),
+ )
return
subp.subp(
- ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
- 'src', self.ip], capture=True)
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ capture=True,
+ )
self.cleanup_cmds.insert(
0,
- ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
- 'src', self.ip])
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ )
subp.subp(
- ['ip', '-4', 'route', 'add', 'default', 'via', self.router,
- 'dev', self.interface], capture=True)
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "default",
+ "via",
+ self.router,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
self.cleanup_cmds.insert(
- 0, ['ip', '-4', 'route', 'del', 'default', 'dev', self.interface])
+ 0, ["ip", "-4", "route", "del", "default", "dev", self.interface]
+ )
class RendererNotFoundError(RuntimeError):
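EphemeralIPv4Network now takes a connectivity_url_data dict rather than a bare connectivity_url; a hedged usage sketch, with illustrative addresses that real callers would normally take from a DHCP lease:
    from cloudinit.net import EphemeralIPv4Network
    with EphemeralIPv4Network(
        interface="eth0",
        ip="192.168.1.10",
        prefix_or_mask="255.255.255.0",
        broadcast="192.168.1.255",
        router="192.168.1.1",
        connectivity_url_data={"url": "http://169.254.169.254/"},
    ):
        pass  # address and routes are configured here and removed on exit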
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
new file mode 100644
index 00000000..e80c26df
--- /dev/null
+++ b/cloudinit/net/activators.py
@@ -0,0 +1,290 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+from abc import ABC, abstractmethod
+from typing import Iterable, List, Type
+
+from cloudinit import subp, util
+from cloudinit.net.eni import available as eni_available
+from cloudinit.net.netplan import available as netplan_available
+from cloudinit.net.network_state import NetworkState
+from cloudinit.net.networkd import available as networkd_available
+from cloudinit.net.sysconfig import NM_CFG_FILE
+
+LOG = logging.getLogger(__name__)
+
+
+class NoActivatorException(Exception):
+ pass
+
+
+def _alter_interface(cmd, device_name) -> bool:
+ LOG.debug("Attempting command %s for device %s", cmd, device_name)
+ try:
+ (_out, err) = subp.subp(cmd)
+ if len(err):
+ LOG.warning("Running %s resulted in stderr output: %s", cmd, err)
+ return True
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+
+class NetworkActivator(ABC):
+ @staticmethod
+ @abstractmethod
+ def available() -> bool:
+ """Return True if activator is available, otherwise return False."""
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface.
+
+ Return True if successful, otherwise return False
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface.
+
+ Return True if successful, otherwise return False
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring up specified list of interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return all(cls.bring_up_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring up all interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return cls.bring_up_interfaces(
+ [i["name"] for i in network_state.iter_interfaces()]
+ )
+
+ @classmethod
+ def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring down specified list of interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return all(cls.bring_down_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring down all interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return cls.bring_down_interfaces(
+ [i["name"] for i in network_state.iter_interfaces()]
+ )
+
+
+class IfUpDownActivator(NetworkActivator):
+ # Note that we're not overriding bring_up_interfaces to pass something
+ # like ifup --all because it isn't supported everywhere.
+ # E.g., NetworkManager has an ifupdown plugin that requires the name
+ # of a specific connection.
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return eni_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ["ifup", device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ["ifdown", device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetworkManagerActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if network manager can be used on this system."""
+ config_present = os.path.isfile(
+ subp.target_path(target, path=NM_CFG_FILE)
+ )
+ nmcli_present = subp.which("nmcli", target=target)
+ return config_present and bool(nmcli_present)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using nmcli.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ["nmcli", "connection", "up", "ifname", device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface using nmcli.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ["nmcli", "connection", "down", device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetplanActivator(NetworkActivator):
+ NETPLAN_CMD = ["netplan", "apply"]
+
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if netplan can be used on this system."""
+ return netplan_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+ @staticmethod
+ def bring_up_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+ @staticmethod
+ def bring_down_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+ @staticmethod
+ def bring_down_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
+
+
+class NetworkdActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return networkd_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Return True is successful, otherwise return False"""
+ cmd = ["ip", "link", "set", "up", device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """Return True is successful, otherwise return False"""
+ cmd = ["systemctl", "restart", "systemd-networkd", "systemd-resolved"]
+ return _alter_interface(cmd, "all")
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Return True is successful, otherwise return False"""
+ cmd = ["ip", "link", "set", "down", device_name]
+ return _alter_interface(cmd, device_name)
+
+
+# This section is mostly copied and pasted from renderers.py. An abstract
+# version to encompass both seems overkill at this point
+DEFAULT_PRIORITY = [
+ IfUpDownActivator,
+ NetworkManagerActivator,
+ NetplanActivator,
+ NetworkdActivator,
+]
+
+
+def search_activator(
+ priority=None, target=None
+) -> List[Type[NetworkActivator]]:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+
+ unknown = [i for i in priority if i not in DEFAULT_PRIORITY]
+ if unknown:
+ raise ValueError(
+ "Unknown activators provided in priority list: %s" % unknown
+ )
+
+ return [activator for activator in priority if activator.available(target)]
+
+
+def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
+ found = search_activator(priority, target)
+ if not found:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+ tmsg = ""
+ if target and target != "/":
+ tmsg = " in target=%s" % target
+ raise NoActivatorException(
+ "No available network activators found%s. Searched "
+ "through list: %s" % (tmsg, priority)
+ )
+ selected = found[0]
+ LOG.debug("Using selected activator: %s", selected)
+ return selected
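The new activators module mirrors the renderer-selection pattern it borrows from renderers.py; a short sketch of how a caller might pick and drive one (the interface name is illustrative):
    from cloudinit.net.activators import NoActivatorException, select_activator
    try:
        # first available class from DEFAULT_PRIORITY, unless a custom
        # priority list or target root is given
        activator = select_activator()
    except NoActivatorException:
        activator = None
    if activator:
        activator.bring_up_interface("eth0")  # True on success, False otherwise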
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index e34e0454..ff5c7413 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -3,11 +3,9 @@
import re
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import util
-from cloudinit import subp
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit import net, subp, util
from cloudinit.distros import bsd_utils
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
from . import renderer
@@ -15,8 +13,8 @@ LOG = logging.getLogger(__name__)
class BSDRenderer(renderer.Renderer):
- resolv_conf_fn = 'etc/resolv.conf'
- rc_conf_fn = 'etc/rc.conf'
+ resolv_conf_fn = "etc/resolv.conf"
+ rc_conf_fn = "etc/rc.conf"
def get_rc_config_value(self, key):
fn = subp.target_path(self.target, self.rc_conf_fn)
@@ -31,115 +29,136 @@ class BSDRenderer(renderer.Renderer):
config = {}
self.target = None
self.interface_configurations = {}
- self._postcmds = config.get('postcmds', True)
+ self._postcmds = config.get("postcmds", True)
- def _ifconfig_entries(self, settings, target=None):
+ def _ifconfig_entries(self, settings):
ifname_by_mac = net.get_interfaces_by_mac()
for interface in settings.iter_interfaces():
device_name = interface.get("name")
device_mac = interface.get("mac_address")
- if device_name and re.match(r'^lo\d+$', device_name):
+ if device_name and re.match(r"^lo\d+$", device_name):
continue
if device_mac not in ifname_by_mac:
- LOG.info('Cannot find any device with MAC %s', device_mac)
+ LOG.info("Cannot find any device with MAC %s", device_mac)
elif device_mac and device_name:
cur_name = ifname_by_mac[device_mac]
if cur_name != device_name:
- LOG.info('netif service will rename interface %s to %s',
- cur_name, device_name)
+ LOG.info(
+ "netif service will rename interface %s to %s",
+ cur_name,
+ device_name,
+ )
try:
self.rename_interface(cur_name, device_name)
except NotImplementedError:
- LOG.error((
- 'Interface renaming is '
- 'not supported on this OS'))
+ LOG.error(
+ "Interface renaming is not supported on this OS"
+ )
device_name = cur_name
else:
device_name = ifname_by_mac[device_mac]
- LOG.info('Configuring interface %s', device_name)
+ LOG.info("Configuring interface %s", device_name)
- self.interface_configurations[device_name] = 'DHCP'
+ self.interface_configurations[device_name] = "DHCP"
for subnet in interface.get("subnets", []):
- if subnet.get('type') == 'static':
- if not subnet.get('netmask'):
+ if subnet.get("type") == "static":
+ if not subnet.get("netmask"):
LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address')
+ "Skipping IP %s, because there is no netmask",
+ subnet.get("address"),
)
continue
- LOG.debug('Configuring dev %s with %s / %s', device_name,
- subnet.get('address'), subnet.get('netmask'))
+ LOG.debug(
+ "Configuring dev %s with %s / %s",
+ device_name,
+ subnet.get("address"),
+ subnet.get("netmask"),
+ )
self.interface_configurations[device_name] = {
- 'address': subnet.get('address'),
- 'netmask': subnet.get('netmask'),
+ "address": subnet.get("address"),
+ "netmask": subnet.get("netmask"),
+ "mtu": subnet.get("mtu") or interface.get("mtu"),
}
- def _route_entries(self, settings, target=None):
+ def _route_entries(self, settings):
routes = list(settings.iter_routes())
for interface in settings.iter_interfaces():
subnets = interface.get("subnets", [])
for subnet in subnets:
- if subnet.get('type') != 'static':
+ if subnet.get("type") != "static":
continue
- gateway = subnet.get('gateway')
- if gateway and len(gateway.split('.')) == 4:
- routes.append({
- 'network': '0.0.0.0',
- 'netmask': '0.0.0.0',
- 'gateway': gateway})
- routes += subnet.get('routes', [])
+ gateway = subnet.get("gateway")
+ if gateway and len(gateway.split(".")) == 4:
+ routes.append(
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": gateway,
+ }
+ )
+ routes += subnet.get("routes", [])
for route in routes:
- network = route.get('network')
+ network = route.get("network")
if not network:
- LOG.debug('Skipping a bad route entry')
+ LOG.debug("Skipping a bad route entry")
continue
- netmask = route.get('netmask')
- gateway = route.get('gateway')
+ netmask = route.get("netmask")
+ gateway = route.get("gateway")
self.set_route(network, netmask, gateway)
- def _resolve_conf(self, settings, target=None):
+ def _resolve_conf(self, settings):
nameservers = settings.dns_nameservers
searchdomains = settings.dns_searchdomains
for interface in settings.iter_interfaces():
for subnet in interface.get("subnets", []):
- if 'dns_nameservers' in subnet:
- nameservers.extend(subnet['dns_nameservers'])
- if 'dns_search' in subnet:
- searchdomains.extend(subnet['dns_search'])
+ if "dns_nameservers" in subnet:
+ nameservers.extend(subnet["dns_nameservers"])
+ if "dns_search" in subnet:
+ searchdomains.extend(subnet["dns_search"])
# Try to read the /etc/resolv.conf or just start from scratch if that
# fails.
try:
- resolvconf = ResolvConf(util.load_file(subp.target_path(
- target, self.resolv_conf_fn)))
+ resolvconf = ResolvConf(
+ util.load_file(
+ subp.target_path(self.target, self.resolv_conf_fn)
+ )
+ )
resolvconf.parse()
except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- subp.target_path(target, self.resolv_conf_fn))
- resolvconf = ResolvConf('')
+ util.logexc(
+ LOG,
+ "Failed to parse %s, use new empty file",
+ subp.target_path(self.target, self.resolv_conf_fn),
+ )
+ resolvconf = ResolvConf("")
resolvconf.parse()
# Add some nameservers
- for server in nameservers:
+ for server in set(nameservers):
try:
resolvconf.add_nameserver(server)
except ValueError:
util.logexc(LOG, "Failed to add nameserver %s", server)
# And add any searchdomains.
- for domain in searchdomains:
+ for domain in set(searchdomains):
try:
resolvconf.add_search_domain(domain)
except ValueError:
util.logexc(LOG, "Failed to add search domain %s", domain)
util.write_file(
- subp.target_path(target, self.resolv_conf_fn),
- str(resolvconf), 0o644)
+ subp.target_path(self.target, self.resolv_conf_fn),
+ str(resolvconf),
+ 0o644,
+ )
def render_network_state(self, network_state, templates=None, target=None):
+ if target:
+ self.target = target
self._ifconfig_entries(settings=network_state)
self._route_entries(settings=network_state)
self._resolve_conf(settings=network_state)
@@ -149,7 +168,7 @@ class BSDRenderer(renderer.Renderer):
def dhcp_interfaces(self):
ic = self.interface_configurations.items
- return [k for k, v in ic() if v == 'DHCP']
+ return [k for k, v in ic() if v == "DHCP"]
def start_services(self, run=False):
raise NotImplementedError()
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index cc8dc17b..eab86d9f 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -12,11 +12,11 @@ import gzip
import io
import logging
import os
+import shlex
from cloudinit import util
-from . import get_devicelist
-from . import read_sys_net_safe
+from . import get_devicelist, read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
@@ -57,7 +57,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
if self._mac_addrs is None:
self._mac_addrs = {}
for k in get_devicelist():
- mac_addr = read_sys_net_safe(k, 'address')
+ mac_addr = read_sys_net_safe(k, "address")
if mac_addr:
self._mac_addrs[k] = mac_addr
@@ -72,8 +72,9 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
(ii) an open-iscsi interface file is present in the system
"""
if self._files:
- if 'ip=' in self._cmdline or 'ip6=' in self._cmdline:
- return True
+ for item in shlex.split(self._cmdline):
+ if item.startswith("ip=") or item.startswith("ip6="):
+ return True
if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE):
# iBft can configure networking without ip=
return True
@@ -81,7 +82,8 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
def render_config(self) -> dict:
return config_from_klibc_net_cfg(
- files=self._files, mac_addrs=self._mac_addrs,
+ files=self._files,
+ mac_addrs=self._mac_addrs,
)
@@ -111,78 +113,78 @@ def _klibc_to_config_entry(content, mac_addrs=None):
data = util.load_shell_content(content)
try:
- name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6']
+ name = data["DEVICE"] if "DEVICE" in data else data["DEVICE6"]
except KeyError as e:
raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e
# ipconfig on precise does not write PROTO
# IPv6 config gives us IPV6PROTO, not PROTO.
- proto = data.get('PROTO', data.get('IPV6PROTO'))
+ proto = data.get("PROTO", data.get("IPV6PROTO"))
if not proto:
- if data.get('filename'):
- proto = 'dhcp'
+ if data.get("filename"):
+ proto = "dhcp"
else:
- proto = 'none'
+ proto = "none"
- if proto not in ('none', 'dhcp', 'dhcp6'):
+ if proto not in ("none", "dhcp", "dhcp6"):
raise ValueError("Unexpected value for PROTO: %s" % proto)
iface = {
- 'type': 'physical',
- 'name': name,
- 'subnets': [],
+ "type": "physical",
+ "name": name,
+ "subnets": [],
}
if name in mac_addrs:
- iface['mac_address'] = mac_addrs[name]
+ iface["mac_address"] = mac_addrs[name]
# Handle both IPv4 and IPv6 values
- for pre in ('IPV4', 'IPV6'):
+ for pre in ("IPV4", "IPV6"):
# if no IPV4ADDR or IPV6ADDR, then go on.
if pre + "ADDR" not in data:
continue
# PROTO for ipv4, IPV6PROTO for ipv6
- cur_proto = data.get(pre + 'PROTO', proto)
+ cur_proto = data.get(pre + "PROTO", proto)
# ipconfig's 'none' is called 'static'
- if cur_proto == 'none':
- cur_proto = 'static'
- subnet = {'type': cur_proto, 'control': 'manual'}
+ if cur_proto == "none":
+ cur_proto = "static"
+ subnet = {"type": cur_proto, "control": "manual"}
# only populate address for static types. While the rendered config
# may have an address for dhcp, that is not really expected.
- if cur_proto == 'static':
- subnet['address'] = data[pre + 'ADDR']
+ if cur_proto == "static":
+ subnet["address"] = data[pre + "ADDR"]
# these fields go right on the subnet
- for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
+ for key in ("NETMASK", "BROADCAST", "GATEWAY"):
if pre + key in data:
subnet[key.lower()] = data[pre + key]
dns = []
# handle IPV4DNS0 or IPV6DNS0
- for nskey in ('DNS0', 'DNS1'):
+ for nskey in ("DNS0", "DNS1"):
ns = data.get(pre + nskey)
# verify it has something other than 0.0.0.0 (or ipv6)
if ns and len(ns.strip(":.0")):
dns.append(data[pre + nskey])
if dns:
- subnet['dns_nameservers'] = dns
+ subnet["dns_nameservers"] = dns
# add search to both ipv4 and ipv6, as it has no namespace
- search = data.get('DOMAINSEARCH')
+ search = data.get("DOMAINSEARCH")
if search:
- if ',' in search:
- subnet['dns_search'] = search.split(",")
+ if "," in search:
+ subnet["dns_search"] = search.split(",")
else:
- subnet['dns_search'] = search.split()
+ subnet["dns_search"] = search.split()
- iface['subnets'].append(subnet)
+ iface["subnets"].append(subnet)
return name, iface
def _get_klibc_net_cfg_files():
- return glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf')
+ return glob.glob("/run/net-*.conf") + glob.glob("/run/net6-*.conf")
def config_from_klibc_net_cfg(files=None, mac_addrs=None):
@@ -192,24 +194,28 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
entries = []
names = {}
for cfg_file in files:
- name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
- mac_addrs=mac_addrs)
+ name, entry = _klibc_to_config_entry(
+ util.load_file(cfg_file), mac_addrs=mac_addrs
+ )
if name in names:
- prev = names[name]['entry']
- if prev.get('mac_address') != entry.get('mac_address'):
+ prev = names[name]["entry"]
+ if prev.get("mac_address") != entry.get("mac_address"):
raise ValueError(
"device '{name}' was defined multiple times ({files})"
" but had differing mac addresses: {old} -> {new}.".format(
- name=name, files=' '.join(names[name]['files']),
- old=prev.get('mac_address'),
- new=entry.get('mac_address')))
- prev['subnets'].extend(entry['subnets'])
- names[name]['files'].append(cfg_file)
+ name=name,
+ files=" ".join(names[name]["files"]),
+ old=prev.get("mac_address"),
+ new=entry.get("mac_address"),
+ )
+ )
+ prev["subnets"].extend(entry["subnets"])
+ names[name]["files"].append(cfg_file)
else:
- names[name] = {'files': [cfg_file], 'entry': entry}
+ names[name] = {"files": [cfg_file], "entry": entry}
entries.append(entry)
- return {'config': entries, 'version': 1}
+ return {"config": entries, "version": 1}
def read_initramfs_config():
@@ -255,8 +261,10 @@ def _b64dgz(data):
except (TypeError, ValueError):
logging.error(
"Expected base64 encoded kernel commandline parameter"
- " network-config. Ignoring network-config=%s.", data)
- return ''
+ " network-config. Ignoring network-config=%s.",
+ data,
+ )
+ return ""
return _decomp_gzip(blob)
@@ -265,7 +273,7 @@ def read_kernel_cmdline_config(cmdline=None):
if cmdline is None:
cmdline = util.get_cmdline()
- if 'network-config=' in cmdline:
+ if "network-config=" in cmdline:
data64 = None
for tok in cmdline.split():
if tok.startswith("network-config="):
@@ -277,4 +285,5 @@ def read_kernel_cmdline_config(cmdline=None):
return None
+
# vi: ts=4 expandtab
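For context on _b64dgz() above, a hedged sketch of producing and reading back a network-config= kernel parameter; the YAML payload is illustrative, and the final YAML decoding step is assumed to happen inside read_kernel_cmdline_config():
    import base64
    import gzip
    from cloudinit.net.cmdline import read_kernel_cmdline_config
    netcfg = b"version: 2\nethernets:\n  eth0:\n    dhcp4: true\n"
    blob = base64.b64encode(gzip.compress(netcfg)).decode()
    parsed = read_kernel_cmdline_config(
        cmdline="ro root=/dev/vda1 network-config=%s" % blob
    )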
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 4394c68b..f9af18cf 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -4,25 +4,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import configobj
import logging
import os
import re
import signal
import time
from io import StringIO
+from typing import Any, Dict
+
+import configobj
+from cloudinit import subp, temp_utils, util
from cloudinit.net import (
- EphemeralIPv4Network, find_fallback_nic, get_devicelist,
- has_url_connectivity)
+ EphemeralIPv4Network,
+ find_fallback_nic,
+ get_devicelist,
+ has_url_connectivity,
+)
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
-NETWORKD_LEASES_DIR = '/run/systemd/netif/leases'
+NETWORKD_LEASES_DIR = "/run/systemd/netif/leases"
class InvalidDHCPLeaseFileError(Exception):
@@ -38,21 +41,28 @@ class NoDHCPLeaseError(Exception):
class EphemeralDHCPv4(object):
- def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
+ def __init__(
+ self,
+ iface=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ dhcp_log_func=None,
+ ):
self.iface = iface
self._ephipv4 = None
self.lease = None
self.dhcp_log_func = dhcp_log_func
- self.connectivity_url = connectivity_url
+ self.connectivity_url_data = connectivity_url_data
def __enter__(self):
"""Setup sandboxed dhcp context, unless connectivity_url can already be
reached."""
- if self.connectivity_url:
- if has_url_connectivity(self.connectivity_url):
+ if self.connectivity_url_data:
+ if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
- 'Skip ephemeral DHCP setup, instance has connectivity'
- ' to %s', self.connectivity_url)
+ "Skip ephemeral DHCP setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data,
+ )
return
return self.obtain_lease()
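A hedged sketch of EphemeralDHCPv4 with the new connectivity_url_data parameter (the URL is illustrative); when that URL already answers, the DHCP setup is skipped and the context yields no lease:
    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
    try:
        with EphemeralDHCPv4(
            connectivity_url_data={"url": "http://169.254.169.254", "timeout": 5}
        ) as lease:
            # lease is the parsed dhclient lease dict, or None if the
            # connectivity check short-circuited the ephemeral DHCP setup
            pass
    except NoDHCPLeaseError:
        pass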
@@ -81,31 +91,39 @@ class EphemeralDHCPv4(object):
return self.lease
try:
leases = maybe_perform_dhcp_discovery(
- self.iface, self.dhcp_log_func)
+ self.iface, self.dhcp_log_func
+ )
except InvalidDHCPLeaseFileError as e:
raise NoDHCPLeaseError() from e
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
- LOG.debug("Received dhcp lease on %s for %s/%s",
- self.lease['interface'], self.lease['fixed-address'],
- self.lease['subnet-mask'])
- nmap = {'interface': 'interface', 'ip': 'fixed-address',
- 'prefix_or_mask': 'subnet-mask',
- 'broadcast': 'broadcast-address',
- 'static_routes': [
- 'rfc3442-classless-static-routes',
- 'classless-static-routes'
- ],
- 'router': 'routers'}
+ LOG.debug(
+ "Received dhcp lease on %s for %s/%s",
+ self.lease["interface"],
+ self.lease["fixed-address"],
+ self.lease["subnet-mask"],
+ )
+ nmap = {
+ "interface": "interface",
+ "ip": "fixed-address",
+ "prefix_or_mask": "subnet-mask",
+ "broadcast": "broadcast-address",
+ "static_routes": [
+ "rfc3442-classless-static-routes",
+ "classless-static-routes",
+ ],
+ "router": "routers",
+ }
kwargs = self.extract_dhcp_options_mapping(nmap)
- if not kwargs['broadcast']:
- kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
- if kwargs['static_routes']:
- kwargs['static_routes'] = (
- parse_static_routes(kwargs['static_routes']))
- if self.connectivity_url:
- kwargs['connectivity_url'] = self.connectivity_url
+ if not kwargs["broadcast"]:
+ kwargs["broadcast"] = bcip(kwargs["prefix_or_mask"], kwargs["ip"])
+ if kwargs["static_routes"]:
+ kwargs["static_routes"] = parse_static_routes(
+ kwargs["static_routes"]
+ )
+ if self.connectivity_url_data:
+ kwargs["connectivity_url_data"] = self.connectivity_url_data
ephipv4 = EphemeralIPv4Network(**kwargs)
ephipv4.__enter__()
self._ephipv4 = ephipv4
@@ -116,16 +134,15 @@ class EphemeralDHCPv4(object):
for internal_reference, lease_option_names in nmap.items():
if isinstance(lease_option_names, list):
self.get_first_option_value(
- internal_reference,
- lease_option_names,
- result
+ internal_reference, lease_option_names, result
)
else:
result[internal_reference] = self.lease.get(lease_option_names)
return result
- def get_first_option_value(self, internal_mapping,
- lease_option_names, result):
+ def get_first_option_value(
+ self, internal_mapping, lease_option_names, result
+ ):
for different_names in lease_option_names:
if not result.get(internal_mapping):
result[internal_mapping] = self.lease.get(different_names)
@@ -147,19 +164,20 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
if nic is None:
nic = find_fallback_nic()
if nic is None:
- LOG.debug('Skip dhcp_discovery: Unable to find fallback nic.')
+ LOG.debug("Skip dhcp_discovery: Unable to find fallback nic.")
return []
elif nic not in get_devicelist():
LOG.debug(
- 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
+ "Skip dhcp_discovery: nic %s not found in get_devicelist.", nic
+ )
return []
- dhclient_path = subp.which('dhclient')
+ dhclient_path = subp.which("dhclient")
if not dhclient_path:
- LOG.debug('Skip dhclient configuration: No dhclient command found.')
+ LOG.debug("Skip dhclient configuration: No dhclient command found.")
return []
- with temp_utils.tempdir(rmtree_ignore_errors=True,
- prefix='cloud-init-dhcp-',
- needs_exe=True) as tdir:
+ with temp_utils.tempdir(
+ rmtree_ignore_errors=True, prefix="cloud-init-dhcp-", needs_exe=True
+ ) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
@@ -173,25 +191,28 @@ def parse_dhcp_lease_file(lease_file):
    @raises: InvalidDHCPLeaseFileError on empty or unparseable leasefile
content.
"""
- lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n")
+ lease_regex = re.compile(r"lease {(?P<lease>.*?)}\n", re.DOTALL)
dhcp_leases = []
lease_content = util.load_file(lease_file)
if len(lease_content) == 0:
raise InvalidDHCPLeaseFileError(
- 'Cannot parse empty dhcp lease file {0}'.format(lease_file))
+ "Cannot parse empty dhcp lease file {0}".format(lease_file)
+ )
for lease in lease_regex.findall(lease_content):
lease_options = []
- for line in lease.split(';'):
+ for line in lease.split(";"):
# Strip newlines, double-quotes and option prefix
- line = line.strip().replace('"', '').replace('option ', '')
+ line = line.strip().replace('"', "").replace("option ", "")
if not line:
continue
- lease_options.append(line.split(' ', 1))
+ lease_options.append(line.split(" ", 1))
dhcp_leases.append(dict(lease_options))
if not dhcp_leases:
raise InvalidDHCPLeaseFileError(
- 'Cannot parse dhcp lease file {0}. No leases found'.format(
- lease_file))
+ "Cannot parse dhcp lease file {0}. No leases found".format(
+ lease_file
+ )
+ )
return dhcp_leases
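A small illustration of the lease format parse_dhcp_lease_file() expects (values are made up; a real lease file is written by the sandboxed dhclient):
    import tempfile
    from cloudinit.net.dhcp import parse_dhcp_lease_file
    lease_text = (
        "lease {\n"
        '  interface "eth0";\n'
        "  fixed-address 192.168.1.10;\n"
        "  option subnet-mask 255.255.255.0;\n"
        "  option routers 192.168.1.1;\n"
        "}\n"
    )
    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        f.write(lease_text)
    leases = parse_dhcp_lease_file(f.name)
    # [{'interface': 'eth0', 'fixed-address': '192.168.1.10',
    #   'subnet-mask': '255.255.255.0', 'routers': '192.168.1.1'}]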
@@ -208,17 +229,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
@return: A list of dicts of representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
- LOG.debug('Performing a dhcp discovery on %s', interface)
+ LOG.debug("Performing a dhcp discovery on %s", interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
 # /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
- sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
+ sandbox_dhclient_cmd = os.path.join(cleandir, "dhclient")
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
- pid_file = os.path.join(cleandir, 'dhclient.pid')
- lease_file = os.path.join(cleandir, 'dhcp.leases')
+ pid_file = os.path.join(cleandir, "dhclient.pid")
+ lease_file = os.path.join(cleandir, "dhcp.leases")
# In some cases files in /var/tmp may not be executable, launching dhclient
# from there will certainly raise 'Permission denied' error. Try launching
@@ -230,9 +251,19 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
- subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
- cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
- '-pf', pid_file, interface, '-sf', '/bin/true']
+ subp.subp(["ip", "link", "set", "dev", interface, "up"], capture=True)
+ cmd = [
+ sandbox_dhclient_cmd,
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ pid_file,
+ interface,
+ "-sf",
+ "/bin/true",
+ ]
out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
@@ -243,13 +274,16 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
- [pid_file, lease_file], maxwait=5, naplen=0.01)
+ [pid_file, lease_file], maxwait=5, naplen=0.01
+ )
if missing:
- LOG.warning("dhclient did not produce expected files: %s",
- ', '.join(os.path.basename(f) for f in missing))
+ LOG.warning(
+ "dhclient did not produce expected files: %s",
+ ", ".join(os.path.basename(f) for f in missing),
+ )
return []
- ppid = 'unknown'
+ ppid = "unknown"
daemonized = False
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
@@ -260,7 +294,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
- LOG.debug('killing dhclient with pid=%s', pid)
+ LOG.debug("killing dhclient with pid=%s", pid)
os.kill(pid, signal.SIGKILL)
daemonized = True
break
@@ -268,8 +302,11 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
if not daemonized:
LOG.error(
- 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s '
- 'seconds', pid_content, ppid, 0.01 * 1000
+ "dhclient(pid=%s, parentpid=%s) failed to daemonize after %s "
+ "seconds",
+ pid_content,
+ ppid,
+ 0.01 * 1000,
)
if dhcp_log_func is not None:
dhcp_log_func(out, err)
@@ -301,7 +338,8 @@ def networkd_load_leases(leases_d=None):
return ret
for lfile in os.listdir(leases_d):
ret[lfile] = networkd_parse_lease(
- util.load_file(os.path.join(leases_d, lfile)))
+ util.load_file(os.path.join(leases_d, lfile))
+ )
return ret
@@ -316,7 +354,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
def parse_static_routes(rfc3442):
- """ parse rfc3442 format and return a list containing tuple of strings.
+ """parse rfc3442 format and return a list containing tuple of strings.
The tuple is composed of the network_address (including net length) and
gateway for a parsed static route. It can parse two formats of rfc3442,
@@ -346,10 +384,12 @@ def parse_static_routes(rfc3442):
static_routes = []
def _trunc_error(cidr, required, remain):
- msg = ("RFC3442 string malformed. Current route has CIDR of %s "
- "and requires %s significant octets, but only %s remain. "
- "Verify DHCP rfc3442-classless-static-routes value: %s"
- % (cidr, required, remain, rfc3442))
+ msg = (
+ "RFC3442 string malformed. Current route has CIDR of %s "
+ "and requires %s significant octets, but only %s remain. "
+ "Verify DHCP rfc3442-classless-static-routes value: %s"
+ % (cidr, required, remain, rfc3442)
+ )
LOG.error(msg)
current_idx = 0
@@ -362,32 +402,32 @@ def parse_static_routes(rfc3442):
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+5])
- gateway = ".".join(tokens[idx+5:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 5])
+ gateway = ".".join(tokens[idx + 5 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(17, 25):
req_toks = 8
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+4] + ["0"])
- gateway = ".".join(tokens[idx+4:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 4] + ["0"])
+ gateway = ".".join(tokens[idx + 4 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(9, 17):
req_toks = 7
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"])
- gateway = ".".join(tokens[idx+3:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 3] + ["0", "0"])
+ gateway = ".".join(tokens[idx + 3 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(1, 9):
req_toks = 6
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"])
- gateway = ".".join(tokens[idx+2:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 2] + ["0", "0", "0"])
+ gateway = ".".join(tokens[idx + 2 : idx + req_toks])
current_idx = idx + req_toks
elif net_length == 0:
req_toks = 5
@@ -395,15 +435,19 @@ def parse_static_routes(rfc3442):
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
net_address = "0.0.0.0"
- gateway = ".".join(tokens[idx+1:idx+req_toks])
+ gateway = ".".join(tokens[idx + 1 : idx + req_toks])
current_idx = idx + req_toks
else:
- LOG.error('Parsed invalid net length "%s". Verify DHCP '
- 'rfc3442-classless-static-routes value.', net_length)
+ LOG.error(
+ 'Parsed invalid net length "%s". Verify DHCP '
+ "rfc3442-classless-static-routes value.",
+ net_length,
+ )
return static_routes
static_routes.append(("%s/%s" % (net_address, net_length), gateway))
return static_routes
+
# vi: ts=4 expandtab
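A hedged example of the rfc3442 parsing implemented above, using the dhclient-style comma-separated form (octet values are illustrative):
    from cloudinit.net.dhcp import parse_static_routes
    routes = parse_static_routes(
        "32,169,254,169,254,130,56,248,255,0,130,56,240,1"
    )
    # [("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1")]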
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 0074691b..99e3fbb0 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -5,32 +5,58 @@ import glob
import os
import re
-from . import ParserError
-
-from . import renderer
-from .network_state import subnet_is_ipv6
-
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
+from . import ParserError, renderer
+from .network_state import subnet_is_ipv6
LOG = logging.getLogger(__name__)
NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
+ "pre-up",
+ "up",
+ "post-up",
+ "down",
+ "pre-down",
+ "post-down",
]
NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
+ "bridge_ageing",
+ "bridge_bridgeprio",
+ "bridge_fd",
+ "bridge_gcinit",
+ "bridge_hello",
+ "bridge_maxage",
+ "bridge_maxwait",
+ "bridge_stp",
]
NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
+ "address",
+ "netmask",
+ "broadcast",
+ "network",
+ "metric",
+ "gateway",
+ "pointtopoint",
+ "media",
+ "mtu",
+ "hostname",
+ "leasehours",
+ "leasetime",
+ "vendor",
+ "client",
+ "bootfile",
+ "server",
+ "hwaddr",
+ "provider",
+ "frame",
+ "netnum",
+ "endpoint",
+ "local",
+ "ttl",
]
@@ -38,27 +64,27 @@ NET_CONFIG_OPTIONS = [
def _iface_add_subnet(iface, subnet):
content = []
valid_map = [
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'mtu',
- 'scope',
- 'dns_search',
- 'dns_nameservers',
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "mtu",
+ "scope",
+ "dns_search",
+ "dns_nameservers",
]
for key, value in subnet.items():
- if key == 'netmask':
+ if key == "netmask":
continue
- if key == 'address':
- value = "%s/%s" % (subnet['address'], subnet['prefix'])
+ if key == "address":
+ value = "%s/%s" % (subnet["address"], subnet["prefix"])
if value and key in valid_map:
if type(value) == list:
value = " ".join(value)
- if '_' in key:
- key = key.replace('_', '-')
+ if "_" in key:
+ key = key.replace("_", "-")
content.append(" {0} {1}".format(key, value))
return sorted(content)
@@ -75,41 +101,44 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
return []
content = []
ignore_map = [
- 'control',
- 'device_id',
- 'driver',
- 'index',
- 'inet',
- 'mode',
- 'name',
- 'subnets',
- 'type',
+ "control",
+ "device_id",
+ "driver",
+ "index",
+ "inet",
+ "mode",
+ "name",
+ "subnets",
+ "type",
]
# The following parameters require repetitive entries of the key for
# each of the values
multiline_keys = [
- 'bridge_pathcost',
- 'bridge_portprio',
- 'bridge_waitport',
+ "bridge_pathcost",
+ "bridge_portprio",
+ "bridge_waitport",
]
- renames = {'mac_address': 'hwaddress'}
- if iface['type'] not in ['bond', 'bridge', 'infiniband', 'vlan']:
- ignore_map.append('mac_address')
+ renames = {"mac_address": "hwaddress"}
+ if iface["type"] not in ["bond", "bridge", "infiniband", "vlan"]:
+ ignore_map.append("mac_address")
for key, value in iface.items():
# convert bool to string for eni
if type(value) == bool:
- value = 'on' if iface[key] else 'off'
+ value = "on" if iface[key] else "off"
if not value or key in ignore_map:
continue
- if key == 'mtu' and ipv4_subnet_mtu:
+ if key == "mtu" and ipv4_subnet_mtu:
if value != ipv4_subnet_mtu:
LOG.warning(
"Network config: ignoring %s device-level mtu:%s because"
" ipv4 subnet-level mtu:%s provided.",
- iface['name'], value, ipv4_subnet_mtu)
+ iface["name"],
+ value,
+ ipv4_subnet_mtu,
+ )
continue
if key in multiline_keys:
for v in value:
@@ -123,9 +152,9 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
def _iface_start_entry(iface, index, render_hwaddress=False):
- fullname = iface['name']
+ fullname = iface["name"]
- control = iface['control']
+ control = iface["control"]
if control == "auto":
cverb = "auto"
elif control in ("hotplug",):
@@ -134,12 +163,13 @@ def _iface_start_entry(iface, index, render_hwaddress=False):
cverb = "# control-" + control
subst = iface.copy()
- subst.update({'fullname': fullname, 'cverb': cverb})
+ subst.update({"fullname": fullname, "cverb": cverb})
lines = [
"{cverb} {fullname}".format(**subst),
- "iface {fullname} {inet} {mode}".format(**subst)]
- if render_hwaddress and iface.get('mac_address'):
+ "iface {fullname} {inet} {mode}".format(**subst),
+ ]
+ if render_hwaddress and iface.get("mac_address"):
lines.append(" hwaddress {mac_address}".format(**subst))
return lines
@@ -159,9 +189,9 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
currif = None
for line in contents.splitlines():
line = line.strip()
- if line.startswith('#'):
+ if line.startswith("#"):
continue
- split = line.split(' ')
+ split = line.split(" ")
option = split[0]
if option == "source-directory":
parsed_src_dir = split[1]
@@ -172,16 +202,18 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
dir_contents = [
os.path.join(expanded_path, path)
for path in dir_contents
- if (os.path.isfile(os.path.join(expanded_path, path)) and
- re.match("^[a-zA-Z0-9_-]+$", path) is not None)
+ if (
+ os.path.isfile(os.path.join(expanded_path, path))
+ and re.match("^[a-zA-Z0-9_-]+$", path) is not None
+ )
]
for entry in dir_contents:
with open(entry, "r") as fp:
src_data = fp.read().strip()
abs_entry = os.path.abspath(entry)
_parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_entry), abs_entry)
+ ifaces, src_data, os.path.dirname(abs_entry), abs_entry
+ )
elif option == "source":
new_src_path = split[1]
if not new_src_path.startswith("/"):
@@ -191,8 +223,8 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
src_data = fp.read().strip()
abs_path = os.path.abspath(expanded_path)
_parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_path), abs_path)
+ ifaces, src_data, os.path.dirname(abs_path), abs_path
+ )
elif option == "auto":
for iface in split[1:]:
if iface not in ifaces:
@@ -200,7 +232,7 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
# Include the source path this interface was found in.
"_source_path": src_path
}
- ifaces[iface]['auto'] = True
+ ifaces[iface]["auto"] = True
elif option == "iface":
iface, family, method = split[1:4]
if iface not in ifaces:
@@ -208,71 +240,72 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
# Include the source path this interface was found in.
"_source_path": src_path
}
- elif 'family' in ifaces[iface]:
+ elif "family" in ifaces[iface]:
raise ParserError(
"Interface %s can only be defined once. "
- "Re-defined in '%s'." % (iface, src_path))
- ifaces[iface]['family'] = family
- ifaces[iface]['method'] = method
+ "Re-defined in '%s'." % (iface, src_path)
+ )
+ ifaces[iface]["family"] = family
+ ifaces[iface]["method"] = method
currif = iface
elif option == "hwaddress":
if split[1] == "ether":
val = split[2]
else:
val = split[1]
- ifaces[currif]['hwaddress'] = val
+ ifaces[currif]["hwaddress"] = val
elif option in NET_CONFIG_OPTIONS:
ifaces[currif][option] = split[1]
elif option in NET_CONFIG_COMMANDS:
if option not in ifaces[currif]:
ifaces[currif][option] = []
- ifaces[currif][option].append(' '.join(split[1:]))
- elif option.startswith('dns-'):
- if 'dns' not in ifaces[currif]:
- ifaces[currif]['dns'] = {}
- if option == 'dns-search':
- ifaces[currif]['dns']['search'] = []
+ ifaces[currif][option].append(" ".join(split[1:]))
+ elif option.startswith("dns-"):
+ if "dns" not in ifaces[currif]:
+ ifaces[currif]["dns"] = {}
+ if option == "dns-search":
+ ifaces[currif]["dns"]["search"] = []
for domain in split[1:]:
- ifaces[currif]['dns']['search'].append(domain)
- elif option == 'dns-nameservers':
- ifaces[currif]['dns']['nameservers'] = []
+ ifaces[currif]["dns"]["search"].append(domain)
+ elif option == "dns-nameservers":
+ ifaces[currif]["dns"]["nameservers"] = []
for server in split[1:]:
- ifaces[currif]['dns']['nameservers'].append(server)
- elif option.startswith('bridge_'):
- if 'bridge' not in ifaces[currif]:
- ifaces[currif]['bridge'] = {}
+ ifaces[currif]["dns"]["nameservers"].append(server)
+ elif option.startswith("bridge_"):
+ if "bridge" not in ifaces[currif]:
+ ifaces[currif]["bridge"] = {}
if option in NET_CONFIG_BRIDGE_OPTIONS:
- bridge_option = option.replace('bridge_', '', 1)
- ifaces[currif]['bridge'][bridge_option] = split[1]
+ bridge_option = option.replace("bridge_", "", 1)
+ ifaces[currif]["bridge"][bridge_option] = split[1]
elif option == "bridge_ports":
- ifaces[currif]['bridge']['ports'] = []
+ ifaces[currif]["bridge"]["ports"] = []
for iface in split[1:]:
- ifaces[currif]['bridge']['ports'].append(iface)
+ ifaces[currif]["bridge"]["ports"].append(iface)
elif option == "bridge_hw":
# doc is confusing and thus some may put literal 'MAC'
# bridge_hw MAC <address>
# but correct is:
# bridge_hw <address>
if split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
+ ifaces[currif]["bridge"]["mac"] = split[2]
else:
- ifaces[currif]['bridge']['mac'] = split[1]
+ ifaces[currif]["bridge"]["mac"] = split[1]
elif option == "bridge_pathcost":
- if 'pathcost' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['pathcost'] = {}
- ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
+ if "pathcost" not in ifaces[currif]["bridge"]:
+ ifaces[currif]["bridge"]["pathcost"] = {}
+ ifaces[currif]["bridge"]["pathcost"][split[1]] = split[2]
elif option == "bridge_portprio":
- if 'portprio' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['portprio'] = {}
- ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
- elif option.startswith('bond-'):
- if 'bond' not in ifaces[currif]:
- ifaces[currif]['bond'] = {}
- bond_option = option.replace('bond-', '', 1)
- ifaces[currif]['bond'][bond_option] = split[1]
+ if "portprio" not in ifaces[currif]["bridge"]:
+ ifaces[currif]["bridge"]["portprio"] = {}
+ ifaces[currif]["bridge"]["portprio"][split[1]] = split[2]
+ elif option.startswith("bond-"):
+ if "bond" not in ifaces[currif]:
+ ifaces[currif]["bond"] = {}
+ bond_option = option.replace("bond-", "", 1)
+ ifaces[currif]["bond"][bond_option] = split[1]
for iface in ifaces.keys():
- if 'auto' not in ifaces[iface]:
- ifaces[iface]['auto'] = False
+ if "auto" not in ifaces[iface]:
+ ifaces[iface]["auto"] = False
def parse_deb_config(path):
@@ -282,8 +315,8 @@ def parse_deb_config(path):
contents = fp.read().strip()
abs_path = os.path.abspath(path)
_parse_deb_config_data(
- ifaces, contents,
- os.path.dirname(abs_path), abs_path)
+ ifaces, contents, os.path.dirname(abs_path), abs_path
+ )
return ifaces
@@ -308,32 +341,31 @@ def _ifaces_to_net_config_data(ifaces):
dtype = "loopback"
else:
dtype = "physical"
- devs[devname] = {'type': dtype, 'name': devname, 'subnets': []}
+ devs[devname] = {"type": dtype, "name": devname, "subnets": []}
 # this isn't strictly correct, but some might specify
# hwaddress on a nic for matching / declaring name.
- if 'hwaddress' in data:
- devs[devname]['mac_address'] = data['hwaddress']
- subnet = {'_orig_eni_name': name, 'type': data['method']}
- if data.get('auto'):
- subnet['control'] = 'auto'
+ if "hwaddress" in data:
+ devs[devname]["mac_address"] = data["hwaddress"]
+ subnet = {"_orig_eni_name": name, "type": data["method"]}
+ if data.get("auto"):
+ subnet["control"] = "auto"
else:
- subnet['control'] = 'manual'
+ subnet["control"] = "manual"
- if data.get('method') == 'static':
- subnet['address'] = data['address']
+ if data.get("method") == "static":
+ subnet["address"] = data["address"]
- for copy_key in ('netmask', 'gateway', 'broadcast'):
+ for copy_key in ("netmask", "gateway", "broadcast"):
if copy_key in data:
subnet[copy_key] = data[copy_key]
- if 'dns' in data:
- for n in ('nameservers', 'search'):
- if n in data['dns'] and data['dns'][n]:
- subnet['dns_' + n] = data['dns'][n]
- devs[devname]['subnets'].append(subnet)
+ if "dns" in data:
+ for n in ("nameservers", "search"):
+ if n in data["dns"] and data["dns"][n]:
+ subnet["dns_" + n] = data["dns"][n]
+ devs[devname]["subnets"].append(subnet)
- return {'version': 1,
- 'config': [devs[d] for d in sorted(devs)]}
+ return {"version": 1, "config": [devs[d] for d in sorted(devs)]}
class Renderer(renderer.Renderer):
@@ -342,10 +374,11 @@ class Renderer(renderer.Renderer):
def __init__(self, config=None):
if not config:
config = {}
- self.eni_path = config.get('eni_path', 'etc/network/interfaces')
- self.eni_header = config.get('eni_header', None)
+ self.eni_path = config.get("eni_path", "etc/network/interfaces")
+ self.eni_header = config.get("eni_header", None)
self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
+ "netrules_path", "etc/udev/rules.d/70-persistent-net.rules"
+ )
def _render_route(self, route, indent=""):
"""When rendering routes for an iface, in some cases applying a route
@@ -367,151 +400,166 @@ class Renderer(renderer.Renderer):
down = indent + "pre-down route del"
or_true = " || true"
mapping = {
- 'gateway': 'gw',
- 'metric': 'metric',
+ "gateway": "gw",
+ "metric": "metric",
}
- default_gw = ''
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- default_gw = ' default'
- elif route['network'] == '::' and route['prefix'] == 0:
- default_gw = ' -A inet6 default'
+ default_gw = ""
+ if route["network"] == "0.0.0.0" and route["netmask"] == "0.0.0.0":
+ default_gw = " default"
+ elif route["network"] == "::" and route["prefix"] == 0:
+ default_gw = " -A inet6 default"
- route_line = ''
- for k in ['network', 'gateway', 'metric']:
- if default_gw and k == 'network':
+ route_line = ""
+ for k in ["network", "gateway", "metric"]:
+ if default_gw and k == "network":
continue
- if k == 'gateway':
- route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
+ if k == "gateway":
+ route_line += "%s %s %s" % (default_gw, mapping[k], route[k])
elif k in route:
- if k == 'network':
- if ':' in route[k]:
- route_line += ' -A inet6'
+ if k == "network":
+ if ":" in route[k]:
+ route_line += " -A inet6"
+ elif route.get("prefix") == 32:
+ route_line += " -host"
else:
- route_line += ' -net'
- if 'prefix' in route:
- route_line += ' %s/%s' % (route[k], route['prefix'])
+ route_line += " -net"
+ if "prefix" in route:
+ route_line += " %s/%s" % (route[k], route["prefix"])
else:
- route_line += ' %s %s' % (mapping[k], route[k])
+ route_line += " %s %s" % (mapping[k], route[k])
content.append(up + route_line + or_true)
content.append(down + route_line + or_true)
return content
def _render_iface(self, iface, render_hwaddress=False):
sections = []
- subnets = iface.get('subnets', {})
- accept_ra = iface.pop('accept-ra', None)
- ethernet_wol = iface.pop('wakeonlan', None)
+ subnets = iface.get("subnets", {})
+ accept_ra = iface.pop("accept-ra", None)
+ ethernet_wol = iface.pop("wakeonlan", None)
if ethernet_wol:
# Specify WOL setting 'g' for using "Magic Packet"
- iface['ethernet-wol'] = 'g'
+ iface["ethernet-wol"] = "g"
if subnets:
for index, subnet in enumerate(subnets):
ipv4_subnet_mtu = None
- iface['index'] = index
- iface['mode'] = subnet['type']
- iface['control'] = subnet.get('control', 'auto')
- subnet_inet = 'inet'
+ iface["index"] = index
+ iface["mode"] = subnet["type"]
+ iface["control"] = subnet.get("control", "auto")
+ subnet_inet = "inet"
if subnet_is_ipv6(subnet):
- subnet_inet += '6'
+ subnet_inet += "6"
else:
- ipv4_subnet_mtu = subnet.get('mtu')
- iface['inet'] = subnet_inet
- if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or
- subnet['type'] == 'ipv6_dhcpv6-stateful'):
+ ipv4_subnet_mtu = subnet.get("mtu")
+ iface["inet"] = subnet_inet
+ if (
+ subnet["type"] == "dhcp4"
+ or subnet["type"] == "dhcp6"
+ or subnet["type"] == "ipv6_dhcpv6-stateful"
+ ):
# Configure network settings using DHCP or DHCPv6
- iface['mode'] = 'dhcp'
+ iface["mode"] = "dhcp"
if accept_ra is not None:
# Accept router advertisements (0=off, 1=on)
- iface['accept_ra'] = '1' if accept_ra else '0'
- elif subnet['type'] == 'ipv6_dhcpv6-stateless':
+ iface["accept_ra"] = "1" if accept_ra else "0"
+ elif subnet["type"] == "ipv6_dhcpv6-stateless":
# Configure network settings using SLAAC from RAs
- iface['mode'] = 'auto'
+ iface["mode"] = "auto"
# Use stateless DHCPv6 (0=off, 1=on)
- iface['dhcp'] = '1'
- elif subnet['type'] == 'ipv6_slaac':
+ iface["dhcp"] = "1"
+ elif subnet["type"] == "ipv6_slaac":
# Configure network settings using SLAAC from RAs
- iface['mode'] = 'auto'
+ iface["mode"] = "auto"
# Use stateless DHCPv6 (0=off, 1=on)
- iface['dhcp'] = '0'
+ iface["dhcp"] = "0"
elif subnet_is_ipv6(subnet):
# mode might be static6, eni uses 'static'
- iface['mode'] = 'static'
+ iface["mode"] = "static"
if accept_ra is not None:
# Accept router advertisements (0=off, 1=on)
- iface['accept_ra'] = '1' if accept_ra else '0'
+ iface["accept_ra"] = "1" if accept_ra else "0"
# do not emit multiple 'auto $IFACE' lines as older (precise)
# ifupdown complains
- if True in ["auto %s" % (iface['name']) in line
- for line in sections]:
- iface['control'] = 'alias'
+ if True in [
+ "auto %s" % (iface["name"]) in line for line in sections
+ ]:
+ iface["control"] = "alias"
lines = list(
_iface_start_entry(
- iface, index, render_hwaddress=render_hwaddress) +
- _iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index, ipv4_subnet_mtu)
+ iface, index, render_hwaddress=render_hwaddress
+ )
+ + _iface_add_subnet(iface, subnet)
+ + _iface_add_attrs(iface, index, ipv4_subnet_mtu)
)
- for route in subnet.get('routes', []):
+ for route in subnet.get("routes", []):
lines.extend(self._render_route(route, indent=" "))
sections.append(lines)
else:
# ifenslave docs say to auto the slave devices
lines = []
- if 'bond-master' in iface or 'bond-slaves' in iface:
+ if "bond-master" in iface or "bond-slaves" in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
lines.extend(
- _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)
+ )
sections.append(lines)
return sections
def _render_interfaces(self, network_state, render_hwaddress=False):
- '''Given state, emit etc/network/interfaces content.'''
+ """Given state, emit etc/network/interfaces content."""
# handle 'lo' specifically as we need to insert the global dns entries
# there (as that is the only interface that will be always up).
- lo = {'name': 'lo', 'type': 'physical', 'inet': 'inet',
- 'subnets': [{'type': 'loopback', 'control': 'auto'}]}
+ lo = {
+ "name": "lo",
+ "type": "physical",
+ "inet": "inet",
+ "subnets": [{"type": "loopback", "control": "auto"}],
+ }
for iface in network_state.iter_interfaces():
- if iface.get('name') == "lo":
+ if iface.get("name") == "lo":
lo = copy.deepcopy(iface)
nameservers = network_state.dns_nameservers
if nameservers:
- lo['subnets'][0]["dns_nameservers"] = (" ".join(nameservers))
+ lo["subnets"][0]["dns_nameservers"] = " ".join(nameservers)
searchdomains = network_state.dns_searchdomains
if searchdomains:
- lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
+ lo["subnets"][0]["dns_search"] = " ".join(searchdomains)
# Apply a sort order to ensure that we write out the physical
# interfaces first; this is critical for bonding
order = {
- 'loopback': 0,
- 'physical': 1,
- 'infiniband': 2,
- 'bond': 3,
- 'bridge': 4,
- 'vlan': 5,
+ "loopback": 0,
+ "physical": 1,
+ "infiniband": 2,
+ "bond": 3,
+ "bridge": 4,
+ "vlan": 5,
}
sections = []
sections.extend(self._render_iface(lo))
- for iface in sorted(network_state.iter_interfaces(),
- key=lambda k: (order[k['type']], k['name'])):
+ for iface in sorted(
+ network_state.iter_interfaces(),
+ key=lambda k: (order[k["type"]], k["name"]),
+ ):
- if iface.get('name') == "lo":
+ if iface.get("name") == "lo":
continue
sections.extend(
- self._render_iface(iface, render_hwaddress=render_hwaddress))
+ self._render_iface(iface, render_hwaddress=render_hwaddress)
+ )
for route in network_state.iter_routes():
sections.append(self._render_route(route))
- return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
+ return "\n\n".join(["\n".join(s) for s in sections]) + "\n"
def render_network_state(self, network_state, templates=None, target=None):
fpeni = subp.target_path(target, self.eni_path)
@@ -522,34 +570,38 @@ class Renderer(renderer.Renderer):
if self.netrules_path:
netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
- util.write_file(netrules,
- self._render_persistent_net(network_state))
+ util.write_file(
+ netrules, self._render_persistent_net(network_state)
+ )
def network_state_to_eni(network_state, header=None, render_hwaddress=False):
# render the provided network state, return a string of equivalent eni
- eni_path = 'etc/network/interfaces'
- renderer = Renderer(config={
- 'eni_path': eni_path,
- 'eni_header': header,
- 'netrules_path': None,
- })
+ eni_path = "etc/network/interfaces"
+ renderer = Renderer(
+ config={
+ "eni_path": eni_path,
+ "eni_header": header,
+ "netrules_path": None,
+ }
+ )
if not header:
header = ""
if not header.endswith("\n"):
header += "\n"
contents = renderer._render_interfaces(
- network_state, render_hwaddress=render_hwaddress)
+ network_state, render_hwaddress=render_hwaddress
+ )
return header + contents
def available(target=None):
- expected = ['ifquery', 'ifup', 'ifdown']
- search = ['/sbin', '/usr/sbin']
+ expected = ["ifquery", "ifup", "ifdown"]
+ search = ["/sbin", "/usr/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
- eni = subp.target_path(target, 'etc/network/interfaces')
+ eni = subp.target_path(target, "etc/network/interfaces")
if not os.path.isfile(eni):
return False
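A minimal sketch (illustrative, not part of the patch) of what the new "-host" branch added to Renderer._render_route() above emits for a /32 route; the route dict keys follow the v1 network_state schema and all values here are invented:

    from cloudinit.net.eni import Renderer

    # Hypothetical /32 host route; "netmask" is included because
    # _render_route() also inspects it for the default-route case.
    route = {
        "network": "192.168.122.5",
        "prefix": 32,
        "netmask": "255.255.255.255",
        "gateway": "10.0.0.1",
    }
    print("\n".join(Renderer()._render_route(route)))
    # Expected output, roughly:
    #   post-up route add -host 192.168.122.5/32 gw 10.0.0.1 || true
    #   pre-down route del -host 192.168.122.5/32 gw 10.0.0.1 || true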
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index 0285dfec..ec42b60c 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,59 +1,69 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
import cloudinit.net.bsd
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def __init__(self, config=None):
self._route_cpt = 0
super(Renderer, self).__init__()
def rename_interface(self, cur_name, device_name):
- self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+ self.set_rc_config_value("ifconfig_%s_name" % cur_name, device_name)
def write_config(self):
for device_name, v in self.interface_configurations.items():
+ net_config = "DHCP"
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
- else:
- self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
+ net_config = v.get("address") + " netmask " + v.get("netmask")
+ mtu = v.get("mtu")
+ if mtu:
+ net_config += " mtu %d" % mtu
+ self.set_rc_config_value("ifconfig_" + device_name, net_config)
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
return
- subp.subp(['service', 'netif', 'restart'], capture=True)
+ for dhcp_interface in self.dhcp_interfaces():
+ # Observed on DragonFlyBSD 6. If we use the "restart" parameter,
+ # the routes are not recreated.
+ subp.subp(
+ ["service", "dhclient", "stop", dhcp_interface],
+ rcs=[0, 1],
+ capture=True,
+ )
+
+ subp.subp(["service", "netif", "restart"], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
# - routing: it cannot remove the loopback route, but it will still set
# up the default route as expected.
# - dhclient: it cannot stop the dhclient started by the netif service.
# In both cases, the situation is OK, and we can proceed.
- subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+ subp.subp(["service", "routing", "restart"], capture=True, rcs=[0, 1])
for dhcp_interface in self.dhcp_interfaces():
- subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
- rcs=[0, 1],
- capture=True)
+ subp.subp(
+ ["service", "dhclient", "start", dhcp_interface],
+ rcs=[0, 1],
+ capture=True,
+ )
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- self.set_rc_config_value('defaultrouter', gateway)
+ if network == "0.0.0.0":
+ self.set_rc_config_value("defaultrouter", gateway)
else:
- route_name = 'route_net%d' % self._route_cpt
+ route_name = "route_net%d" % self._route_cpt
route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
self.set_rc_config_value(route_name, route_cmd)
self._route_cpt += 1
def available(target=None):
- return util.is_FreeBSD()
+ return util.is_FreeBSD() or util.is_DragonFlyBSD()
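For context, a minimal sketch (assumed values, not part of the patch) of the rc.conf string the reworked FreeBSD write_config() now builds for a static interface with an MTU; the interface name and addresses are invented:

    v = {"address": "192.0.2.10", "netmask": "255.255.255.0", "mtu": 9000}
    net_config = v.get("address") + " netmask " + v.get("netmask")
    mtu = v.get("mtu")
    if mtu:
        net_config += " mtu %d" % mtu
    # set_rc_config_value("ifconfig_vtnet0", net_config) would then persist
    # something like:
    print('ifconfig_vtnet0="%s"' % net_config)
    # -> ifconfig_vtnet0="192.0.2.10 netmask 255.255.255.0 mtu 9000"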
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 71b38ee6..3d6b85b7 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -1,43 +1,42 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import cloudinit.net.bsd
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def __init__(self, config=None):
super(Renderer, self).__init__()
def write_config(self):
if self.dhcp_interfaces():
- self.set_rc_config_value('dhcpcd', 'YES')
+ self.set_rc_config_value("dhcpcd", "YES")
self.set_rc_config_value(
- 'dhcpcd_flags',
- ' '.join(self.dhcp_interfaces())
+ "dhcpcd_flags", " ".join(self.dhcp_interfaces())
)
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
+ net_config = v.get("address") + " netmask " + v.get("netmask")
+ mtu = v.get("mtu")
+ if mtu:
+ net_config += " mtu %d" % mtu
+ self.set_rc_config_value("ifconfig_" + device_name, net_config)
def start_services(self, run=False):
if not run:
LOG.debug("netbsd generate postcmd disabled")
return
- subp.subp(['service', 'network', 'restart'], capture=True)
+ subp.subp(["service", "network", "restart"], capture=True)
if self.dhcp_interfaces():
- subp.subp(['service', 'dhcpcd', 'restart'], capture=True)
+ subp.subp(["service", "dhcpcd", "restart"], capture=True)
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- self.set_rc_config_value('defaultroute', gateway)
+ if network == "0.0.0.0":
+ self.set_rc_config_value("defaultroute", gateway)
def available(target=None):
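Similarly, a small sketch (invented interface names, not part of the patch) of how the NetBSD write_config() above composes the dhcpcd entries when two interfaces use DHCP:

    dhcp_interfaces = ["vioif0", "vioif1"]
    # Roughly what set_rc_config_value("dhcpcd", ...) and
    # set_rc_config_value("dhcpcd_flags", ...) end up recording:
    print("dhcpcd=YES")
    print('dhcpcd_flags="%s"' % " ".join(dhcp_interfaces))
    # -> dhcpcd=YES
    # -> dhcpcd_flags="vioif0 vioif1"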
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 53347c83..57ba2d9a 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -3,15 +3,18 @@
import copy
import os
-from . import renderer
-from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
-
from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import safeyaml
+from cloudinit import safeyaml, subp, util
from cloudinit.net import SYS_CLASS_NET, get_devicelist
+from . import renderer
+from .network_state import (
+ IPV6_DYNAMIC_TYPES,
+ NET_CONFIG_TO_V2,
+ NetworkState,
+ subnet_is_ipv6,
+)
+
KNOWN_SNAPD_CONFIG = b"""\
# This is the initial network config.
# It can be overwritten by cloud-init or console-conf.
@@ -32,8 +35,11 @@ LOG = logging.getLogger(__name__)
def _get_params_dict_by_match(config, match):
- return dict((key, value) for (key, value) in config.items()
- if key.startswith(match))
+ return dict(
+ (key, value)
+ for (key, value) in config.items()
+ if key.startswith(match)
+ )
def _extract_addresses(config, entry, ifname, features=None):
@@ -73,14 +79,16 @@ def _extract_addresses(config, entry, ifname, features=None):
"""
- def _listify(obj, token=' '):
+ def _listify(obj, token=" "):
"Helper to convert strings to list of strings, handle single string"
if not obj or type(obj) not in [str]:
return obj
if token in obj:
return obj.split(token)
else:
- return [obj, ]
+ return [
+ obj,
+ ]
if features is None:
features = []
@@ -88,78 +96,85 @@ def _extract_addresses(config, entry, ifname, features=None):
routes = []
nameservers = []
searchdomains = []
- subnets = config.get('subnets', [])
+ subnets = config.get("subnets", [])
if subnets is None:
subnets = []
for subnet in subnets:
- sn_type = subnet.get('type')
- if sn_type.startswith('dhcp'):
- if sn_type == 'dhcp':
- sn_type += '4'
+ sn_type = subnet.get("type")
+ if sn_type.startswith("dhcp"):
+ if sn_type == "dhcp":
+ sn_type += "4"
entry.update({sn_type: True})
elif sn_type in IPV6_DYNAMIC_TYPES:
- entry.update({'dhcp6': True})
- elif sn_type in ['static', 'static6']:
- addr = "%s" % subnet.get('address')
- if 'prefix' in subnet:
- addr += "/%d" % subnet.get('prefix')
- if 'gateway' in subnet and subnet.get('gateway'):
- gateway = subnet.get('gateway')
+ entry.update({"dhcp6": True})
+ elif sn_type in ["static", "static6"]:
+ addr = "%s" % subnet.get("address")
+ if "prefix" in subnet:
+ addr += "/%d" % subnet.get("prefix")
+ if "gateway" in subnet and subnet.get("gateway"):
+ gateway = subnet.get("gateway")
if ":" in gateway:
- entry.update({'gateway6': gateway})
+ entry.update({"gateway6": gateway})
else:
- entry.update({'gateway4': gateway})
- if 'dns_nameservers' in subnet:
- nameservers += _listify(subnet.get('dns_nameservers', []))
- if 'dns_search' in subnet:
- searchdomains += _listify(subnet.get('dns_search', []))
- if 'mtu' in subnet:
- mtukey = 'mtu'
- if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features:
- mtukey = 'ipv6-mtu'
- entry.update({mtukey: subnet.get('mtu')})
- for route in subnet.get('routes', []):
- to_net = "%s/%s" % (route.get('network'),
- route.get('prefix'))
+ entry.update({"gateway4": gateway})
+ if "dns_nameservers" in subnet:
+ nameservers += _listify(subnet.get("dns_nameservers", []))
+ if "dns_search" in subnet:
+ searchdomains += _listify(subnet.get("dns_search", []))
+ if "mtu" in subnet:
+ mtukey = "mtu"
+ if subnet_is_ipv6(subnet) and "ipv6-mtu" in features:
+ mtukey = "ipv6-mtu"
+ entry.update({mtukey: subnet.get("mtu")})
+ for route in subnet.get("routes", []):
+ to_net = "%s/%s" % (route.get("network"), route.get("prefix"))
new_route = {
- 'via': route.get('gateway'),
- 'to': to_net,
+ "via": route.get("gateway"),
+ "to": to_net,
}
- if 'metric' in route:
- new_route.update({'metric': route.get('metric', 100)})
+ if "metric" in route:
+ new_route.update({"metric": route.get("metric", 100)})
routes.append(new_route)
addresses.append(addr)
- if 'mtu' in config:
- entry_mtu = entry.get('mtu')
- if entry_mtu and config['mtu'] != entry_mtu:
+ if "mtu" in config:
+ entry_mtu = entry.get("mtu")
+ if entry_mtu and config["mtu"] != entry_mtu:
LOG.warning(
"Network config: ignoring %s device-level mtu:%s because"
" ipv4 subnet-level mtu:%s provided.",
- ifname, config['mtu'], entry_mtu)
+ ifname,
+ config["mtu"],
+ entry_mtu,
+ )
else:
- entry['mtu'] = config['mtu']
+ entry["mtu"] = config["mtu"]
if len(addresses) > 0:
- entry.update({'addresses': addresses})
+ entry.update({"addresses": addresses})
if len(routes) > 0:
- entry.update({'routes': routes})
+ entry.update({"routes": routes})
if len(nameservers) > 0:
- ns = {'addresses': nameservers}
- entry.update({'nameservers': ns})
+ ns = {"addresses": nameservers}
+ entry.update({"nameservers": ns})
if len(searchdomains) > 0:
- ns = entry.get('nameservers', {})
- ns.update({'search': searchdomains})
- entry.update({'nameservers': ns})
- if 'accept-ra' in config and config['accept-ra'] is not None:
- entry.update({'accept-ra': util.is_true(config.get('accept-ra'))})
+ ns = entry.get("nameservers", {})
+ ns.update({"search": searchdomains})
+ entry.update({"nameservers": ns})
+ if "accept-ra" in config and config["accept-ra"] is not None:
+ entry.update({"accept-ra": util.is_true(config.get("accept-ra"))})
def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
- bond_slave_names = sorted([name for (name, cfg) in interfaces.items()
- if cfg.get('bond-master', None) == bond_master])
+ bond_slave_names = sorted(
+ [
+ name
+ for (name, cfg) in interfaces.items()
+ if cfg.get("bond-master", None) == bond_master
+ ]
+ )
if len(bond_slave_names) > 0:
- entry.update({'interfaces': bond_slave_names})
+ entry.update({"interfaces": bond_slave_names})
def _clean_default(target=None):
@@ -172,13 +187,20 @@ def _clean_default(target=None):
if content != KNOWN_SNAPD_CONFIG:
return
- derived = [subp.target_path(target, f) for f in (
- 'run/systemd/network/10-netplan-all-en.network',
- 'run/systemd/network/10-netplan-all-eth.network',
- 'run/systemd/generator/netplan.stamp')]
+ derived = [
+ subp.target_path(target, f)
+ for f in (
+ "run/systemd/network/10-netplan-all-en.network",
+ "run/systemd/network/10-netplan-all-eth.network",
+ "run/systemd/generator/netplan.stamp",
+ )
+ ]
existing = [f for f in derived if os.path.isfile(f)]
- LOG.debug("removing known config '%s' and derived existing files: %s",
- tpath, existing)
+ LOG.debug(
+ "removing known config '%s' and derived existing files: %s",
+ tpath,
+ existing,
+ )
for f in [tpath] + existing:
os.unlink(f)
@@ -187,18 +209,19 @@ def _clean_default(target=None):
class Renderer(renderer.Renderer):
"""Renders network information in a /etc/netplan/network.yaml format."""
- NETPLAN_GENERATE = ['netplan', 'generate']
- NETPLAN_INFO = ['netplan', 'info']
+ NETPLAN_GENERATE = ["netplan", "generate"]
+ NETPLAN_INFO = ["netplan", "info"]
def __init__(self, config=None):
if not config:
config = {}
- self.netplan_path = config.get('netplan_path',
- 'etc/netplan/50-cloud-init.yaml')
- self.netplan_header = config.get('netplan_header', None)
- self._postcmds = config.get('postcmds', False)
- self.clean_default = config.get('clean_default', True)
- self._features = config.get('features', None)
+ self.netplan_path = config.get(
+ "netplan_path", "etc/netplan/50-cloud-init.yaml"
+ )
+ self.netplan_header = config.get("netplan_header", None)
+ self._postcmds = config.get("postcmds", False)
+ self.clean_default = config.get("clean_default", True)
+ self._features = config.get("features", None)
@property
def features(self):
@@ -206,13 +229,13 @@ class Renderer(renderer.Renderer):
try:
info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
info = util.load_yaml(info_blob)
- self._features = info['netplan.io']['features']
+ self._features = info["netplan.io"]["features"]
except subp.ProcessExecutionError:
# if the info subcommand is not present then we don't have any
# new features
pass
except (TypeError, KeyError) as e:
- LOG.debug('Failed to list features from netplan info: %s', e)
+ LOG.debug("Failed to list features from netplan info: %s", e)
return self._features
def render_network_state(self, network_state, templates=None, target=None):
@@ -244,26 +267,30 @@ class Renderer(renderer.Renderer):
def _net_setup_link(self, run=False):
"""To ensure device link properties are applied, we poke
- udev to re-evaluate networkd .link files and call
- the setup_link udev builtin command
+ udev to re-evaluate networkd .link files and call
+ the setup_link udev builtin command
"""
if not run:
LOG.debug("netplan net_setup_link postcmd disabled")
return
- setup_lnk = ['udevadm', 'test-builtin', 'net_setup_link']
- for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
- for iface in get_devicelist() if
- os.path.islink(SYS_CLASS_NET + iface)]:
+ setup_lnk = ["udevadm", "test-builtin", "net_setup_link"]
+ for cmd in [
+ setup_lnk + [SYS_CLASS_NET + iface]
+ for iface in get_devicelist()
+ if os.path.islink(SYS_CLASS_NET + iface)
+ ]:
subp.subp(cmd, capture=True)
- def _render_content(self, network_state):
+ def _render_content(self, network_state: NetworkState):
# if content already in netplan format, pass it back
if network_state.version == 2:
- LOG.debug('V2 to V2 passthrough')
- return safeyaml.dumps({'network': network_state.config},
- explicit_start=False,
- explicit_end=False)
+ LOG.debug("V2 to V2 passthrough")
+ return safeyaml.dumps(
+ {"network": network_state.config},
+ explicit_start=False,
+ explicit_end=False,
+ )
ethernets = {}
wifis = {}
@@ -272,80 +299,83 @@ class Renderer(renderer.Renderer):
vlans = {}
content = []
- interfaces = network_state._network_state.get('interfaces', [])
+ interfaces = network_state._network_state.get("interfaces", [])
nameservers = network_state.dns_nameservers
searchdomains = network_state.dns_searchdomains
for config in network_state.iter_interfaces():
- ifname = config.get('name')
+ ifname = config.get("name")
# filter None (but not False) entries up front
- ifcfg = dict((key, value) for (key, value) in config.items()
- if value is not None)
-
- if_type = ifcfg.get('type')
- if if_type == 'physical':
+ ifcfg = dict(
+ (key, value)
+ for (key, value) in config.items()
+ if value is not None
+ )
+
+ if_type = ifcfg.get("type")
+ if if_type == "physical":
# required_keys = ['name', 'mac_address']
eth = {
- 'set-name': ifname,
- 'match': ifcfg.get('match', None),
+ "set-name": ifname,
+ "match": ifcfg.get("match", None),
}
- if eth['match'] is None:
- macaddr = ifcfg.get('mac_address', None)
+ if eth["match"] is None:
+ macaddr = ifcfg.get("mac_address", None)
if macaddr is not None:
- eth['match'] = {'macaddress': macaddr.lower()}
+ eth["match"] = {"macaddress": macaddr.lower()}
else:
- del eth['match']
- del eth['set-name']
+ del eth["match"]
+ del eth["set-name"]
_extract_addresses(ifcfg, eth, ifname, self.features)
ethernets.update({ifname: eth})
- elif if_type == 'bond':
+ elif if_type == "bond":
# required_keys = ['name', 'bond_interfaces']
bond = {}
bond_config = {}
# extract bond params and drop the bond_ prefix as it's
# redundant in v2 yaml format
- v2_bond_map = NET_CONFIG_TO_V2.get('bond')
- for match in ['bond_', 'bond-']:
+ v2_bond_map = NET_CONFIG_TO_V2.get("bond")
+ for match in ["bond_", "bond-"]:
bond_params = _get_params_dict_by_match(ifcfg, match)
for (param, value) in bond_params.items():
- newname = v2_bond_map.get(param.replace('_', '-'))
+ newname = v2_bond_map.get(param.replace("_", "-"))
if newname is None:
continue
bond_config.update({newname: value})
if len(bond_config) > 0:
- bond.update({'parameters': bond_config})
- if ifcfg.get('mac_address'):
- bond['macaddress'] = ifcfg.get('mac_address').lower()
- slave_interfaces = ifcfg.get('bond-slaves')
- if slave_interfaces == 'none':
+ bond.update({"parameters": bond_config})
+ if ifcfg.get("mac_address"):
+ bond["macaddress"] = ifcfg.get("mac_address").lower()
+ slave_interfaces = ifcfg.get("bond-slaves")
+ if slave_interfaces == "none":
_extract_bond_slaves_by_name(interfaces, bond, ifname)
_extract_addresses(ifcfg, bond, ifname, self.features)
bonds.update({ifname: bond})
- elif if_type == 'bridge':
+ elif if_type == "bridge":
# required_keys = ['name', 'bridge_ports']
- ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
+ ports = sorted(copy.copy(ifcfg.get("bridge_ports")))
bridge = {
- 'interfaces': ports,
+ "interfaces": ports,
}
# extract bridge params and drop the bridge prefix as it's
# redundant in v2 yaml format
- match_prefix = 'bridge_'
+ match_prefix = "bridge_"
params = _get_params_dict_by_match(ifcfg, match_prefix)
br_config = {}
# v2 yaml uses different names for the keys
# and at least one value format change
- v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
+ v2_bridge_map = NET_CONFIG_TO_V2.get("bridge")
for (param, value) in params.items():
newname = v2_bridge_map.get(param)
if newname is None:
continue
br_config.update({newname: value})
- if newname in ['path-cost', 'port-priority']:
+ if newname in ["path-cost", "port-priority"]:
# <interface> <value> -> <interface>: int(<value>)
newvalue = {}
for val in value:
@@ -354,58 +384,60 @@ class Renderer(renderer.Renderer):
br_config.update({newname: newvalue})
if len(br_config) > 0:
- bridge.update({'parameters': br_config})
- if ifcfg.get('mac_address'):
- bridge['macaddress'] = ifcfg.get('mac_address').lower()
+ bridge.update({"parameters": br_config})
+ if ifcfg.get("mac_address"):
+ bridge["macaddress"] = ifcfg.get("mac_address").lower()
_extract_addresses(ifcfg, bridge, ifname, self.features)
bridges.update({ifname: bridge})
- elif if_type == 'vlan':
+ elif if_type == "vlan":
# required_keys = ['name', 'vlan_id', 'vlan-raw-device']
vlan = {
- 'id': ifcfg.get('vlan_id'),
- 'link': ifcfg.get('vlan-raw-device')
+ "id": ifcfg.get("vlan_id"),
+ "link": ifcfg.get("vlan-raw-device"),
}
- macaddr = ifcfg.get('mac_address', None)
+ macaddr = ifcfg.get("mac_address", None)
if macaddr is not None:
- vlan['macaddress'] = macaddr.lower()
+ vlan["macaddress"] = macaddr.lower()
_extract_addresses(ifcfg, vlan, ifname, self.features)
vlans.update({ifname: vlan})
# inject global nameserver values under all interfaces which
# have addresses and do not already have a DNS configuration
if nameservers or searchdomains:
- nscfg = {'addresses': nameservers, 'search': searchdomains}
+ nscfg = {"addresses": nameservers, "search": searchdomains}
for section in [ethernets, wifis, bonds, bridges, vlans]:
for _name, cfg in section.items():
- if 'nameservers' in cfg or 'addresses' not in cfg:
+ if "nameservers" in cfg or "addresses" not in cfg:
continue
- cfg.update({'nameservers': nscfg})
+ cfg.update({"nameservers": nscfg})
# workaround yaml dictionary key sorting when dumping
def _render_section(name, section):
if section:
- dump = safeyaml.dumps({name: section},
- explicit_start=False,
- explicit_end=False,
- noalias=True)
- txt = util.indent(dump, ' ' * 4)
+ dump = safeyaml.dumps(
+ {name: section},
+ explicit_start=False,
+ explicit_end=False,
+ noalias=True,
+ )
+ txt = util.indent(dump, " " * 4)
return [txt]
return []
content.append("network:\n version: 2\n")
- content += _render_section('ethernets', ethernets)
- content += _render_section('wifis', wifis)
- content += _render_section('bonds', bonds)
- content += _render_section('bridges', bridges)
- content += _render_section('vlans', vlans)
+ content += _render_section("ethernets", ethernets)
+ content += _render_section("wifis", wifis)
+ content += _render_section("bonds", bonds)
+ content += _render_section("bridges", bridges)
+ content += _render_section("vlans", vlans)
return "".join(content)
def available(target=None):
- expected = ['netplan']
- search = ['/usr/sbin', '/sbin']
+ expected = ["netplan"]
+ search = ["/usr/sbin", "/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
@@ -414,11 +446,13 @@ def available(target=None):
def network_state_to_netplan(network_state, header=None):
# render the provided network state, return a string of equivalent eni
- netplan_path = 'etc/network/50-cloud-init.yaml'
- renderer = Renderer({
- 'netplan_path': netplan_path,
- 'netplan_header': header,
- })
+ netplan_path = "etc/network/50-cloud-init.yaml"
+ renderer = Renderer(
+ {
+ "netplan_path": netplan_path,
+ "netplan_header": header,
+ }
+ )
if not header:
header = ""
if not header.endswith("\n"):
@@ -426,4 +460,5 @@ def network_state_to_netplan(network_state, header=None):
contents = renderer._render_content(network_state)
return header + contents
+
# vi: ts=4 expandtab
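A small sketch (illustrative values) of the prefix matching done by _get_params_dict_by_match() above, which picks the bridge_* options out of a v1 interface config before they are renamed via NET_CONFIG_TO_V2:

    from cloudinit.net.netplan import _get_params_dict_by_match

    ifcfg = {"bridge_stp": "off", "bridge_fd": 0, "mtu": 1500, "name": "br0"}
    print(_get_params_dict_by_match(ifcfg, "bridge_"))
    # -> {'bridge_stp': 'off', 'bridge_fd': 0}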
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e8bf9e39..7bac8adf 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -6,88 +6,75 @@
import copy
import functools
+import ipaddress
import logging
import socket
import struct
-from cloudinit import safeyaml
-from cloudinit import util
+from cloudinit import safeyaml, util
LOG = logging.getLogger(__name__)
NETWORK_STATE_VERSION = 1
-IPV6_DYNAMIC_TYPES = ['dhcp6',
- 'ipv6_slaac',
- 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']
+IPV6_DYNAMIC_TYPES = [
+ "dhcp6",
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+]
NETWORK_STATE_REQUIRED_KEYS = {
- 1: ['version', 'config', 'network_state'],
+ 1: ["version", "config", "network_state"],
}
NETWORK_V2_KEY_FILTER = [
- 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides',
- 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers',
- 'renderer', 'set-name', 'wakeonlan', 'accept-ra'
+ "addresses",
+ "dhcp4",
+ "dhcp4-overrides",
+ "dhcp6",
+ "dhcp6-overrides",
+ "gateway4",
+ "gateway6",
+ "interfaces",
+ "match",
+ "mtu",
+ "nameservers",
+ "renderer",
+ "set-name",
+ "wakeonlan",
+ "accept-ra",
]
NET_CONFIG_TO_V2 = {
- 'bond': {'bond-ad-select': 'ad-select',
- 'bond-arp-interval': 'arp-interval',
- 'bond-arp-ip-target': 'arp-ip-target',
- 'bond-arp-validate': 'arp-validate',
- 'bond-downdelay': 'down-delay',
- 'bond-fail-over-mac': 'fail-over-mac-policy',
- 'bond-lacp-rate': 'lacp-rate',
- 'bond-miimon': 'mii-monitor-interval',
- 'bond-min-links': 'min-links',
- 'bond-mode': 'mode',
- 'bond-num-grat-arp': 'gratuitious-arp',
- 'bond-primary': 'primary',
- 'bond-primary-reselect': 'primary-reselect-policy',
- 'bond-updelay': 'up-delay',
- 'bond-xmit-hash-policy': 'transmit-hash-policy'},
- 'bridge': {'bridge_ageing': 'ageing-time',
- 'bridge_bridgeprio': 'priority',
- 'bridge_fd': 'forward-delay',
- 'bridge_gcint': None,
- 'bridge_hello': 'hello-time',
- 'bridge_maxage': 'max-age',
- 'bridge_maxwait': None,
- 'bridge_pathcost': 'path-cost',
- 'bridge_portprio': 'port-priority',
- 'bridge_stp': 'stp',
- 'bridge_waitport': None}}
-
-
-def parse_net_config_data(net_config, skip_broken=True):
- """Parses the config, returns NetworkState object
-
- :param net_config: curtin network config dict
- """
- state = None
- version = net_config.get('version')
- config = net_config.get('config')
- if version == 2:
- # v2 does not have explicit 'config' key so we
- # pass the whole net-config as-is
- config = net_config
-
- if version and config is not None:
- nsi = NetworkStateInterpreter(version=version, config=config)
- nsi.parse_config(skip_broken=skip_broken)
- state = nsi.get_network_state()
-
- return state
-
-
-def parse_net_config(path, skip_broken=True):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'),
- skip_broken=skip_broken)
- return ns
+ "bond": {
+ "bond-ad-select": "ad-select",
+ "bond-arp-interval": "arp-interval",
+ "bond-arp-ip-target": "arp-ip-target",
+ "bond-arp-validate": "arp-validate",
+ "bond-downdelay": "down-delay",
+ "bond-fail-over-mac": "fail-over-mac-policy",
+ "bond-lacp-rate": "lacp-rate",
+ "bond-miimon": "mii-monitor-interval",
+ "bond-min-links": "min-links",
+ "bond-mode": "mode",
+ "bond-num-grat-arp": "gratuitious-arp",
+ "bond-primary": "primary",
+ "bond-primary-reselect": "primary-reselect-policy",
+ "bond-updelay": "up-delay",
+ "bond-xmit-hash-policy": "transmit-hash-policy",
+ },
+ "bridge": {
+ "bridge_ageing": "ageing-time",
+ "bridge_bridgeprio": "priority",
+ "bridge_fd": "forward-delay",
+ "bridge_gcint": None,
+ "bridge_hello": "hello-time",
+ "bridge_maxage": "max-age",
+ "bridge_maxwait": None,
+ "bridge_pathcost": "path-cost",
+ "bridge_portprio": "port-priority",
+ "bridge_stp": "stp",
+ "bridge_waitport": None,
+ },
+}
def from_state_file(state_file):
@@ -109,17 +96,16 @@ class InvalidCommand(Exception):
def ensure_command_keys(required_keys):
-
def wrapper(func):
-
@functools.wraps(func)
def decorator(self, command, *args, **kwargs):
if required_keys:
missing_keys = diff_keys(required_keys, command)
if missing_keys:
- raise InvalidCommand("Command missing %s of required"
- " keys %s" % (missing_keys,
- required_keys))
+ raise InvalidCommand(
+ "Command missing %s of required keys %s"
+ % (missing_keys, required_keys)
+ )
return func(self, command, *args, **kwargs)
return decorator
@@ -134,29 +120,28 @@ class CommandHandlerMeta(type):
'handle_' and on finding those will populate a class attribute mapping
so that those methods can be quickly located and called.
"""
+
def __new__(cls, name, parents, dct):
command_handlers = {}
for attr_name, attr in dct.items():
- if callable(attr) and attr_name.startswith('handle_'):
- handles_what = attr_name[len('handle_'):]
+ if callable(attr) and attr_name.startswith("handle_"):
+ handles_what = attr_name[len("handle_") :]
if handles_what:
command_handlers[handles_what] = attr
- dct['command_handlers'] = command_handlers
- return super(CommandHandlerMeta, cls).__new__(cls, name,
- parents, dct)
+ dct["command_handlers"] = command_handlers
+ return super(CommandHandlerMeta, cls).__new__(cls, name, parents, dct)
class NetworkState(object):
-
def __init__(self, network_state, version=NETWORK_STATE_VERSION):
self._network_state = copy.deepcopy(network_state)
self._version = version
- self.use_ipv6 = network_state.get('use_ipv6', False)
+ self.use_ipv6 = network_state.get("use_ipv6", False)
self._has_default_route = None
@property
def config(self):
- return self._network_state['config']
+ return self._network_state["config"]
@property
def version(self):
@@ -165,14 +150,14 @@ class NetworkState(object):
@property
def dns_nameservers(self):
try:
- return self._network_state['dns']['nameservers']
+ return self._network_state["dns"]["nameservers"]
except KeyError:
return []
@property
def dns_searchdomains(self):
try:
- return self._network_state['dns']['search']
+ return self._network_state["dns"]["search"]
except KeyError:
return []
@@ -183,7 +168,7 @@ class NetworkState(object):
return self._has_default_route
def iter_interfaces(self, filter_func=None):
- ifaces = self._network_state.get('interfaces', {})
+ ifaces = self._network_state.get("interfaces", {})
for iface in ifaces.values():
if filter_func is None:
yield iface
@@ -192,7 +177,7 @@ class NetworkState(object):
yield iface
def iter_routes(self, filter_func=None):
- for route in self._network_state.get('routes', []):
+ for route in self._network_state.get("routes", []):
if filter_func is not None:
if filter_func(route):
yield route
@@ -204,39 +189,39 @@ class NetworkState(object):
if self._is_default_route(route):
return True
for iface in self.iter_interfaces():
- for subnet in iface.get('subnets', []):
- for route in subnet.get('routes', []):
+ for subnet in iface.get("subnets", []):
+ for route in subnet.get("routes", []):
if self._is_default_route(route):
return True
return False
def _is_default_route(self, route):
- default_nets = ('::', '0.0.0.0')
+ default_nets = ("::", "0.0.0.0")
return (
- route.get('prefix') == 0
- and route.get('network') in default_nets
+ route.get("prefix") == 0 and route.get("network") in default_nets
)
class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
initial_network_state = {
- 'interfaces': {},
- 'routes': [],
- 'dns': {
- 'nameservers': [],
- 'search': [],
+ "interfaces": {},
+ "routes": [],
+ "dns": {
+ "nameservers": [],
+ "search": [],
},
- 'use_ipv6': False,
- 'config': None,
+ "use_ipv6": False,
+ "config": None,
}
def __init__(self, version=NETWORK_STATE_VERSION, config=None):
self._version = version
self._config = config
self._network_state = copy.deepcopy(self.initial_network_state)
- self._network_state['config'] = config
+ self._network_state["config"] = config
self._parsed = False
+ self._interface_dns_map = {}
@property
def network_state(self):
@@ -244,41 +229,41 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
@property
def use_ipv6(self):
- return self._network_state.get('use_ipv6')
+ return self._network_state.get("use_ipv6")
@use_ipv6.setter
def use_ipv6(self, val):
- self._network_state.update({'use_ipv6': val})
+ self._network_state.update({"use_ipv6": val})
def dump(self):
state = {
- 'version': self._version,
- 'config': self._config,
- 'network_state': self._network_state,
+ "version": self._version,
+ "config": self._config,
+ "network_state": self._network_state,
}
return safeyaml.dumps(state)
def load(self, state):
- if 'version' not in state:
- LOG.error('Invalid state, missing version field')
- raise ValueError('Invalid state, missing version field')
+ if "version" not in state:
+ LOG.error("Invalid state, missing version field")
+ raise ValueError("Invalid state, missing version field")
- required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
+ required_keys = NETWORK_STATE_REQUIRED_KEYS[state["version"]]
missing_keys = diff_keys(required_keys, state)
if missing_keys:
- msg = 'Invalid state, missing keys: %s' % (missing_keys)
+ msg = "Invalid state, missing keys: %s" % (missing_keys)
LOG.error(msg)
raise ValueError(msg)
# v1 - direct attr mapping, except version
- for key in [k for k in required_keys if k not in ['version']]:
+ for key in [k for k in required_keys if k not in ["version"]]:
setattr(self, key, state[key])
def dump_network_state(self):
return safeyaml.dumps(self._network_state)
def as_dict(self):
- return {'version': self._version, 'config': self._config}
+ return {"version": self._version, "config": self._config}
def get_network_state(self):
ns = self.network_state
@@ -294,7 +279,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def parse_config_v1(self, skip_broken=True):
for command in self._config:
- command_type = command['type']
+ command_type = command["type"]
try:
handler = self.command_handlers[command_type]
except KeyError as e:
@@ -307,13 +292,29 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if not skip_broken:
raise
else:
- LOG.warning("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning(
+ "Skipping invalid command: %s", command, exc_info=True
+ )
LOG.debug(self.dump_network_state())
+ for interface, dns in self._interface_dns_map.items():
+ iface = None
+ try:
+ iface = self._network_state["interfaces"][interface]
+ except KeyError as e:
+ raise ValueError(
+ "Nameserver specified for interface {0}, "
+ "but interface {0} does not exist!".format(interface)
+ ) from e
+ if iface:
+ nameservers, search = dns
+ iface["dns"] = {
+ "addresses": nameservers,
+ "search": search,
+ }
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
- if command_type in ['version', 'renderer']:
+ if command_type in ["version", "renderer"]:
continue
try:
handler = self.command_handlers[command_type]
@@ -328,17 +329,18 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if not skip_broken:
raise
else:
- LOG.warning("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning(
+ "Skipping invalid command: %s", command, exc_info=True
+ )
LOG.debug(self.dump_network_state())
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_loopback(self, command):
return self.handle_physical(command)
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_physical(self, command):
- '''
+ """
command = {
'type': 'physical',
'mac_address': 'c0:d6:9f:2c:e8:80',
@@ -348,119 +350,122 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
],
'accept-ra': 'true'
}
- '''
+ """
- interfaces = self._network_state.get('interfaces', {})
- iface = interfaces.get(command['name'], {})
- for param, val in command.get('params', {}).items():
+ interfaces = self._network_state.get("interfaces", {})
+ iface = interfaces.get(command["name"], {})
+ for param, val in command.get("params", {}).items():
iface.update({param: val})
# convert subnet ipv6 netmask to cidr as needed
- subnets = _normalize_subnets(command.get('subnets'))
+ subnets = _normalize_subnets(command.get("subnets"))
# automatically set 'use_ipv6' if any addresses are ipv6
if not self.use_ipv6:
for subnet in subnets:
- if (subnet.get('type').endswith('6') or
- is_ipv6_addr(subnet.get('address'))):
+ if subnet.get("type").endswith("6") or is_ipv6_addr(
+ subnet.get("address")
+ ):
self.use_ipv6 = True
break
- accept_ra = command.get('accept-ra', None)
+ accept_ra = command.get("accept-ra", None)
if accept_ra is not None:
accept_ra = util.is_true(accept_ra)
- wakeonlan = command.get('wakeonlan', None)
+ wakeonlan = command.get("wakeonlan", None)
if wakeonlan is not None:
wakeonlan = util.is_true(wakeonlan)
- iface.update({
- 'name': command.get('name'),
- 'type': command.get('type'),
- 'mac_address': command.get('mac_address'),
- 'inet': 'inet',
- 'mode': 'manual',
- 'mtu': command.get('mtu'),
- 'address': None,
- 'gateway': None,
- 'subnets': subnets,
- 'accept-ra': accept_ra,
- 'wakeonlan': wakeonlan,
- })
- self._network_state['interfaces'].update({command.get('name'): iface})
+ iface.update(
+ {
+ "name": command.get("name"),
+ "type": command.get("type"),
+ "mac_address": command.get("mac_address"),
+ "inet": "inet",
+ "mode": "manual",
+ "mtu": command.get("mtu"),
+ "address": None,
+ "gateway": None,
+ "subnets": subnets,
+ "accept-ra": accept_ra,
+ "wakeonlan": wakeonlan,
+ }
+ )
+ self._network_state["interfaces"].update({command.get("name"): iface})
self.dump_network_state()
- @ensure_command_keys(['name', 'vlan_id', 'vlan_link'])
+ @ensure_command_keys(["name", "vlan_id", "vlan_link"])
def handle_vlan(self, command):
- '''
- auto eth0.222
- iface eth0.222 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- hwaddress ether BC:76:4E:06:96:B3
- vlan-raw-device eth0
- '''
- interfaces = self._network_state.get('interfaces', {})
+ """
+ auto eth0.222
+ iface eth0.222 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ hwaddress ether BC:76:4E:06:96:B3
+ vlan-raw-device eth0
+ """
+ interfaces = self._network_state.get("interfaces", {})
self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['vlan-raw-device'] = command.get('vlan_link')
- iface['vlan_id'] = command.get('vlan_id')
- interfaces.update({iface['name']: iface})
+ iface = interfaces.get(command.get("name"), {})
+ iface["vlan-raw-device"] = command.get("vlan_link")
+ iface["vlan_id"] = command.get("vlan_id")
+ interfaces.update({iface["name"]: iface})
- @ensure_command_keys(['name', 'bond_interfaces', 'params'])
+ @ensure_command_keys(["name", "bond_interfaces", "params"])
def handle_bond(self, command):
- '''
- #/etc/network/interfaces
- auto eth0
- iface eth0 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto eth1
- iface eth1 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto bond0
- iface bond0 inet static
- address 192.168.0.10
- gateway 192.168.0.1
- netmask 255.255.255.0
- bond-slaves none
- bond-mode 802.3ad
- bond-miimon 100
- bond-downdelay 200
- bond-updelay 200
- bond-lacp-rate 4
- '''
+ """
+ #/etc/network/interfaces
+ auto eth0
+ iface eth0 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto eth1
+ iface eth1 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto bond0
+ iface bond0 inet static
+ address 192.168.0.10
+ gateway 192.168.0.1
+ netmask 255.255.255.0
+ bond-slaves none
+ bond-mode 802.3ad
+ bond-miimon 100
+ bond-downdelay 200
+ bond-updelay 200
+ bond-lacp-rate 4
+ """
self.handle_physical(command)
- interfaces = self._network_state.get('interfaces')
- iface = interfaces.get(command.get('name'), {})
- for param, val in command.get('params').items():
+ interfaces = self._network_state.get("interfaces")
+ iface = interfaces.get(command.get("name"), {})
+ for param, val in command.get("params").items():
iface.update({param: val})
- iface.update({'bond-slaves': 'none'})
- self._network_state['interfaces'].update({iface['name']: iface})
+ iface.update({"bond-slaves": "none"})
+ self._network_state["interfaces"].update({iface["name"]: iface})
# handle bond slaves
- for ifname in command.get('bond_interfaces'):
+ for ifname in command.get("bond_interfaces"):
if ifname not in interfaces:
cmd = {
- 'name': ifname,
- 'type': 'bond',
+ "name": ifname,
+ "type": "bond",
}
# inject placeholder
self.handle_physical(cmd)
- interfaces = self._network_state.get('interfaces', {})
+ interfaces = self._network_state.get("interfaces", {})
bond_if = interfaces.get(ifname)
- bond_if['bond-master'] = command.get('name')
+ bond_if["bond-master"] = command.get("name")
# copy in bond config into slave
- for param, val in command.get('params').items():
+ for param, val in command.get("params").items():
bond_if.update({param: val})
- self._network_state['interfaces'].update({ifname: bond_if})
+ self._network_state["interfaces"].update({ifname: bond_if})
- @ensure_command_keys(['name', 'bridge_interfaces'])
+ @ensure_command_keys(["name", "bridge_interfaces"])
def handle_bridge(self, command):
- '''
+ """
auto br0
iface br0 inet static
address 10.10.10.1
@@ -485,70 +490,91 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
"bridge_stp",
"bridge_waitport",
]
- '''
+ """
# find one of the bridge port ifaces to get mac_addr
# handle bridge_slaves
- interfaces = self._network_state.get('interfaces', {})
- for ifname in command.get('bridge_interfaces'):
+ interfaces = self._network_state.get("interfaces", {})
+ for ifname in command.get("bridge_interfaces"):
if ifname in interfaces:
continue
cmd = {
- 'name': ifname,
+ "name": ifname,
}
# inject placeholder
self.handle_physical(cmd)
- interfaces = self._network_state.get('interfaces', {})
+ interfaces = self._network_state.get("interfaces", {})
self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['bridge_ports'] = command['bridge_interfaces']
- for param, val in command.get('params', {}).items():
+ iface = interfaces.get(command.get("name"), {})
+ iface["bridge_ports"] = command["bridge_interfaces"]
+ for param, val in command.get("params", {}).items():
iface.update({param: val})
# convert value to boolean
- bridge_stp = iface.get('bridge_stp')
+ bridge_stp = iface.get("bridge_stp")
if bridge_stp is not None and type(bridge_stp) != bool:
- if bridge_stp in ['on', '1', 1]:
+ if bridge_stp in ["on", "1", 1]:
bridge_stp = True
- elif bridge_stp in ['off', '0', 0]:
+ elif bridge_stp in ["off", "0", 0]:
bridge_stp = False
else:
raise ValueError(
- 'Cannot convert bridge_stp value ({stp}) to'
- ' boolean'.format(stp=bridge_stp))
- iface.update({'bridge_stp': bridge_stp})
+ "Cannot convert bridge_stp value ({stp}) to"
+ " boolean".format(stp=bridge_stp)
+ )
+ iface.update({"bridge_stp": bridge_stp})
- interfaces.update({iface['name']: iface})
+ interfaces.update({iface["name"]: iface})
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_infiniband(self, command):
self.handle_physical(command)
- @ensure_command_keys(['address'])
- def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
- if 'address' in command:
- addrs = command['address']
+ def _parse_dns(self, command):
+ nameservers = []
+ search = []
+ if "address" in command:
+ addrs = command["address"]
if not type(addrs) == list:
addrs = [addrs]
for addr in addrs:
- dns['nameservers'].append(addr)
- if 'search' in command:
- paths = command['search']
+ nameservers.append(addr)
+ if "search" in command:
+ paths = command["search"]
if not isinstance(paths, list):
paths = [paths]
for path in paths:
- dns['search'].append(path)
+ search.append(path)
+ return nameservers, search
- @ensure_command_keys(['destination'])
+ @ensure_command_keys(["address"])
+ def handle_nameserver(self, command):
+ dns = self._network_state.get("dns")
+ nameservers, search = self._parse_dns(command)
+ if "interface" in command:
+ self._interface_dns_map[command["interface"]] = (
+ nameservers,
+ search,
+ )
+ else:
+ dns["nameservers"].extend(nameservers)
+ dns["search"].extend(search)
+
+ @ensure_command_keys(["address"])
+ def _handle_individual_nameserver(self, command, iface):
+ _iface = self._network_state.get("interfaces")
+ nameservers, search = self._parse_dns(command)
+ _iface[iface]["dns"] = {"nameservers": nameservers, "search": search}
+
+ @ensure_command_keys(["destination"])
def handle_route(self, command):
- self._network_state['routes'].append(_normalize_route(command))
+ self._network_state["routes"].append(_normalize_route(command))
# V2 handlers
def handle_bonds(self, command):
- '''
+ """
v2_command = {
bond0: {
'interfaces': ['interface0', 'interface1'],
@@ -575,12 +601,12 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
}
- '''
- self._handle_bond_bridge(command, cmd_type='bond')
+ """
+ self._handle_bond_bridge(command, cmd_type="bond")
def handle_bridges(self, command):
- '''
+ """
v2_command = {
br0: {
'interfaces': ['interface0', 'interface1'],
@@ -601,11 +627,11 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
}
- '''
- self._handle_bond_bridge(command, cmd_type='bridge')
+ """
+ self._handle_bond_bridge(command, cmd_type="bridge")
def handle_ethernets(self, command):
- '''
+ """
ethernets:
eno1:
match:
@@ -641,34 +667,38 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
{'type': 'dhcp4'}
]
}
- '''
+ """
for eth, cfg in command.items():
phy_cmd = {
- 'type': 'physical',
- 'name': cfg.get('set-name', eth),
+ "type": "physical",
+ "name": cfg.get("set-name", eth),
}
- match = cfg.get('match', {})
- mac_address = match.get('macaddress', None)
+ match = cfg.get("match", {})
+ mac_address = match.get("macaddress", None)
if not mac_address:
- LOG.debug('NetworkState Version2: missing "macaddress" info '
- 'in config entry: %s: %s', eth, str(cfg))
- phy_cmd['mac_address'] = mac_address
- driver = match.get('driver', None)
+ LOG.debug(
+ 'NetworkState Version2: missing "macaddress" info '
+ "in config entry: %s: %s",
+ eth,
+ str(cfg),
+ )
+ phy_cmd["mac_address"] = mac_address
+ driver = match.get("driver", None)
if driver:
- phy_cmd['params'] = {'driver': driver}
- for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']:
+ phy_cmd["params"] = {"driver": driver}
+ for key in ["mtu", "match", "wakeonlan", "accept-ra"]:
if key in cfg:
phy_cmd[key] = cfg[key]
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
- phy_cmd.update({'subnets': subnets})
+ phy_cmd.update({"subnets": subnets})
- LOG.debug('v2(ethernets) -> v1(physical):\n%s', phy_cmd)
+ LOG.debug("v2(ethernets) -> v1(physical):\n%s", phy_cmd)
self.handle_physical(phy_cmd)
def handle_vlans(self, command):
- '''
+ """
v2_vlans = {
'eth0.123': {
'id': 123,
@@ -684,135 +714,154 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
'vlan_id': 123,
'subnets': [{'type': 'dhcp4'}],
}
- '''
+ """
for vlan, cfg in command.items():
vlan_cmd = {
- 'type': 'vlan',
- 'name': vlan,
- 'vlan_id': cfg.get('id'),
- 'vlan_link': cfg.get('link'),
+ "type": "vlan",
+ "name": vlan,
+ "vlan_id": cfg.get("id"),
+ "vlan_link": cfg.get("link"),
}
- if 'mtu' in cfg:
- vlan_cmd['mtu'] = cfg['mtu']
+ if "mtu" in cfg:
+ vlan_cmd["mtu"] = cfg["mtu"]
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
- vlan_cmd.update({'subnets': subnets})
- LOG.debug('v2(vlans) -> v1(vlan):\n%s', vlan_cmd)
+ vlan_cmd.update({"subnets": subnets})
+ LOG.debug("v2(vlans) -> v1(vlan):\n%s", vlan_cmd)
self.handle_vlan(vlan_cmd)
def handle_wifis(self, command):
- LOG.warning('Wifi configuration is only available to distros with'
- ' netplan rendering support.')
+ LOG.warning(
+ "Wifi configuration is only available to distros with"
+ " netplan rendering support."
+ )
def _v2_common(self, cfg):
- LOG.debug('v2_common: handling config:\n%s', cfg)
- if 'nameservers' in cfg:
- search = cfg.get('nameservers').get('search', [])
- dns = cfg.get('nameservers').get('addresses', [])
- name_cmd = {'type': 'nameserver'}
- if len(search) > 0:
- name_cmd.update({'search': search})
- if len(dns) > 0:
- name_cmd.update({'addresses': dns})
- LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd)
- self.handle_nameserver(name_cmd)
+ LOG.debug("v2_common: handling config:\n%s", cfg)
+ for iface, dev_cfg in cfg.items():
+ if "set-name" in dev_cfg:
+ set_name_iface = dev_cfg.get("set-name")
+ if set_name_iface:
+ iface = set_name_iface
+ if "nameservers" in dev_cfg:
+ search = dev_cfg.get("nameservers").get("search", [])
+ dns = dev_cfg.get("nameservers").get("addresses", [])
+ name_cmd = {"type": "nameserver"}
+ if len(search) > 0:
+ name_cmd.update({"search": search})
+ if len(dns) > 0:
+ name_cmd.update({"address": dns})
+ self.handle_nameserver(name_cmd)
+ self._handle_individual_nameserver(name_cmd, iface)
def _handle_bond_bridge(self, command, cmd_type=None):
"""Common handler for bond and bridge types"""
# inverse mapping for v2 keynames to v1 keynames
- v2key_to_v1 = dict((v, k) for k, v in
- NET_CONFIG_TO_V2.get(cmd_type).items())
+ v2key_to_v1 = dict(
+ (v, k) for k, v in NET_CONFIG_TO_V2.get(cmd_type).items()
+ )
for item_name, item_cfg in command.items():
- item_params = dict((key, value) for (key, value) in
- item_cfg.items() if key not in
- NETWORK_V2_KEY_FILTER)
+ item_params = dict(
+ (key, value)
+ for (key, value) in item_cfg.items()
+ if key not in NETWORK_V2_KEY_FILTER
+ )
# we accept the fixed spelling, but write the old for compatibility
# Xenial does not have an updated netplan which supports the
# correct spelling. LP: #1756701
- params = item_params.get('parameters', {})
- grat_value = params.pop('gratuitous-arp', None)
+ params = item_params.get("parameters", {})
+ grat_value = params.pop("gratuitous-arp", None)
if grat_value:
- params['gratuitious-arp'] = grat_value
+ params["gratuitious-arp"] = grat_value
v1_cmd = {
- 'type': cmd_type,
- 'name': item_name,
- cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': dict((v2key_to_v1[k], v) for k, v in params.items())
+ "type": cmd_type,
+ "name": item_name,
+ cmd_type + "_interfaces": item_cfg.get("interfaces"),
+ "params": dict((v2key_to_v1[k], v) for k, v in params.items()),
}
- if 'mtu' in item_cfg:
- v1_cmd['mtu'] = item_cfg['mtu']
+ if "mtu" in item_cfg:
+ v1_cmd["mtu"] = item_cfg["mtu"]
subnets = self._v2_to_v1_ipcfg(item_cfg)
if len(subnets) > 0:
- v1_cmd.update({'subnets': subnets})
+ v1_cmd.update({"subnets": subnets})
- LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
+ LOG.debug("v2(%s) -> v1(%s):\n%s", cmd_type, cmd_type, v1_cmd)
if cmd_type == "bridge":
self.handle_bridge(v1_cmd)
elif cmd_type == "bond":
self.handle_bond(v1_cmd)
else:
- raise ValueError('Unknown command type: {cmd_type}'.format(
- cmd_type=cmd_type))
+ raise ValueError(
+ "Unknown command type: {cmd_type}".format(
+ cmd_type=cmd_type
+ )
+ )
def _v2_to_v1_ipcfg(self, cfg):
"""Common ipconfig extraction from v2 to v1 subnets array."""
def _add_dhcp_overrides(overrides, subnet):
- if 'route-metric' in overrides:
- subnet['metric'] = overrides['route-metric']
+ if "route-metric" in overrides:
+ subnet["metric"] = overrides["route-metric"]
subnets = []
- if cfg.get('dhcp4'):
- subnet = {'type': 'dhcp4'}
- _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet)
+ if cfg.get("dhcp4"):
+ subnet = {"type": "dhcp4"}
+ _add_dhcp_overrides(cfg.get("dhcp4-overrides", {}), subnet)
subnets.append(subnet)
- if cfg.get('dhcp6'):
- subnet = {'type': 'dhcp6'}
+ if cfg.get("dhcp6"):
+ subnet = {"type": "dhcp6"}
self.use_ipv6 = True
- _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet)
+ _add_dhcp_overrides(cfg.get("dhcp6-overrides", {}), subnet)
subnets.append(subnet)
gateway4 = None
gateway6 = None
nameservers = {}
- for address in cfg.get('addresses', []):
+ for address in cfg.get("addresses", []):
subnet = {
- 'type': 'static',
- 'address': address,
+ "type": "static",
+ "address": address,
}
if ":" in address:
- if 'gateway6' in cfg and gateway6 is None:
- gateway6 = cfg.get('gateway6')
- subnet.update({'gateway': gateway6})
+ if "gateway6" in cfg and gateway6 is None:
+ gateway6 = cfg.get("gateway6")
+ subnet.update({"gateway": gateway6})
else:
- if 'gateway4' in cfg and gateway4 is None:
- gateway4 = cfg.get('gateway4')
- subnet.update({'gateway': gateway4})
+ if "gateway4" in cfg and gateway4 is None:
+ gateway4 = cfg.get("gateway4")
+ subnet.update({"gateway": gateway4})
- if 'nameservers' in cfg and not nameservers:
- addresses = cfg.get('nameservers').get('addresses')
+ if "nameservers" in cfg and not nameservers:
+ addresses = cfg.get("nameservers").get("addresses")
if addresses:
- nameservers['dns_nameservers'] = addresses
- search = cfg.get('nameservers').get('search')
+ nameservers["dns_nameservers"] = addresses
+ search = cfg.get("nameservers").get("search")
if search:
- nameservers['dns_search'] = search
+ nameservers["dns_search"] = search
subnet.update(nameservers)
subnets.append(subnet)
routes = []
- for route in cfg.get('routes', []):
- routes.append(_normalize_route(
- {'destination': route.get('to'), 'gateway': route.get('via')}))
+ for route in cfg.get("routes", []):
+ routes.append(
+ _normalize_route(
+ {
+ "destination": route.get("to"),
+ "gateway": route.get("via"),
+ }
+ )
+ )
# v2 routes are bound to the interface, in v1 we add them under
# the first subnet since there isn't an equivalent interface level.
if len(subnets) and len(routes):
- subnets[0]['routes'] = routes
+ subnets[0]["routes"] = routes
return subnets
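For illustration, a hypothetical v2 ethernet entry and the v1 subnets list that _v2_to_v1_ipcfg above would derive from it (a sketch; the addresses and gateway are made-up values):

    # v2 entry: DHCP on IPv4 plus one static address with a default gateway
    v2_cfg = {
        "dhcp4": True,
        "addresses": ["192.168.1.10/24"],
        "gateway4": "192.168.1.1",
    }
    # resulting v1 subnets: one dhcp4 subnet, one static subnet carrying the gateway
    expected_v1_subnets = [
        {"type": "dhcp4"},
        {"type": "static", "address": "192.168.1.10/24", "gateway": "192.168.1.1"},
    ]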
@@ -822,18 +871,25 @@ def _normalize_subnet(subnet):
subnet = copy.deepcopy(subnet)
normal_subnet = dict((k, v) for k, v in subnet.items() if v)
- if subnet.get('type') in ('static', 'static6'):
+ if subnet.get("type") in ("static", "static6"):
normal_subnet.update(
- _normalize_net_keys(normal_subnet, address_keys=(
- 'address', 'ip_address',)))
- normal_subnet['routes'] = [_normalize_route(r)
- for r in subnet.get('routes', [])]
+ _normalize_net_keys(
+ normal_subnet,
+ address_keys=(
+ "address",
+ "ip_address",
+ ),
+ )
+ )
+ normal_subnet["routes"] = [
+ _normalize_route(r) for r in subnet.get("routes", [])
+ ]
def listify(snet, name):
if name in snet and not isinstance(snet[name], list):
snet[name] = snet[name].split()
- for k in ('dns_search', 'dns_nameservers'):
+ for k in ("dns_search", "dns_nameservers"):
listify(normal_subnet, k)
return normal_subnet
@@ -857,42 +913,52 @@ def _normalize_net_keys(network, address_keys=()):
addr_key = key
break
if not addr_key:
- message = (
- 'No config network address keys [%s] found in %s' %
- (','.join(address_keys), network))
+ message = "No config network address keys [%s] found in %s" % (
+ ",".join(address_keys),
+ network,
+ )
LOG.error(message)
raise ValueError(message)
addr = net.get(addr_key)
ipv6 = is_ipv6_addr(addr)
- netmask = net.get('netmask')
+ netmask = net.get("netmask")
if "/" in addr:
addr_part, _, maybe_prefix = addr.partition("/")
net[addr_key] = addr_part
try:
prefix = int(maybe_prefix)
except ValueError:
- # this supports input of <address>/255.255.255.0
- prefix = mask_to_net_prefix(maybe_prefix)
- elif netmask:
- prefix = mask_to_net_prefix(netmask)
- elif 'prefix' in net:
- prefix = int(net['prefix'])
+ if ipv6:
+ # this supports input of ffff:ffff:ffff::
+ prefix = ipv6_mask_to_net_prefix(maybe_prefix)
+ else:
+ # this supports input of 255.255.255.0
+ prefix = ipv4_mask_to_net_prefix(maybe_prefix)
+ elif netmask and not ipv6:
+ prefix = ipv4_mask_to_net_prefix(netmask)
+ elif netmask and ipv6:
+ prefix = ipv6_mask_to_net_prefix(netmask)
+ elif "prefix" in net:
+ prefix = int(net["prefix"])
else:
prefix = 64 if ipv6 else 24
- if 'prefix' in net and str(net['prefix']) != str(prefix):
- LOG.warning("Overwriting existing 'prefix' with '%s' in "
- "network info: %s", prefix, net)
- net['prefix'] = prefix
+ if "prefix" in net and str(net["prefix"]) != str(prefix):
+ LOG.warning(
+ "Overwriting existing 'prefix' with '%s' in network info: %s",
+ prefix,
+ net,
+ )
+ net["prefix"] = prefix
if ipv6:
# TODO: we could/maybe should add this back with the very uncommon
# 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that.
- if 'netmask' in net:
- del net['netmask']
+ if "netmask" in net:
+ del net["netmask"]
else:
- net['netmask'] = net_prefix_to_ipv4_mask(net['prefix'])
+ net["netmask"] = net_prefix_to_ipv4_mask(net["prefix"])
return net
@@ -905,25 +971,28 @@ def _normalize_route(route):
'prefix': the network prefix for address as an integer.
'metric': integer metric (only if present in input).
'netmask': netmask (string) equivalent to prefix iff network is ipv4.
- """
+ """
# Prune None-value keys. Specifically allow 0 (a valid metric).
- normal_route = dict((k, v) for k, v in route.items()
- if v not in ("", None))
- if 'destination' in normal_route:
- normal_route['network'] = normal_route['destination']
- del normal_route['destination']
+ normal_route = dict(
+ (k, v) for k, v in route.items() if v not in ("", None)
+ )
+ if "destination" in normal_route:
+ normal_route["network"] = normal_route["destination"]
+ del normal_route["destination"]
normal_route.update(
_normalize_net_keys(
- normal_route, address_keys=('network', 'destination')))
+ normal_route, address_keys=("network", "destination")
+ )
+ )
- metric = normal_route.get('metric')
+ metric = normal_route.get("metric")
if metric:
try:
- normal_route['metric'] = int(metric)
+ normal_route["metric"] = int(metric)
except ValueError as e:
raise TypeError(
- 'Route config metric {} is not an integer'.format(metric)
+ "Route config metric {} is not an integer".format(metric)
) from e
return normal_route
@@ -944,10 +1013,10 @@ def subnet_is_ipv6(subnet):
"""Common helper for checking network_state subnets for ipv6."""
# 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
# 'ipv6_slaac'
- if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES:
+ if subnet["type"].endswith("6") or subnet["type"] in IPV6_DYNAMIC_TYPES:
# This is a request either static6 type or DHCPv6.
return True
- elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
+ elif subnet["type"] == "static" and is_ipv6_addr(subnet.get("address")):
return True
return False
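A quick sketch of how subnet_is_ipv6 classifies subnets (the subnet dicts are hypothetical; the helper is assumed importable from cloudinit.net.network_state, as the sysconfig renderer below does):

    from cloudinit.net.network_state import subnet_is_ipv6

    subnet_is_ipv6({"type": "dhcp6"})                             # True: type ends in '6'
    subnet_is_ipv6({"type": "static", "address": "2001:db8::1"})  # True: static with an IPv6 address
    subnet_is_ipv6({"type": "static", "address": "10.0.0.2"})     # False: static IPv4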
@@ -959,7 +1028,8 @@ def net_prefix_to_ipv4_mask(prefix):
24 -> "255.255.255.0"
Also supports input as a string."""
mask = socket.inet_ntoa(
- struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
+ struct.pack(">I", (0xFFFFFFFF << (32 - int(prefix)) & 0xFFFFFFFF))
+ )
return mask
@@ -972,84 +1042,82 @@ def ipv4_mask_to_net_prefix(mask):
str(24) => 24
"24" => 24
"""
- if isinstance(mask, int):
- return mask
- if isinstance(mask, str):
- try:
- return int(mask)
- except ValueError:
- pass
- else:
- raise TypeError("mask '%s' is not a string or int")
-
- if '.' not in mask:
- raise ValueError("netmask '%s' does not contain a '.'" % mask)
-
- toks = mask.split(".")
- if len(toks) != 4:
- raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks)))
-
- return sum([bin(int(x)).count('1') for x in toks])
+ return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen
def ipv6_mask_to_net_prefix(mask):
"""Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.
- If 'mask' is an integer or string representation of one then
- int(mask) will be returned.
+ If the input is already an integer or a string representation of
+ an integer, then int(mask) will be returned.
+ "ffff:ffff:ffff::" => 48
+ "48" => 48
"""
-
- if isinstance(mask, int):
- return mask
- if isinstance(mask, str):
- try:
- return int(mask)
- except ValueError:
- pass
- else:
- raise TypeError("mask '%s' is not a string or int")
-
- if ':' not in mask:
- raise ValueError("mask '%s' does not have a ':'")
-
- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
- 0xfffe, 0xffff]
- prefix = 0
- for word in mask.split(':'):
- if not word or int(word, 16) == 0:
- break
- prefix += bitCount.index(int(word, 16))
-
- return prefix
-
-
-def mask_to_net_prefix(mask):
- """Return the network prefix for the netmask provided.
-
- Supports ipv4 or ipv6 netmasks."""
try:
- # if 'mask' is a prefix that is an integer.
- # then just return it.
- return int(mask)
+ # In the case the mask is already a prefix
+ prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen
+ return prefixlen
except ValueError:
+        # ValueError means the mask is an IPv6 address representation and needs
+        # conversion.
pass
- if is_ipv6_addr(mask):
- return ipv6_mask_to_net_prefix(mask)
- else:
- return ipv4_mask_to_net_prefix(mask)
+
+ netmask = ipaddress.ip_address(mask)
+ mask_int = int(netmask)
+ # If the mask is all zeroes, just return it
+ if mask_int == 0:
+ return mask_int
+
+ trailing_zeroes = min(
+ ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length()
+ )
+ leading_ones = mask_int >> trailing_zeroes
+ prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ raise ValueError("Invalid network mask '%s'" % mask)
+
+ return prefixlen
def mask_and_ipv4_to_bcast_addr(mask, ip):
"""Calculate the broadcast address from the subnet mask and ip addr.
Supports ipv4 only."""
- ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2)
+ ip_bin = int("".join([bin(int(x) + 256)[3:] for x in ip.split(".")]), 2)
mask_dec = ipv4_mask_to_net_prefix(mask)
- bcast_bin = ip_bin | (2**(32 - mask_dec) - 1)
- bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF)
- for i in range(4)[::-1]])
+ bcast_bin = ip_bin | (2 ** (32 - mask_dec) - 1)
+ bcast_str = ".".join(
+ [str(bcast_bin >> (i << 3) & 0xFF) for i in range(4)[::-1]]
+ )
return bcast_str
+def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
+ """Parses the config, returns NetworkState object
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ version = net_config.get("version")
+ config = net_config.get("config")
+ if version == 2:
+ # v2 does not have explicit 'config' key so we
+ # pass the whole net-config as-is
+ config = net_config
+
+ if version and config is not None:
+ nsi = NetworkStateInterpreter(version=version, config=config)
+ nsi.parse_config(skip_broken=skip_broken)
+ state = nsi.get_network_state()
+
+ if not state:
+ raise RuntimeError(
+ "No valid network_state object created from network config. "
+ "Did you specify the correct version?"
+ )
+
+ return state
+
+
# vi: ts=4 expandtab
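A brief sketch of the netmask/prefix helpers and the new parse_net_config_data entry point defined above (example values are hypothetical; expected results follow from the docstrings and code in this file):

    from cloudinit.net.network_state import (
        ipv4_mask_to_net_prefix,
        ipv6_mask_to_net_prefix,
        mask_and_ipv4_to_bcast_addr,
        net_prefix_to_ipv4_mask,
        parse_net_config_data,
    )

    net_prefix_to_ipv4_mask(24)                        # "255.255.255.0"
    ipv4_mask_to_net_prefix("255.255.255.0")           # 24
    ipv6_mask_to_net_prefix("ffff:ffff:ffff::")        # 48
    mask_and_ipv4_to_bcast_addr("255.255.255.0", "192.168.1.10")  # "192.168.1.255"

    # v2 configs are passed whole; v1 configs provide the nested "config" list
    state = parse_net_config_data(
        {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}
    )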
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
new file mode 100644
index 00000000..3bbeb284
--- /dev/null
+++ b/cloudinit/net/networkd.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# Author: Shreenidhi Shedi <yesshedi@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import OrderedDict
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class CfgParser:
+ def __init__(self):
+ self.conf_dict = OrderedDict(
+ {
+ "Match": [],
+ "Link": [],
+ "Network": [],
+ "DHCPv4": [],
+ "DHCPv6": [],
+ "Address": [],
+ "Route": [],
+ }
+ )
+
+ def update_section(self, sec, key, val):
+ for k in self.conf_dict.keys():
+ if k == sec:
+ self.conf_dict[k].append(key + "=" + str(val))
+ # remove duplicates from list
+ self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k]))
+ self.conf_dict[k].sort()
+
+ def get_final_conf(self):
+ contents = ""
+ for k, v in sorted(self.conf_dict.items()):
+ if not v:
+ continue
+ contents += "[" + k + "]\n"
+ for e in sorted(v):
+ contents += e + "\n"
+ contents += "\n"
+
+ return contents
+
+ def dump_data(self, target_fn):
+ if not target_fn:
+ LOG.warning("Target file not given")
+ return
+
+ contents = self.get_final_conf()
+ LOG.debug("Final content: %s", contents)
+ util.write_file(target_fn, contents)
+
+
+class Renderer(renderer.Renderer):
+ """
+ Renders network information in /etc/systemd/network
+
+ This Renderer is currently experimental and doesn't support all the
+ use cases supported by the other renderers yet.
+ """
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.resolve_conf_fn = config.get(
+ "resolve_conf_fn", "/etc/systemd/resolved.conf"
+ )
+ self.network_conf_dir = config.get(
+ "network_conf_dir", "/etc/systemd/network/"
+ )
+
+ def generate_match_section(self, iface, cfg):
+ sec = "Match"
+ match_dict = {
+ "name": "Name",
+ "driver": "Driver",
+ "mac_address": "MACAddress",
+ }
+
+ if not iface:
+ return
+
+ for k, v in match_dict.items():
+ if k in iface and iface[k]:
+ cfg.update_section(sec, v, iface[k])
+
+ return iface["name"]
+
+ def generate_link_section(self, iface, cfg):
+ sec = "Link"
+
+ if not iface:
+ return
+
+ if "mtu" in iface and iface["mtu"]:
+ cfg.update_section(sec, "MTUBytes", iface["mtu"])
+
+ def parse_routes(self, conf, cfg):
+ sec = "Route"
+ route_cfg_map = {
+ "gateway": "Gateway",
+ "network": "Destination",
+ "metric": "Metric",
+ }
+
+ # prefix is derived using netmask by network_state
+ prefix = ""
+ if "prefix" in conf:
+ prefix = "/" + str(conf["prefix"])
+
+ for k, v in conf.items():
+ if k not in route_cfg_map:
+ continue
+ if k == "network":
+ v += prefix
+ cfg.update_section(sec, route_cfg_map[k], v)
+
+ def parse_subnets(self, iface, cfg):
+ dhcp = "no"
+ sec = "Network"
+ for e in iface.get("subnets", []):
+ t = e["type"]
+ if t == "dhcp4" or t == "dhcp":
+ if dhcp == "no":
+ dhcp = "ipv4"
+ elif dhcp == "ipv6":
+ dhcp = "yes"
+ elif t == "dhcp6":
+ if dhcp == "no":
+ dhcp = "ipv6"
+ elif dhcp == "ipv4":
+ dhcp = "yes"
+ if "routes" in e and e["routes"]:
+ for i in e["routes"]:
+ self.parse_routes(i, cfg)
+ if "address" in e:
+ subnet_cfg_map = {
+ "address": "Address",
+ "gateway": "Gateway",
+ "dns_nameservers": "DNS",
+ "dns_search": "Domains",
+ }
+ for k, v in e.items():
+ if k == "address":
+ if "prefix" in e:
+ v += "/" + str(e["prefix"])
+ cfg.update_section("Address", subnet_cfg_map[k], v)
+ elif k == "gateway":
+ cfg.update_section("Route", subnet_cfg_map[k], v)
+ elif k == "dns_nameservers" or k == "dns_search":
+ cfg.update_section(sec, subnet_cfg_map[k], " ".join(v))
+
+ cfg.update_section(sec, "DHCP", dhcp)
+
+ if dhcp in ["ipv6", "yes"] and isinstance(
+ iface.get("accept-ra", ""), bool
+ ):
+ cfg.update_section(sec, "IPv6AcceptRA", iface["accept-ra"])
+
+ # This is to accommodate extra keys present in VMware config
+ def dhcp_domain(self, d, cfg):
+ for item in ["dhcp4domain", "dhcp6domain"]:
+ if item not in d:
+ continue
+ ret = str(d[item]).casefold()
+ try:
+ ret = util.translate_bool(ret)
+ ret = "yes" if ret else "no"
+ except ValueError:
+ if ret != "route":
+ LOG.warning("Invalid dhcp4domain value - %s", ret)
+ ret = "no"
+ if item == "dhcp4domain":
+ section = "DHCPv4"
+ else:
+ section = "DHCPv6"
+ cfg.update_section(section, "UseDomains", ret)
+
+ def parse_dns(self, iface, cfg, ns):
+ sec = "Network"
+
+ dns_cfg_map = {
+ "search": "Domains",
+ "nameservers": "DNS",
+ "addresses": "DNS",
+ }
+
+ dns = iface.get("dns")
+ if not dns and ns.version == 1:
+ dns = {
+ "search": ns.dns_searchdomains,
+ "nameservers": ns.dns_nameservers,
+ }
+ elif not dns and ns.version == 2:
+ return
+
+ for k, v in dns_cfg_map.items():
+ if k in dns and dns[k]:
+ cfg.update_section(sec, v, " ".join(dns[k]))
+
+ def create_network_file(self, link, conf, nwk_dir):
+ net_fn_owner = "systemd-network"
+
+ LOG.debug("Setting Networking Config for %s", link)
+
+ net_fn = nwk_dir + "10-cloud-init-" + link + ".network"
+ util.write_file(net_fn, conf)
+ util.chownbyname(net_fn, net_fn_owner, net_fn_owner)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ fp_nwkd = self.network_conf_dir
+ if target:
+ fp_nwkd = subp.target_path(target) + fp_nwkd
+
+ util.ensure_dir(os.path.dirname(fp_nwkd))
+
+ ret_dict = self._render_content(network_state)
+ for k, v in ret_dict.items():
+ self.create_network_file(k, v, fp_nwkd)
+
+ def _render_content(self, ns):
+ ret_dict = {}
+ for iface in ns.iter_interfaces():
+ cfg = CfgParser()
+
+ link = self.generate_match_section(iface, cfg)
+ self.generate_link_section(iface, cfg)
+ self.parse_subnets(iface, cfg)
+ self.parse_dns(iface, cfg, ns)
+
+ for route in ns.iter_routes():
+ self.parse_routes(route, cfg)
+
+ if ns.version == 2:
+ name = iface["name"]
+ # network state doesn't give dhcp domain info
+ # using ns.config as a workaround here
+
+ # Check to see if this interface matches against an interface
+ # from the network state that specified a set-name directive.
+ # If there is a device with a set-name directive and it has
+ # set-name value that matches the current name, then update the
+ # current name to the device's name. That will be the value in
+ # the ns.config['ethernets'] dict below.
+ for dev_name, dev_cfg in ns.config["ethernets"].items():
+ if "set-name" in dev_cfg:
+ if dev_cfg.get("set-name") == name:
+ name = dev_name
+ break
+
+ self.dhcp_domain(ns.config["ethernets"][name], cfg)
+
+ ret_dict.update({link: cfg.get_final_conf()})
+
+ return ret_dict
+
+
+def available(target=None):
+ expected = ["ip", "systemctl"]
+ search = ["/usr/sbin", "/bin"]
+ for p in expected:
+ if not subp.which(p, search=search, target=target):
+ return False
+ return True
+
+
+def network_state_to_networkd(ns):
+ renderer = Renderer({})
+ return renderer._render_content(ns)
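To illustrate the building blocks of the new networkd renderer above, a minimal sketch of CfgParser producing systemd-networkd unit content (the interface name and DHCP value are hypothetical):

    from cloudinit.net.networkd import CfgParser

    parser = CfgParser()
    parser.update_section("Match", "Name", "eth0")
    parser.update_section("Network", "DHCP", "ipv4")
    print(parser.get_final_conf())
    # [Match]
    # Name=eth0
    #
    # [Network]
    # DHCP=ipv4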
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 166d77e6..70e9f461 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -1,44 +1,58 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+import platform
+
import cloudinit.net.bsd
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def write_config(self):
for device_name, v in self.interface_configurations.items():
- if_file = 'etc/hostname.{}'.format(device_name)
+ if_file = "etc/hostname.{}".format(device_name)
fn = subp.target_path(self.target, if_file)
if device_name in self.dhcp_interfaces():
- content = 'dhcp\n'
+ content = "dhcp\n"
elif isinstance(v, dict):
try:
- content = "inet {address} {netmask}\n".format(
- address=v['address'],
- netmask=v['netmask']
+ content = "inet {address} {netmask}".format(
+ address=v["address"], netmask=v["netmask"]
)
except KeyError:
LOG.error(
- "Invalid static configuration for %s",
- device_name)
+ "Invalid static configuration for %s", device_name
+ )
+ mtu = v.get("mtu")
+ if mtu:
+ content += " mtu %d" % mtu
+ content += "\n"
util.write_file(fn, content)
def start_services(self, run=False):
+ has_dhcpleasectl = bool(int(platform.release().split(".")[0]) > 6)
if not self._postcmds:
LOG.debug("openbsd generate postcmd disabled")
return
- subp.subp(['sh', '/etc/netstart'], capture=True)
+ if has_dhcpleasectl: # OpenBSD 7.0+
+ subp.subp(["sh", "/etc/netstart"], capture=True)
+ for interface in self.dhcp_interfaces():
+ subp.subp(
+ ["dhcpleasectl", "-w", "30", interface], capture=True
+ )
+ else:
+ subp.subp(["pkill", "dhclient"], capture=True, rcs=[0, 1])
+ subp.subp(["route", "del", "default"], capture=True, rcs=[0, 1])
+ subp.subp(["route", "flush", "default"], capture=True, rcs=[0, 1])
+ subp.subp(["sh", "/etc/netstart"], capture=True)
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- if_file = 'etc/mygate'
+ if network == "0.0.0.0":
+ if_file = "etc/mygate"
fn = subp.target_path(self.target, if_file)
- content = gateway + '\n'
+ content = gateway + "\n"
util.write_file(fn, content)
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 2a61a7a8..34b74b80 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -8,26 +8,28 @@
import abc
import io
-from .network_state import parse_net_config_data
-from .udev import generate_udev_rule
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.net.udev import generate_udev_rule
def filter_by_type(match_type):
- return lambda iface: match_type == iface['type']
+ return lambda iface: match_type == iface["type"]
def filter_by_name(match_name):
- return lambda iface: match_name == iface['name']
+ return lambda iface: match_name == iface["name"]
def filter_by_attr(match_name):
return lambda iface: (match_name in iface and iface[match_name])
-filter_by_physical = filter_by_type('physical')
+filter_by_physical = filter_by_type("physical")
class Renderer(object):
+ def __init__(self, config=None):
+ pass
@staticmethod
def _render_persistent_net(network_state):
@@ -37,22 +39,27 @@ class Renderer(object):
content = io.StringIO()
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
- if 'name' in iface and iface.get('mac_address'):
- driver = iface.get('driver', None)
- content.write(generate_udev_rule(iface['name'],
- iface['mac_address'],
- driver=driver))
+ if "name" in iface and iface.get("mac_address"):
+ driver = iface.get("driver", None)
+ content.write(
+ generate_udev_rule(
+ iface["name"], iface["mac_address"], driver=driver
+ )
+ )
return content.getvalue()
@abc.abstractmethod
- def render_network_state(self, network_state, templates=None,
- target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
"""Render network state."""
- def render_network_config(self, network_config, templates=None,
- target=None):
+ def render_network_config(
+ self, network_config, templates=None, target=None
+ ):
return self.render_network_state(
network_state=parse_net_config_data(network_config),
- templates=templates, target=target)
+ templates=templates,
+ target=target,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index e2de4d55..c755f04c 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -1,27 +1,43 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from . import eni
-from . import freebsd
-from . import netbsd
-from . import netplan
-from . import RendererNotFoundError
-from . import openbsd
-from . import sysconfig
+from typing import List, Tuple, Type
+
+from . import (
+ RendererNotFoundError,
+ eni,
+ freebsd,
+ netbsd,
+ netplan,
+ networkd,
+ openbsd,
+ renderer,
+ sysconfig,
+)
NAME_TO_RENDERER = {
"eni": eni,
"freebsd": freebsd,
"netbsd": netbsd,
"netplan": netplan,
+ "networkd": networkd,
"openbsd": openbsd,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
- "netbsd", "openbsd"]
+DEFAULT_PRIORITY = [
+ "eni",
+ "sysconfig",
+ "netplan",
+ "freebsd",
+ "netbsd",
+ "openbsd",
+ "networkd",
+]
-def search(priority=None, target=None, first=False):
+def search(
+ priority=None, target=None, first=False
+) -> List[Tuple[str, Type[renderer.Renderer]]]:
if priority is None:
priority = DEFAULT_PRIORITY
@@ -30,7 +46,8 @@ def search(priority=None, target=None, first=False):
unknown = [i for i in priority if i not in available]
if unknown:
raise ValueError(
- "Unknown renderers provided in priority list: %s" % unknown)
+ "Unknown renderers provided in priority list: %s" % unknown
+ )
found = []
for name in priority:
@@ -38,13 +55,13 @@ def search(priority=None, target=None, first=False):
if render_mod.available(target):
cur = (name, render_mod.Renderer)
if first:
- return cur
+ return [cur]
found.append(cur)
return found
-def select(priority=None, target=None):
+def select(priority=None, target=None) -> Tuple[str, Type[renderer.Renderer]]:
found = search(priority, target=target, first=True)
if not found:
if priority is None:
@@ -53,8 +70,10 @@ def select(priority=None, target=None):
if target and target != "/":
tmsg = " in target=%s" % target
raise RendererNotFoundError(
- "No available network renderers found%s. Searched "
- "through list: %s" % (tmsg, priority))
- return found
+ "No available network renderers found%s. Searched through list: %s"
+ % (tmsg, priority)
+ )
+ return found[0]
+
# vi: ts=4 expandtab
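With networkd registered here, a renderer can be chosen in the usual way; a rough usage sketch (the priority list is hypothetical, and select raises RendererNotFoundError if no renderer is available on the host):

    from cloudinit.net import renderers

    # select now returns a single (name, Renderer class) tuple rather than a list
    name, renderer_cls = renderers.select(priority=["networkd", "netplan", "eni"])
    renderer = renderer_cls()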
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index a930e612..ba85c4f6 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -8,21 +8,36 @@ import re
from configobj import ConfigObj
from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
-from cloudinit.distros.parsers import networkmanager_conf
-from cloudinit.distros.parsers import resolv_conf
+from cloudinit import subp, util
+from cloudinit.distros.parsers import networkmanager_conf, resolv_conf
+from cloudinit.net import network_state
from . import renderer
from .network_state import (
- is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
+ IPV6_DYNAMIC_TYPES,
+ is_ipv6_addr,
+ net_prefix_to_ipv4_mask,
+ subnet_is_ipv6,
+)
LOG = logging.getLogger(__name__)
+KNOWN_DISTROS = [
+ "almalinux",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "rhel",
+ "rocky",
+ "suse",
+ "virtuozzo",
+]
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
-KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse']
-def _make_header(sep='#'):
+def _make_header(sep="#"):
lines = [
"Created by cloud-init on instance boot automatically, do not edit.",
"",
@@ -36,8 +51,8 @@ def _make_header(sep='#'):
def _is_default_route(route):
- default_nets = ('::', '0.0.0.0')
- return route['prefix'] == 0 and route['network'] in default_nets
+ default_nets = ("::", "0.0.0.0")
+ return route["prefix"] == 0 and route["network"] in default_nets
def _quote_value(value):
@@ -54,19 +69,19 @@ def _quote_value(value):
def enable_ifcfg_rh(path):
"""Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
config = ConfigObj(path)
- if 'main' in config:
- if 'plugins' in config['main']:
- if 'ifcfg-rh' in config['main']['plugins']:
+ if "main" in config:
+ if "plugins" in config["main"]:
+ if "ifcfg-rh" in config["main"]["plugins"]:
return
else:
- config['main']['plugins'] = []
+ config["main"]["plugins"] = []
- if isinstance(config['main']['plugins'], list):
- config['main']['plugins'].append('ifcfg-rh')
+ if isinstance(config["main"]["plugins"], list):
+ config["main"]["plugins"].append("ifcfg-rh")
else:
- config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh']
+ config["main"]["plugins"] = [config["main"]["plugins"], "ifcfg-rh"]
config.write()
- LOG.debug('Enabled ifcfg-rh NetworkManager plugins')
+ LOG.debug("Enabled ifcfg-rh NetworkManager plugins")
class ConfigMap(object):
@@ -74,8 +89,8 @@ class ConfigMap(object):
# Why does redhat prefer yes/no to true/false??
_bool_map = {
- True: 'yes',
- False: 'no',
+ True: "yes",
+ False: "no",
}
def __init__(self):
@@ -126,8 +141,7 @@ class ConfigMap(object):
class Route(ConfigMap):
"""Represents a route configuration."""
- def __init__(self, route_name, base_sysconf_dir,
- ipv4_tpl, ipv6_tpl):
+ def __init__(self, route_name, base_sysconf_dir, ipv4_tpl, ipv6_tpl):
super(Route, self).__init__()
self.last_idx = 1
self.has_set_default_ipv4 = False
@@ -138,8 +152,12 @@ class Route(ConfigMap):
self.route_fn_tpl_ipv6 = ipv6_tpl
def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir,
- self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6)
+ r = Route(
+ self._route_name,
+ self._base_sysconf_dir,
+ self.route_fn_tpl_ipv4,
+ self.route_fn_tpl_ipv6,
+ )
r._conf = self._conf.copy()
r.last_idx = self.last_idx
r.has_set_default_ipv4 = self.has_set_default_ipv4
@@ -148,20 +166,22 @@ class Route(ConfigMap):
@property
def path_ipv4(self):
- return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
+ return self.route_fn_tpl_ipv4 % (
+ {"base": self._base_sysconf_dir, "name": self._route_name}
+ )
@property
def path_ipv6(self):
- return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
+ return self.route_fn_tpl_ipv6 % (
+ {"base": self._base_sysconf_dir, "name": self._route_name}
+ )
def is_ipv6_route(self, address):
- return ':' in address
+ return ":" in address
def to_string(self, proto="ipv4"):
# only accept ipv4 and ipv6
- if proto not in ['ipv4', 'ipv6']:
+ if proto not in ["ipv4", "ipv6"]:
raise ValueError("Unknown protocol '%s'" % (str(proto)))
buf = io.StringIO()
buf.write(_make_header())
@@ -171,43 +191,61 @@ class Route(ConfigMap):
# (because Route can contain a mix of IPv4 and IPv6)
reindex = -1
for key in sorted(self._conf.keys()):
- if 'ADDRESS' in key:
- index = key.replace('ADDRESS', '')
- address_value = str(self._conf[key])
- # only accept combinations:
- # if proto ipv6 only display ipv6 routes
- # if proto ipv4 only display ipv4 routes
- # do not add ipv6 routes if proto is ipv4
- # do not add ipv4 routes if proto is ipv6
- # (this array will contain a mix of ipv4 and ipv6)
- if proto == "ipv4" and not self.is_ipv6_route(address_value):
- netmask_value = str(self._conf['NETMASK' + index])
- gateway_value = str(self._conf['GATEWAY' + index])
- # increase IPv4 index
- reindex = reindex + 1
- buf.write("%s=%s\n" % ('ADDRESS' + str(reindex),
- _quote_value(address_value)))
- buf.write("%s=%s\n" % ('GATEWAY' + str(reindex),
- _quote_value(gateway_value)))
- buf.write("%s=%s\n" % ('NETMASK' + str(reindex),
- _quote_value(netmask_value)))
- metric_key = 'METRIC' + index
- if metric_key in self._conf:
- metric_value = str(self._conf['METRIC' + index])
- buf.write("%s=%s\n" % ('METRIC' + str(reindex),
- _quote_value(metric_value)))
- elif proto == "ipv6" and self.is_ipv6_route(address_value):
- netmask_value = str(self._conf['NETMASK' + index])
- gateway_value = str(self._conf['GATEWAY' + index])
- metric_value = (
- 'metric ' + str(self._conf['METRIC' + index])
- if 'METRIC' + index in self._conf else '')
+ if "ADDRESS" not in key:
+ continue
+
+ index = key.replace("ADDRESS", "")
+ address_value = str(self._conf[key])
+ netmask_value = str(self._conf["NETMASK" + index])
+ gateway_value = str(self._conf["GATEWAY" + index])
+
+ # only accept combinations:
+ # if proto ipv6 only display ipv6 routes
+ # if proto ipv4 only display ipv4 routes
+ # do not add ipv6 routes if proto is ipv4
+ # do not add ipv4 routes if proto is ipv6
+ # (this array will contain a mix of ipv4 and ipv6)
+ if proto == "ipv4" and not self.is_ipv6_route(address_value):
+ # increase IPv4 index
+ reindex = reindex + 1
+ buf.write(
+ "%s=%s\n"
+ % ("ADDRESS" + str(reindex), _quote_value(address_value))
+ )
+ buf.write(
+ "%s=%s\n"
+ % ("GATEWAY" + str(reindex), _quote_value(gateway_value))
+ )
+ buf.write(
+ "%s=%s\n"
+ % ("NETMASK" + str(reindex), _quote_value(netmask_value))
+ )
+ metric_key = "METRIC" + index
+ if metric_key in self._conf:
+ metric_value = str(self._conf["METRIC" + index])
buf.write(
- "%s/%s via %s %s dev %s\n" % (address_value,
- netmask_value,
- gateway_value,
- metric_value,
- self._route_name))
+ "%s=%s\n"
+ % ("METRIC" + str(reindex), _quote_value(metric_value))
+ )
+ elif proto == "ipv6" and self.is_ipv6_route(address_value):
+ prefix_value = network_state.ipv6_mask_to_net_prefix(
+ netmask_value
+ )
+ metric_value = (
+ "metric " + str(self._conf["METRIC" + index])
+ if "METRIC" + index in self._conf
+ else ""
+ )
+ buf.write(
+ "%s/%s via %s %s dev %s\n"
+ % (
+ address_value,
+ prefix_value,
+ gateway_value,
+ metric_value,
+ self._route_name,
+ )
+ )
return buf.getvalue()
@@ -216,27 +254,31 @@ class NetInterface(ConfigMap):
"""Represents a sysconfig/networking-script (and its config + children)."""
iface_types = {
- 'ethernet': 'Ethernet',
- 'bond': 'Bond',
- 'bridge': 'Bridge',
- 'infiniband': 'InfiniBand',
- 'vlan': 'Vlan',
+ "ethernet": "Ethernet",
+ "bond": "Bond",
+ "bridge": "Bridge",
+ "infiniband": "InfiniBand",
+ "vlan": "Vlan",
}
- def __init__(self, iface_name, base_sysconf_dir, templates,
- kind='ethernet'):
+ def __init__(
+ self, iface_name, base_sysconf_dir, templates, kind="ethernet"
+ ):
super(NetInterface, self).__init__()
self.children = []
self.templates = templates
- route_tpl = self.templates.get('route_templates')
- self.routes = Route(iface_name, base_sysconf_dir,
- ipv4_tpl=route_tpl.get('ipv4'),
- ipv6_tpl=route_tpl.get('ipv6'))
- self.iface_fn_tpl = self.templates.get('iface_templates')
+ route_tpl = self.templates.get("route_templates")
+ self.routes = Route(
+ iface_name,
+ base_sysconf_dir,
+ ipv4_tpl=route_tpl.get("ipv4"),
+ ipv6_tpl=route_tpl.get("ipv6"),
+ )
+ self.iface_fn_tpl = self.templates.get("iface_templates")
self.kind = kind
self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
+ self._conf["DEVICE"] = iface_name
self._base_sysconf_dir = base_sysconf_dir
@property
@@ -246,7 +288,7 @@ class NetInterface(ConfigMap):
@name.setter
def name(self, iface_name):
self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
+ self._conf["DEVICE"] = iface_name
@property
def kind(self):
@@ -257,16 +299,18 @@ class NetInterface(ConfigMap):
if kind not in self.iface_types:
raise ValueError(kind)
self._kind = kind
- self._conf['TYPE'] = self.iface_types[kind]
+ self._conf["TYPE"] = self.iface_types[kind]
@property
def path(self):
- return self.iface_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self.name})
+ return self.iface_fn_tpl % (
+ {"base": self._base_sysconf_dir, "name": self.name}
+ )
def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir,
- self.templates, kind=self._kind)
+ c = NetInterface(
+ self.name, self._base_sysconf_dir, self.templates, kind=self._kind
+ )
c._conf = self._conf.copy()
if copy_children:
c.children = list(self.children)
@@ -275,7 +319,7 @@ class NetInterface(ConfigMap):
return c
def skip_key_value(self, key, val):
- if key == 'TYPE' and val == 'Vlan':
+ if key == "TYPE" and val == "Vlan":
return True
return False
@@ -289,158 +333,180 @@ class Renderer(renderer.Renderer):
# details about this)
iface_defaults = {
- 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False,
- 'BOOTPROTO': 'none'},
- 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'},
+ "rhel": {
+ "ONBOOT": True,
+ "USERCTL": False,
+ "NM_CONTROLLED": False,
+ "BOOTPROTO": "none",
+ },
+ "suse": {"BOOTPROTO": "static", "STARTMODE": "auto"},
}
cfg_key_maps = {
- 'rhel': {
- 'accept-ra': 'IPV6_FORCE_ACCEPT_RA',
- 'bridge_stp': 'STP',
- 'bridge_ageing': 'AGEING',
- 'bridge_bridgeprio': 'PRIO',
- 'mac_address': 'HWADDR',
- 'mtu': 'MTU',
+ "rhel": {
+ "accept-ra": "IPV6_FORCE_ACCEPT_RA",
+ "bridge_stp": "STP",
+ "bridge_ageing": "AGEING",
+ "bridge_bridgeprio": "PRIO",
+ "mac_address": "HWADDR",
+ "mtu": "MTU",
},
- 'suse': {
- 'bridge_stp': 'BRIDGE_STP',
- 'bridge_ageing': 'BRIDGE_AGEINGTIME',
- 'bridge_bridgeprio': 'BRIDGE_PRIORITY',
- 'mac_address': 'LLADDR',
- 'mtu': 'MTU',
+ "suse": {
+ "bridge_stp": "BRIDGE_STP",
+ "bridge_ageing": "BRIDGE_AGEINGTIME",
+ "bridge_bridgeprio": "BRIDGE_PRIORITY",
+ "mac_address": "LLADDR",
+ "mtu": "MTU",
},
}
# If these keys exist, then their values will be used to form
- # a BONDING_OPTS grouping; otherwise no grouping will be set.
- bond_tpl_opts = tuple([
- ('bond_mode', "mode=%s"),
- ('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
- ('bond_miimon', "miimon=%s"),
- ('bond_min_links', "min_links=%s"),
- ('bond_arp_interval', "arp_interval=%s"),
- ('bond_arp_ip_target', "arp_ip_target=%s"),
- ('bond_arp_validate', "arp_validate=%s"),
- ('bond_ad_select', "ad_select=%s"),
- ('bond_num_grat_arp', "num_grat_arp=%s"),
- ('bond_downdelay', "downdelay=%s"),
- ('bond_updelay', "updelay=%s"),
- ('bond_lacp_rate', "lacp_rate=%s"),
- ('bond_fail_over_mac', "fail_over_mac=%s"),
- ('bond_primary', "primary=%s"),
- ('bond_primary_reselect', "primary_reselect=%s"),
- ])
+ # a BONDING_OPTS / BONDING_MODULE_OPTS grouping; otherwise no
+ # grouping will be set.
+ bond_tpl_opts = tuple(
+ [
+ ("bond_mode", "mode=%s"),
+ ("bond_xmit_hash_policy", "xmit_hash_policy=%s"),
+ ("bond_miimon", "miimon=%s"),
+ ("bond_min_links", "min_links=%s"),
+ ("bond_arp_interval", "arp_interval=%s"),
+ ("bond_arp_ip_target", "arp_ip_target=%s"),
+ ("bond_arp_validate", "arp_validate=%s"),
+ ("bond_ad_select", "ad_select=%s"),
+ ("bond_num_grat_arp", "num_grat_arp=%s"),
+ ("bond_downdelay", "downdelay=%s"),
+ ("bond_updelay", "updelay=%s"),
+ ("bond_lacp_rate", "lacp_rate=%s"),
+ ("bond_fail_over_mac", "fail_over_mac=%s"),
+ ("bond_primary", "primary=%s"),
+ ("bond_primary_reselect", "primary_reselect=%s"),
+ ]
+ )
templates = {}
def __init__(self, config=None):
if not config:
config = {}
- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig')
+ self.sysconf_dir = config.get("sysconf_dir", "etc/sysconfig")
self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
- self.dns_path = config.get('dns_path', 'etc/resolv.conf')
- nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
- self.networkmanager_conf_path = config.get('networkmanager_conf_path',
- nm_conf_path)
+ "netrules_path", "etc/udev/rules.d/70-persistent-net.rules"
+ )
+ self.dns_path = config.get("dns_path", "etc/resolv.conf")
+ nm_conf_path = "etc/NetworkManager/conf.d/99-cloud-init.conf"
+ self.networkmanager_conf_path = config.get(
+ "networkmanager_conf_path", nm_conf_path
+ )
self.templates = {
- 'control': config.get('control'),
- 'iface_templates': config.get('iface_templates'),
- 'route_templates': config.get('route_templates'),
+ "control": config.get("control"),
+ "iface_templates": config.get("iface_templates"),
+ "route_templates": config.get("route_templates"),
}
- self.flavor = config.get('flavor', 'rhel')
+ self.flavor = config.get("flavor", "rhel")
@classmethod
def _render_iface_shared(cls, iface, iface_cfg, flavor):
flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {}))
iface_cfg.update(flavor_defaults)
- for old_key in ('mac_address', 'mtu', 'accept-ra'):
+ for old_key in ("mac_address", "mtu", "accept-ra"):
old_value = iface.get(old_key)
if old_value is not None:
# only set HWADDR on physical interfaces
- if (old_key == 'mac_address' and
- iface['type'] not in ['physical', 'infiniband']):
+ if old_key == "mac_address" and iface["type"] not in [
+ "physical",
+ "infiniband",
+ ]:
continue
new_key = cls.cfg_key_maps[flavor].get(old_key)
if new_key:
iface_cfg[new_key] = old_value
# only set WakeOnLan for physical interfaces
- if ('wakeonlan' in iface and iface['wakeonlan'] and
- iface['type'] == 'physical'):
- iface_cfg['ETHTOOL_OPTS'] = 'wol g'
+ if (
+ "wakeonlan" in iface
+ and iface["wakeonlan"]
+ and iface["type"] == "physical"
+ ):
+ iface_cfg["ETHTOOL_OPTS"] = "wol g"
@classmethod
def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor):
# setting base values
- if flavor == 'suse':
- iface_cfg['BOOTPROTO'] = 'static'
- if 'BRIDGE' in iface_cfg:
- iface_cfg['BOOTPROTO'] = 'dhcp'
- iface_cfg.drop('BRIDGE')
+ if flavor == "suse":
+ iface_cfg["BOOTPROTO"] = "static"
+ if "BRIDGE" in iface_cfg:
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ iface_cfg.drop("BRIDGE")
else:
- iface_cfg['BOOTPROTO'] = 'none'
+ iface_cfg["BOOTPROTO"] = "none"
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- mtu_key = 'MTU'
- subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful':
- if flavor == 'suse':
+ mtu_key = "MTU"
+ subnet_type = subnet.get("type")
+ if subnet_type == "dhcp6" or subnet_type == "ipv6_dhcpv6-stateful":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'managed'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "managed"
+ # only if rhel AND dhcpv6 stateful
+ elif (
+ flavor == "rhel" and subnet_type == "ipv6_dhcpv6-stateful"
+ ):
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ iface_cfg["DHCPV6C"] = True
+ iface_cfg["IPV6INIT"] = True
+ iface_cfg["IPV6_AUTOCONF"] = False
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using DHCPv6
- iface_cfg['DHCPV6C'] = True
- elif subnet_type == 'ipv6_dhcpv6-stateless':
- if flavor == 'suse':
+ iface_cfg["DHCPV6C"] = True
+ elif subnet_type == "ipv6_dhcpv6-stateless":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'info'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "info"
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using SLAAC from RAs and
# optional info from dhcp server using DHCPv6
- iface_cfg['IPV6_AUTOCONF'] = True
- iface_cfg['DHCPV6C'] = True
+ iface_cfg["IPV6_AUTOCONF"] = True
+ iface_cfg["DHCPV6C"] = True
# Use Information-request to get only stateless
# configuration parameters (i.e., without address).
- iface_cfg['DHCPV6C_OPTIONS'] = '-S'
- elif subnet_type == 'ipv6_slaac':
- if flavor == 'suse':
+ iface_cfg["DHCPV6C_OPTIONS"] = "-S"
+ elif subnet_type == "ipv6_slaac":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'info'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "info"
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using SLAAC from RAs
- iface_cfg['IPV6_AUTOCONF'] = True
- elif subnet_type in ['dhcp4', 'dhcp']:
- bootproto_in = iface_cfg['BOOTPROTO']
- iface_cfg['BOOTPROTO'] = 'dhcp'
- if flavor == 'suse' and subnet_type == 'dhcp4':
+ iface_cfg["IPV6_AUTOCONF"] = True
+ elif subnet_type in ["dhcp4", "dhcp"]:
+ bootproto_in = iface_cfg["BOOTPROTO"]
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ if flavor == "suse" and subnet_type == "dhcp4":
# If dhcp6 is already specified the user wants dhcp
# for both protocols
- if bootproto_in != 'dhcp6':
+ if bootproto_in != "dhcp6":
# Only IPv4 is DHCP, IPv6 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp4'
- elif subnet_type in ['static', 'static6']:
+ iface_cfg["BOOTPROTO"] = "dhcp4"
+ elif subnet_type in ["static", "static6"]:
# RH info
# grep BOOTPROTO sysconfig.txt -A2 | head -3
# BOOTPROTO=none|bootp|dhcp
@@ -448,174 +514,189 @@ class Renderer(renderer.Renderer):
# to run on the device. Any other
# value causes any static configuration
# in the file to be applied.
- if subnet_is_ipv6(subnet) and flavor != 'suse':
- mtu_key = 'IPV6_MTU'
- iface_cfg['IPV6INIT'] = True
- if 'mtu' in subnet:
- mtu_mismatch = bool(mtu_key in iface_cfg and
- subnet['mtu'] != iface_cfg[mtu_key])
+ if subnet_is_ipv6(subnet) and flavor != "suse":
+ mtu_key = "IPV6_MTU"
+ iface_cfg["IPV6INIT"] = True
+ if "mtu" in subnet:
+ mtu_mismatch = bool(
+ mtu_key in iface_cfg
+ and subnet["mtu"] != iface_cfg[mtu_key]
+ )
if mtu_mismatch:
LOG.warning(
- 'Network config: ignoring %s device-level mtu:%s'
- ' because ipv4 subnet-level mtu:%s provided.',
- iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
+ "Network config: ignoring %s device-level mtu:%s"
+ " because ipv4 subnet-level mtu:%s provided.",
+ iface_cfg.name,
+ iface_cfg[mtu_key],
+ subnet["mtu"],
+ )
if subnet_is_ipv6(subnet):
- if flavor == 'suse':
+ if flavor == "suse":
# TODO(rjschwei) write mtu setting to
# /etc/sysctl.d/
pass
else:
- iface_cfg[mtu_key] = subnet['mtu']
+ iface_cfg[mtu_key] = subnet["mtu"]
else:
- iface_cfg[mtu_key] = subnet['mtu']
+ iface_cfg[mtu_key] = subnet["mtu"]
- if subnet_is_ipv6(subnet) and flavor == 'rhel':
- iface_cfg['IPV6_FORCE_ACCEPT_RA'] = False
- iface_cfg['IPV6_AUTOCONF'] = False
- elif subnet_type == 'manual':
- if flavor == 'suse':
+ if subnet_is_ipv6(subnet) and flavor == "rhel":
+ iface_cfg["IPV6_FORCE_ACCEPT_RA"] = False
+ iface_cfg["IPV6_AUTOCONF"] = False
+ elif subnet_type == "manual":
+ if flavor == "suse":
LOG.debug('Unknown subnet type setting "%s"', subnet_type)
else:
# If the subnet has an MTU setting, then ONBOOT=True
# to apply the setting
- iface_cfg['ONBOOT'] = mtu_key in iface_cfg
+ iface_cfg["ONBOOT"] = mtu_key in iface_cfg
else:
- raise ValueError("Unknown subnet type '%s' found"
- " for interface '%s'" % (subnet_type,
- iface_cfg.name))
- if subnet.get('control') == 'manual':
- if flavor == 'suse':
- iface_cfg['STARTMODE'] = 'manual'
+ raise ValueError(
+ "Unknown subnet type '%s' found for interface '%s'"
+ % (subnet_type, iface_cfg.name)
+ )
+ if subnet.get("control") == "manual":
+ if flavor == "suse":
+ iface_cfg["STARTMODE"] = "manual"
else:
- iface_cfg['ONBOOT'] = False
+ iface_cfg["ONBOOT"] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
ipv6_index = -1
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- subnet_type = subnet.get('type')
+ subnet_type = subnet.get("type")
# metric may apply to both dhcp and static config
- if 'metric' in subnet:
- if flavor != 'suse':
- iface_cfg['METRIC'] = subnet['metric']
- if subnet_type in ['dhcp', 'dhcp4']:
+ if "metric" in subnet:
+ if flavor != "suse":
+ iface_cfg["METRIC"] = subnet["metric"]
+ if subnet_type in ["dhcp", "dhcp4"]:
# On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global
# setting in /etc/sysconfig/network/dhcp
- if flavor != 'suse':
- if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
- iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
+ if flavor != "suse":
+ if has_default_route and iface_cfg["BOOTPROTO"] != "none":
+ iface_cfg["DHCLIENT_SET_DEFAULT_ROUTE"] = False
continue
elif subnet_type in IPV6_DYNAMIC_TYPES:
continue
- elif subnet_type in ['static', 'static6']:
+ elif subnet_type in ["static", "static6"]:
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
- ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
+ ipv6_cidr = "%s/%s" % (subnet["address"], subnet["prefix"])
if ipv6_index == 0:
- if flavor == 'suse':
- iface_cfg['IPADDR6'] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6"] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR'] = ipv6_cidr
+ iface_cfg["IPV6ADDR"] = ipv6_cidr
elif ipv6_index == 1:
- if flavor == 'suse':
- iface_cfg['IPADDR6_1'] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6_1"] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
+ iface_cfg["IPV6ADDR_SECONDARIES"] = ipv6_cidr
else:
- if flavor == 'suse':
- iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6_%d" % ipv6_index] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] += \
+ iface_cfg["IPV6ADDR_SECONDARIES"] += (
" " + ipv6_cidr
+ )
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
- iface_cfg['IPADDR' + suff] = subnet['address']
- iface_cfg['NETMASK' + suff] = \
- net_prefix_to_ipv4_mask(subnet['prefix'])
-
- if 'gateway' in subnet and flavor != 'suse':
- iface_cfg['DEFROUTE'] = True
- if is_ipv6_addr(subnet['gateway']):
- iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
+ iface_cfg["IPADDR" + suff] = subnet["address"]
+ iface_cfg["NETMASK" + suff] = net_prefix_to_ipv4_mask(
+ subnet["prefix"]
+ )
+
+ if "gateway" in subnet and flavor != "suse":
+ iface_cfg["DEFROUTE"] = True
+ if is_ipv6_addr(subnet["gateway"]):
+ iface_cfg["IPV6_DEFAULTGW"] = subnet["gateway"]
else:
- iface_cfg['GATEWAY'] = subnet['gateway']
+ iface_cfg["GATEWAY"] = subnet["gateway"]
- if 'dns_search' in subnet and flavor != 'suse':
- iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search'])
+ if "dns_search" in subnet and flavor != "suse":
+ iface_cfg["DOMAIN"] = " ".join(subnet["dns_search"])
- if 'dns_nameservers' in subnet and flavor != 'suse':
- if len(subnet['dns_nameservers']) > 3:
+ if "dns_nameservers" in subnet and flavor != "suse":
+ if len(subnet["dns_nameservers"]) > 3:
# per resolv.conf(5) MAXNS sets this to 3.
- LOG.debug("%s has %d entries in dns_nameservers. "
- "Only 3 are used.", iface_cfg.name,
- len(subnet['dns_nameservers']))
- for i, k in enumerate(subnet['dns_nameservers'][:3], 1):
- iface_cfg['DNS' + str(i)] = k
+ LOG.debug(
+ "%s has %d entries in dns_nameservers. "
+ "Only 3 are used.",
+ iface_cfg.name,
+ len(subnet["dns_nameservers"]),
+ )
+ for i, k in enumerate(subnet["dns_nameservers"][:3], 1):
+ iface_cfg["DNS" + str(i)] = k
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor):
# TODO(rjschwei): route configuration on SUSE distro happens via
# ifroute-* files, see lp#1812117. SUSE currently carries a local
# patch in their package.
- if flavor == 'suse':
+ if flavor == "suse":
return
for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- subnet_type = subnet.get('type')
- for route in subnet.get('routes', []):
- is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
+ subnet_type = subnet.get("type")
+ for route in subnet.get("routes", []):
+ is_ipv6 = subnet.get("ipv6") or is_ipv6_addr(route["gateway"])
# Any dynamic configuration method, slaac, dhcpv6-stateful/
# stateless should get router information from router RA's.
- if (_is_default_route(route) and subnet_type not in
- IPV6_DYNAMIC_TYPES):
+ if (
+ _is_default_route(route)
+ and subnet_type not in IPV6_DYNAMIC_TYPES
+ ):
if (
- (subnet.get('ipv4') and
- route_cfg.has_set_default_ipv4) or
- (subnet.get('ipv6') and
- route_cfg.has_set_default_ipv6)
+ subnet.get("ipv4") and route_cfg.has_set_default_ipv4
+ ) or (
+ subnet.get("ipv6") and route_cfg.has_set_default_ipv6
):
- raise ValueError("Duplicate declaration of default "
- "route found for interface '%s'"
- % (iface_cfg.name))
+ raise ValueError(
+ "Duplicate declaration of default "
+ "route found for interface '%s'" % (iface_cfg.name)
+ )
# NOTE(harlowja): ipv6 and ipv4 default gateways
- gw_key = 'GATEWAY0'
- nm_key = 'NETMASK0'
- addr_key = 'ADDRESS0'
+ gw_key = "GATEWAY0"
+ nm_key = "NETMASK0"
+ addr_key = "ADDRESS0"
# The owning interface provides the default route.
#
# TODO(harlowja): add validation that no other iface has
# also provided the default route?
- iface_cfg['DEFROUTE'] = True
- if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'):
- iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
- if 'gateway' in route:
+ iface_cfg["DEFROUTE"] = True
+ if iface_cfg["BOOTPROTO"] in ("dhcp", "dhcp4"):
+ iface_cfg["DHCLIENT_SET_DEFAULT_ROUTE"] = True
+ if "gateway" in route:
if is_ipv6:
- iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
+ iface_cfg["IPV6_DEFAULTGW"] = route["gateway"]
route_cfg.has_set_default_ipv6 = True
else:
- iface_cfg['GATEWAY'] = route['gateway']
+ iface_cfg["GATEWAY"] = route["gateway"]
route_cfg.has_set_default_ipv4 = True
- if 'metric' in route:
- iface_cfg['METRIC'] = route['metric']
+ if "metric" in route:
+ iface_cfg["METRIC"] = route["metric"]
else:
- gw_key = 'GATEWAY%s' % route_cfg.last_idx
- nm_key = 'NETMASK%s' % route_cfg.last_idx
- addr_key = 'ADDRESS%s' % route_cfg.last_idx
- metric_key = 'METRIC%s' % route_cfg.last_idx
+ gw_key = "GATEWAY%s" % route_cfg.last_idx
+ nm_key = "NETMASK%s" % route_cfg.last_idx
+ addr_key = "ADDRESS%s" % route_cfg.last_idx
+ metric_key = "METRIC%s" % route_cfg.last_idx
route_cfg.last_idx += 1
# add default routes only to ifcfg files, not
# to route-* or route6-*
- for (old_key, new_key) in [('gateway', gw_key),
- ('metric', metric_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
+ for (old_key, new_key) in [
+ ("gateway", gw_key),
+ ("metric", metric_key),
+ ("netmask", nm_key),
+ ("network", addr_key),
+ ]:
if old_key in route:
route_cfg[new_key] = route[old_key]
@classmethod
- def _render_bonding_opts(cls, iface_cfg, iface):
+ def _render_bonding_opts(cls, iface_cfg, iface, flavor):
bond_opts = []
for (bond_key, value_tpl) in cls.bond_tpl_opts:
# Seems like either dash or underscore is possible?
@@ -628,22 +709,35 @@ class Renderer(renderer.Renderer):
bond_opts.append(value_tpl % (bond_value))
break
if bond_opts:
- iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
+ if flavor == "suse":
+ # suse uses the sysconfig support which requires
+ # BONDING_MODULE_OPTS see
+ # https://www.kernel.org/doc/Documentation/networking/bonding.txt
+ # 3.1 Configuration with Sysconfig Support
+ iface_cfg["BONDING_MODULE_OPTS"] = " ".join(bond_opts)
+ else:
+ # rhel uses initscript support and thus requires BONDING_OPTS
+ # this is also the old default see
+ # https://www.kernel.org/doc/Documentation/networking/bonding.txt
+ # 3.2 Configuration with Initscripts Support
+ iface_cfg["BONDING_OPTS"] = " ".join(bond_opts)
@classmethod
def _render_physical_interfaces(
- cls, network_state, iface_contents, flavor
+ cls, network_state, iface_contents, flavor
):
physical_filter = renderer.filter_by_physical
for iface in network_state.iter_interfaces(physical_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_subnets = iface.get("subnets", [])
iface_cfg = iface_contents[iface_name]
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -651,33 +745,35 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents, flavor):
- bond_filter = renderer.filter_by_type('bond')
- slave_filter = renderer.filter_by_attr('bond-master')
+ bond_filter = renderer.filter_by_type("bond")
+ slave_filter = renderer.filter_by_attr("bond-master")
for iface in network_state.iter_interfaces(bond_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- cls._render_bonding_opts(iface_cfg, iface)
+ cls._render_bonding_opts(iface_cfg, iface, flavor)
# Ensure that the master interface (and any of its children)
# are actually marked as being bond types...
master_cfgs = [iface_cfg]
master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
- master_cfg['BONDING_MASTER'] = True
- if flavor != 'suse':
- master_cfg.kind = 'bond'
+ master_cfg["BONDING_MASTER"] = True
+ if flavor != "suse":
+ master_cfg.kind = "bond"
- if iface.get('mac_address'):
- if flavor == 'suse':
- iface_cfg['LLADDR'] = iface.get('mac_address')
+ if iface.get("mac_address"):
+ if flavor == "suse":
+ iface_cfg["LLADDR"] = iface.get("mac_address")
else:
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ iface_cfg["MACADDR"] = iface.get("mac_address")
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -686,54 +782,64 @@ class Renderer(renderer.Renderer):
# iter_interfaces on network-state is not sorted; to produce
# consistent numbers we need to sort.
bond_slaves = sorted(
- [slave_iface['name'] for slave_iface in
- network_state.iter_interfaces(slave_filter)
- if slave_iface['bond-master'] == iface_name])
+ [
+ slave_iface["name"]
+ for slave_iface in network_state.iter_interfaces(
+ slave_filter
+ )
+ if slave_iface["bond-master"] == iface_name
+ ]
+ )
for index, bond_slave in enumerate(bond_slaves):
- if flavor == 'suse':
- slavestr = 'BONDING_SLAVE_%s' % index
+ if flavor == "suse":
+ slavestr = "BONDING_SLAVE_%s" % index
else:
- slavestr = 'BONDING_SLAVE%s' % index
+ slavestr = "BONDING_SLAVE%s" % index
iface_cfg[slavestr] = bond_slave
slave_cfg = iface_contents[bond_slave]
- if flavor == 'suse':
- slave_cfg['BOOTPROTO'] = 'none'
- slave_cfg['STARTMODE'] = 'hotplug'
+ if flavor == "suse":
+ slave_cfg["BOOTPROTO"] = "none"
+ slave_cfg["STARTMODE"] = "hotplug"
else:
- slave_cfg['MASTER'] = iface_name
- slave_cfg['SLAVE'] = True
+ slave_cfg["MASTER"] = iface_name
+ slave_cfg["SLAVE"] = True
@classmethod
def _render_vlan_interfaces(cls, network_state, iface_contents, flavor):
- vlan_filter = renderer.filter_by_type('vlan')
+ vlan_filter = renderer.filter_by_type("vlan")
for iface in network_state.iter_interfaces(vlan_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- if flavor == 'suse':
- vlan_id = iface.get('vlan_id')
+ if flavor == "suse":
+ vlan_id = iface.get("vlan_id")
if vlan_id:
- iface_cfg['VLAN_ID'] = vlan_id
- iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')]
+ iface_cfg["VLAN_ID"] = vlan_id
+ iface_cfg["ETHERDEVICE"] = iface_name[: iface_name.rfind(".")]
else:
- iface_cfg['VLAN'] = True
- iface_cfg.kind = 'vlan'
+ iface_cfg["VLAN"] = True
+ iface_cfg.kind = "vlan"
- rdev = iface['vlan-raw-device']
- supported = _supported_vlan_names(rdev, iface['vlan_id'])
+ rdev = iface["vlan-raw-device"]
+ supported = _supported_vlan_names(rdev, iface["vlan_id"])
if iface_name not in supported:
LOG.info(
"Name '%s' for vlan '%s' is not officially supported"
"by RHEL. Supported: %s",
- iface_name, rdev, ' '.join(supported))
- iface_cfg['PHYSDEV'] = rdev
+ iface_name,
+ rdev,
+ " ".join(supported),
+ )
+ iface_cfg["PHYSDEV"] = rdev
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -742,8 +848,12 @@ class Renderer(renderer.Renderer):
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
# skip writing resolv.conf if network_state doesn't include any input.
- if not any([len(network_state.dns_nameservers),
- len(network_state.dns_searchdomains)]):
+ if not any(
+ [
+ len(network_state.dns_nameservers),
+ len(network_state.dns_searchdomains),
+ ]
+ ):
return None
content = resolv_conf.ResolvConf("")
if existing_dns_path and os.path.isfile(existing_dns_path):
@@ -752,10 +862,10 @@ class Renderer(renderer.Renderer):
content.add_nameserver(nameserver)
for searchdomain in network_state.dns_searchdomains:
content.add_search_domain(searchdomain)
- header = _make_header(';')
+ header = _make_header(";")
content_str = str(content)
if not content_str.startswith(header):
- content_str = header + '\n' + content_str
+ content_str = header + "\n" + content_str
return content_str
@staticmethod
@@ -766,7 +876,7 @@ class Renderer(renderer.Renderer):
# NetworkManager to not manage dns, so that /etc/resolv.conf
# does not get clobbered.
if network_state.dns_nameservers:
- content.set_section_keypair('main', 'dns', 'none')
+ content.set_section_keypair("main", "dns", "none")
if len(content) == 0:
return None
@@ -776,39 +886,41 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bridge_interfaces(cls, network_state, iface_contents, flavor):
bridge_key_map = {
- old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items()
- if old_k.startswith('bridge')}
- bridge_filter = renderer.filter_by_type('bridge')
+ old_k: new_k
+ for old_k, new_k in cls.cfg_key_maps[flavor].items()
+ if old_k.startswith("bridge")
+ }
+ bridge_filter = renderer.filter_by_type("bridge")
for iface in network_state.iter_interfaces(bridge_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- if flavor != 'suse':
- iface_cfg.kind = 'bridge'
+ if flavor != "suse":
+ iface_cfg.kind = "bridge"
for old_key, new_key in bridge_key_map.items():
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
- if flavor == 'suse':
- if 'BRIDGE_STP' in iface_cfg:
- if iface_cfg.get('BRIDGE_STP'):
- iface_cfg['BRIDGE_STP'] = 'on'
+ if flavor == "suse":
+ if "BRIDGE_STP" in iface_cfg:
+ if iface_cfg.get("BRIDGE_STP"):
+ iface_cfg["BRIDGE_STP"] = "on"
else:
- iface_cfg['BRIDGE_STP'] = 'off'
-
- if iface.get('mac_address'):
- key = 'MACADDR'
- if flavor == 'suse':
- key = 'LLADDRESS'
- iface_cfg[key] = iface.get('mac_address')
-
- if flavor == 'suse':
- if iface.get('bridge_ports', []):
- iface_cfg['BRIDGE_PORTS'] = '%s' % " ".join(
- iface.get('bridge_ports')
+ iface_cfg["BRIDGE_STP"] = "off"
+
+ if iface.get("mac_address"):
+ key = "MACADDR"
+ if flavor == "suse":
+ key = "LLADDRESS"
+ iface_cfg[key] = iface.get("mac_address")
+
+ if flavor == "suse":
+ if iface.get("bridge_ports", []):
+ iface_cfg["BRIDGE_PORTS"] = "%s" % " ".join(
+ iface.get("bridge_ports")
)
# Is this the right key to get all the connected interfaces?
- for bridged_iface_name in iface.get('bridge_ports', []):
+ for bridged_iface_name in iface.get("bridge_ports", []):
# Ensure all bridged interfaces are correctly tagged
# as being bridged to this interface.
bridged_cfg = iface_contents[bridged_iface_name]
@@ -816,15 +928,17 @@ class Renderer(renderer.Renderer):
bridged_cfgs.extend(bridged_cfg.children)
for bridge_cfg in bridged_cfgs:
bridge_value = iface_name
- if flavor == 'suse':
- bridge_value = 'yes'
- bridge_cfg['BRIDGE'] = bridge_value
+ if flavor == "suse":
+ bridge_value = "yes"
+ bridge_cfg["BRIDGE"] = bridge_value
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -832,37 +946,40 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_ib_interfaces(cls, network_state, iface_contents, flavor):
- ib_filter = renderer.filter_by_type('infiniband')
+ ib_filter = renderer.filter_by_type("infiniband")
for iface in network_state.iter_interfaces(ib_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'infiniband'
+ iface_cfg.kind = "infiniband"
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
)
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor,
- templates=None):
- '''Given state, return /etc/sysconfig files + contents'''
+ def _render_sysconfig(
+ cls, base_sysconf_dir, network_state, flavor, templates=None
+ ):
+ """Given state, return /etc/sysconfig files + contents"""
if not templates:
templates = cls.templates
iface_contents = {}
for iface in network_state.iter_interfaces():
- if iface['type'] == "loopback":
+ if iface["type"] == "loopback":
continue
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
- if flavor == 'suse':
- iface_cfg.drop('DEVICE')
+ if flavor == "suse":
+ iface_cfg.drop("DEVICE")
# If type detection fails it is considered a bug in SUSE
- iface_cfg.drop('TYPE')
+ iface_cfg.drop("TYPE")
cls._render_iface_shared(iface, iface_cfg, flavor)
iface_contents[iface_name] = iface_cfg
cls._render_physical_interfaces(network_state, iface_contents, flavor)
@@ -878,9 +995,10 @@ class Renderer(renderer.Renderer):
if iface_cfg:
contents[iface_cfg.path] = iface_cfg.to_string()
if iface_cfg.routes:
- for cpath, proto in zip([iface_cfg.routes.path_ipv4,
- iface_cfg.routes.path_ipv6],
- ["ipv4", "ipv6"]):
+ for cpath, proto in zip(
+ [iface_cfg.routes.path_ipv4, iface_cfg.routes.path_ipv6],
+ ["ipv4", "ipv6"],
+ ):
if cpath not in contents:
contents[cpath] = iface_cfg.routes.to_string(proto)
return contents
@@ -890,21 +1008,24 @@ class Renderer(renderer.Renderer):
templates = self.templates
file_mode = 0o644
base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
- for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state, self.flavor,
- templates=templates).items():
+ for path, data in self._render_sysconfig(
+ base_sysconf_dir, network_state, self.flavor, templates=templates
+ ).items():
util.write_file(path, data, file_mode)
if self.dns_path:
dns_path = subp.target_path(target, self.dns_path)
- resolv_content = self._render_dns(network_state,
- existing_dns_path=dns_path)
+ resolv_content = self._render_dns(
+ network_state, existing_dns_path=dns_path
+ )
if resolv_content:
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
- nm_conf_path = subp.target_path(target,
- self.networkmanager_conf_path)
- nm_conf_content = self._render_networkmanager_conf(network_state,
- templates)
+ nm_conf_path = subp.target_path(
+ target, self.networkmanager_conf_path
+ )
+ nm_conf_content = self._render_networkmanager_conf(
+ network_state, templates
+ )
if nm_conf_content:
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
@@ -914,16 +1035,17 @@ class Renderer(renderer.Renderer):
if available_nm(target=target):
enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
- sysconfig_path = subp.target_path(target, templates.get('control'))
+ sysconfig_path = subp.target_path(target, templates.get("control"))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
- if sysconfig_path.endswith('network'):
+ if sysconfig_path.endswith("network"):
util.ensure_dir(os.path.dirname(sysconfig_path))
- netcfg = [_make_header(), 'NETWORKING=yes']
+ netcfg = [_make_header(), "NETWORKING=yes"]
if network_state.use_ipv6:
- netcfg.append('NETWORKING_IPV6=yes')
- netcfg.append('IPV6_AUTOCONF=no')
- util.write_file(sysconfig_path,
- "\n".join(netcfg) + "\n", file_mode)
+ netcfg.append("NETWORKING_IPV6=yes")
+ netcfg.append("IPV6_AUTOCONF=no")
+ util.write_file(
+ sysconfig_path, "\n".join(netcfg) + "\n", file_mode
+ )
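A minimal usage sketch of this renderer, assuming a version-1 network config and RHEL-style renderer settings (in practice the distro supplies these paths):

from cloudinit.net.network_state import parse_net_config_data
from cloudinit.net.sysconfig import Renderer

renderer = Renderer(config={
    "flavor": "rhel",
    "control": "etc/sysconfig/network",
    "iface_templates": "%(base)s/network-scripts/ifcfg-%(name)s",
    "route_templates": {
        "ipv4": "%(base)s/network-scripts/route-%(name)s",
        "ipv6": "%(base)s/network-scripts/route6-%(name)s",
    },
})
state = parse_net_config_data({
    "version": 1,
    "config": [{"type": "physical", "name": "eth0",
                "subnets": [{"type": "dhcp"}]}],
})
# Writes ifcfg-eth0 (and resolv.conf / the NetworkManager drop-in when
# applicable) below the hypothetical target root.
renderer.render_network_state(state, target="/tmp/render-root")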
def _supported_vlan_names(rdev, vid):
@@ -931,27 +1053,34 @@ def _supported_vlan_names(rdev, vid):
11.5. Naming Scheme for VLAN Interfaces."""
return [
v.format(rdev=rdev, vid=int(vid))
- for v in ("{rdev}{vid:04}", "{rdev}{vid}",
- "{rdev}.{vid:04}", "{rdev}.{vid}")]
+ for v in (
+ "{rdev}{vid:04}",
+ "{rdev}{vid}",
+ "{rdev}.{vid:04}",
+ "{rdev}.{vid}",
+ )
+ ]
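A worked example of the naming helper above, assuming rdev "eth0" and VLAN id 100:

names = [
    v.format(rdev="eth0", vid=100)
    for v in ("{rdev}{vid:04}", "{rdev}{vid}", "{rdev}.{vid:04}", "{rdev}.{vid}")
]
# names == ["eth00100", "eth0100", "eth0.0100", "eth0.100"]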
def available(target=None):
sysconfig = available_sysconfig(target=target)
nm = available_nm(target=target)
- return (util.system_info()['variant'] in KNOWN_DISTROS
- and any([nm, sysconfig]))
+ return util.system_info()["variant"] in KNOWN_DISTROS and any(
+ [nm, sysconfig]
+ )
def available_sysconfig(target=None):
- expected = ['ifup', 'ifdown']
- search = ['/sbin', '/usr/sbin']
+ expected = ["ifup", "ifdown"]
+ search = ["/sbin", "/usr/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
expected_paths = [
- 'etc/sysconfig/network-scripts/network-functions',
- 'etc/sysconfig/config']
+ "etc/sysconfig/network-scripts/network-functions",
+ "etc/sysconfig/config",
+ ]
for p in expected_paths:
if os.path.isfile(subp.target_path(target, p)):
return True
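A small sketch of how the availability checks combine; it assumes the module is importable on the host being inspected:

from cloudinit.net import sysconfig

# True only when the distro variant is in KNOWN_DISTROS and either
# NetworkManager or the ifup/ifdown sysconfig tooling is detected.
if sysconfig.available():
    print("sysconfig renderer is usable on this system")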
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
deleted file mode 100644
index 74cf4b94..00000000
--- a/cloudinit/net/tests/test_dhcp.py
+++ /dev/null
@@ -1,634 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import httpretty
-import os
-import signal
-from textwrap import dedent
-
-import cloudinit.net as net
-from cloudinit.net.dhcp import (
- InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery,
- parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases,
- parse_static_routes)
-from cloudinit.util import ensure_file, write_file
-from cloudinit.tests.helpers import (
- CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call)
-
-
-class TestParseDHCPLeasesFile(CiTestCase):
-
- def test_parse_empty_lease_file_errors(self):
- """parse_dhcp_lease_file errors when file content is empty."""
- empty_file = self.tmp_path('leases')
- ensure_file(empty_file)
- with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
- parse_dhcp_lease_file(empty_file)
- error = context_manager.exception
- self.assertIn('Cannot parse empty dhcp lease file', str(error))
-
- def test_parse_malformed_lease_file_content_errors(self):
- """parse_dhcp_lease_file errors when file content isn't dhcp leases."""
- non_lease_file = self.tmp_path('leases')
- write_file(non_lease_file, 'hi mom.')
- with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
- parse_dhcp_lease_file(non_lease_file)
- error = context_manager.exception
- self.assertIn('Cannot parse dhcp lease file', str(error))
-
- def test_parse_multiple_leases(self):
- """parse_dhcp_lease_file returns a list of all leases within."""
- lease_file = self.tmp_path('leases')
- content = dedent("""
- lease {
- interface "wlp3s0";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- renew 4 2017/07/27 18:02:30;
- expire 5 2017/07/28 07:08:15;
- }
- lease {
- interface "wlp3s0";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """)
- expected = [
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
- 'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'},
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
- write_file(lease_file, content)
- self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
-
-
-class TestDHCPRFC3442(CiTestCase):
-
- def test_parse_lease_finds_rfc3442_classless_static_routes(self):
- """parse_dhcp_lease_file returns rfc3442-classless-static-routes."""
- lease_file = self.tmp_path('leases')
- content = dedent("""
- lease {
- interface "wlp3s0";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- option rfc3442-classless-static-routes 0,130,56,240,1;
- renew 4 2017/07/27 18:02:30;
- expire 5 2017/07/28 07:08:15;
- }
- """)
- expected = [
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
- 'rfc3442-classless-static-routes': '0,130,56,240,1',
- 'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'}]
- write_file(lease_file, content)
- self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
-
- def test_parse_lease_finds_classless_static_routes(self):
- """
- parse_dhcp_lease_file returns classless-static-routes
- for Centos lease format.
- """
- lease_file = self.tmp_path('leases')
- content = dedent("""
- lease {
- interface "wlp3s0";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- option classless-static-routes 0 130.56.240.1;
- renew 4 2017/07/27 18:02:30;
- expire 5 2017/07/28 07:08:15;
- }
- """)
- expected = [
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
- 'classless-static-routes': '0 130.56.240.1',
- 'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'}]
- write_file(lease_file, content)
- self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
-
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
- """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network"""
- lease = [
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
- 'rfc3442-classless-static-routes': '0,130,56,240,1',
- 'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'}]
- m_maybe.return_value = lease
- eph = net.dhcp.EphemeralDHCPv4()
- eph.obtain_lease()
- expected_kwargs = {
- 'interface': 'wlp3s0',
- 'ip': '192.168.2.74',
- 'prefix_or_mask': '255.255.255.0',
- 'broadcast': '192.168.2.255',
- 'static_routes': [('0.0.0.0/0', '130.56.240.1')],
- 'router': '192.168.2.1'}
- m_ipv4.assert_called_with(**expected_kwargs)
-
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
- """
- EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network
- for Centos Lease format
- """
- lease = [
- {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
- 'classless-static-routes': '0 130.56.240.1',
- 'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'}]
- m_maybe.return_value = lease
- eph = net.dhcp.EphemeralDHCPv4()
- eph.obtain_lease()
- expected_kwargs = {
- 'interface': 'wlp3s0',
- 'ip': '192.168.2.74',
- 'prefix_or_mask': '255.255.255.0',
- 'broadcast': '192.168.2.255',
- 'static_routes': [('0.0.0.0/0', '130.56.240.1')],
- 'router': '192.168.2.1'}
- m_ipv4.assert_called_with(**expected_kwargs)
-
-
-class TestDHCPParseStaticRoutes(CiTestCase):
-
- with_logs = True
-
- def test_parse_static_routes_empty_string(self):
- self.assertEqual([], parse_static_routes(""))
-
- def test_parse_static_routes_invalid_input_returns_empty_list(self):
- rfc3442 = "32,169,254,169,254,130,56,248"
- self.assertEqual([], parse_static_routes(rfc3442))
-
- def test_parse_static_routes_bogus_width_returns_empty_list(self):
- rfc3442 = "33,169,254,169,254,130,56,248"
- self.assertEqual([], parse_static_routes(rfc3442))
-
- def test_parse_static_routes_single_ip(self):
- rfc3442 = "32,169,254,169,254,130,56,248,255"
- self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
- parse_static_routes(rfc3442))
-
- def test_parse_static_routes_single_ip_handles_trailing_semicolon(self):
- rfc3442 = "32,169,254,169,254,130,56,248,255;"
- self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
- parse_static_routes(rfc3442))
-
- def test_parse_static_routes_default_route(self):
- rfc3442 = "0,130,56,240,1"
- self.assertEqual([('0.0.0.0/0', '130.56.240.1')],
- parse_static_routes(rfc3442))
-
- def test_parse_static_routes_class_c_b_a(self):
- class_c = "24,192,168,74,192,168,0,4"
- class_b = "16,172,16,172,16,0,4"
- class_a = "8,10,10,0,0,4"
- rfc3442 = ",".join([class_c, class_b, class_a])
- self.assertEqual(sorted([
- ("192.168.74.0/24", "192.168.0.4"),
- ("172.16.0.0/16", "172.16.0.4"),
- ("10.0.0.0/8", "10.0.0.4")
- ]), sorted(parse_static_routes(rfc3442)))
-
- def test_parse_static_routes_logs_error_truncated(self):
- bad_rfc3442 = {
- "class_c": "24,169,254,169,10",
- "class_b": "16,172,16,10",
- "class_a": "8,10,10",
- "gateway": "0,0",
- "netlen": "33,0",
- }
- for rfc3442 in bad_rfc3442.values():
- self.assertEqual([], parse_static_routes(rfc3442))
-
- logs = self.logs.getvalue()
- self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines()))
-
- def test_parse_static_routes_returns_valid_routes_until_parse_err(self):
- class_c = "24,192,168,74,192,168,0,4"
- class_b = "16,172,16,172,16,0,4"
- class_a_error = "8,10,10,0,0"
- rfc3442 = ",".join([class_c, class_b, class_a_error])
- self.assertEqual(sorted([
- ("192.168.74.0/24", "192.168.0.4"),
- ("172.16.0.0/16", "172.16.0.4"),
- ]), sorted(parse_static_routes(rfc3442)))
-
- logs = self.logs.getvalue()
- self.assertIn(rfc3442, logs.splitlines()[0])
-
- def test_redhat_format(self):
- redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1"
- self.assertEqual(sorted([
- ("191.168.128.0/24", "192.168.128.1"),
- ("0.0.0.0/0", "192.168.128.1")
- ]), sorted(parse_static_routes(redhat_format)))
-
- def test_redhat_format_with_a_space_too_much_after_comma(self):
- redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1"
- self.assertEqual(sorted([
- ("191.168.128.0/24", "192.168.128.1"),
- ("0.0.0.0/0", "192.168.128.1")
- ]), sorted(parse_static_routes(redhat_format)))
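A compact illustrative decoder for the RFC 3442 strings these tests exercise (the first octet is the prefix width, then ceil(width/8) destination octets, then four gateway octets); this is a sketch, not cloud-init's parse_static_routes:

import math

def decode_rfc3442(csv):
    """Decode comma-separated RFC 3442 routes into (cidr, gateway) tuples."""
    octets = [int(x) for x in csv.split(",")]
    routes, i = [], 0
    while i < len(octets):
        width = octets[i]
        ndest = math.ceil(width / 8)
        dest = octets[i + 1:i + 1 + ndest] + [0] * (4 - ndest)
        gateway = octets[i + 1 + ndest:i + 5 + ndest]
        routes.append(("%s/%d" % (".".join(map(str, dest)), width),
                       ".".join(map(str, gateway))))
        i += 5 + ndest
    return routes

# decode_rfc3442("0,130,56,240,1") == [("0.0.0.0/0", "130.56.240.1")]
# decode_rfc3442("24,192,168,74,192,168,0,4") == [("192.168.74.0/24", "192.168.0.4")]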
-
-
-class TestDHCPDiscoveryClean(CiTestCase):
- with_logs = True
-
- @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
- def test_no_fallback_nic_found(self, m_fallback_nic):
- """Log and do nothing when nic is absent and no fallback is found."""
- m_fallback_nic.return_value = None # No fallback nic found
- self.assertEqual([], maybe_perform_dhcp_discovery())
- self.assertIn(
- 'Skip dhcp_discovery: Unable to find fallback nic.',
- self.logs.getvalue())
-
- def test_provided_nic_does_not_exist(self):
- """When the provided nic doesn't exist, log a message and no-op."""
- self.assertEqual([], maybe_perform_dhcp_discovery('idontexist'))
- self.assertIn(
- 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.dhcp.subp.which')
- @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
- def test_absent_dhclient_command(self, m_fallback, m_which):
- """When dhclient doesn't exist in the OS, log the issue and no-op."""
- m_fallback.return_value = 'eth9'
- m_which.return_value = None # dhclient isn't found
- self.assertEqual([], maybe_perform_dhcp_discovery())
- self.assertIn(
- 'Skip dhclient configuration: No dhclient command found.',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.temp_utils.os.getuid')
- @mock.patch('cloudinit.net.dhcp.dhcp_discovery')
- @mock.patch('cloudinit.net.dhcp.subp.which')
- @mock.patch('cloudinit.net.dhcp.find_fallback_nic')
- def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
- """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
- m_uid.return_value = 0 # Fake root user for tmpdir
- m_fback.return_value = 'eth9'
- m_which.return_value = '/sbin/dhclient'
- m_dhcp.return_value = {'address': '192.168.2.2'}
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'_TMPDIR': {'new': None},
- 'os.getuid': 0},
- maybe_perform_dhcp_discovery)
- self.assertEqual({'address': '192.168.2.2'}, retval)
- self.assertEqual(
- 1, m_dhcp.call_count, 'dhcp_discovery not called once')
- call = m_dhcp.call_args_list[0]
- self.assertEqual('/sbin/dhclient', call[0][0])
- self.assertEqual('eth9', call[0][1])
- self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])
-
- @mock.patch('time.sleep', mock.MagicMock())
- @mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp,
- m_kill):
- """dhcp_discovery logs a warning when pidfile contains invalid content.
-
- Lease processing still occurs and no proc kill is attempted.
- """
- m_subp.return_value = ('', '')
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
- script_content = '#!/bin/bash\necho fake-dhclient'
- write_file(dhclient_script, script_content, mode=0o755)
- write_file(self.tmp_path('dhclient.pid', tmpdir), '') # Empty pid ''
- lease_content = dedent("""
- lease {
- interface "eth9";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """)
- write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content)
-
- self.assertCountEqual(
- [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
- dhcp_discovery(dhclient_script, 'eth9', tmpdir))
- self.assertIn(
- "dhclient(pid=, parentpid=unknown) failed "
- "to daemonize after 10.0 seconds",
- self.logs.getvalue())
- m_kill.assert_not_called()
-
- @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
- @mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.wait_for_files')
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
- m_subp,
- m_wait,
- m_kill,
- m_getppid):
- """dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
- m_subp.return_value = ('', '')
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
- script_content = '#!/bin/bash\necho fake-dhclient'
- write_file(dhclient_script, script_content, mode=0o755)
- # Don't create pid or leases file
- pidfile = self.tmp_path('dhclient.pid', tmpdir)
- leasefile = self.tmp_path('dhcp.leases', tmpdir)
- m_wait.return_value = [pidfile] # Return the missing pidfile wait for
- m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir))
- self.assertEqual(
- mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
- m_wait.call_args_list[0])
- self.assertIn(
- 'WARNING: dhclient did not produce expected files: dhclient.pid',
- self.logs.getvalue())
- m_kill.assert_not_called()
-
- @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
- @mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
- """dhcp_discovery brings up the interface and runs dhclient.
-
- It also returns the parsed dhcp.leases file generated in the sandbox.
- """
- m_subp.return_value = ('', '')
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
- script_content = '#!/bin/bash\necho fake-dhclient'
- write_file(dhclient_script, script_content, mode=0o755)
- lease_content = dedent("""
- lease {
- interface "eth9";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """)
- lease_file = os.path.join(tmpdir, 'dhcp.leases')
- write_file(lease_file, lease_content)
- pid_file = os.path.join(tmpdir, 'dhclient.pid')
- my_pid = 1
- write_file(pid_file, "%d\n" % my_pid)
- m_getppid.return_value = 1 # Indicate that dhclient has daemonized
-
- self.assertCountEqual(
- [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
- dhcp_discovery(dhclient_script, 'eth9', tmpdir))
- # dhclient script got copied
- with open(os.path.join(tmpdir, 'dhclient')) as stream:
- self.assertEqual(script_content, stream.read())
- # Interface was brought up before dhclient called from sandbox
- m_subp.assert_has_calls([
- mock.call(
- ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
- mock.call(
- [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
- lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
- 'eth9', '-sf', '/bin/true'], capture=True)])
- m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
-
- @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
- @mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
- """dhcp_discovery brings up the interface and runs dhclient.
-
- It also returns the parsed dhcp.leases file generated in the sandbox.
- """
- m_subp.return_value = ('', '')
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
- script_content = '#!/bin/bash\necho fake-dhclient'
- write_file(dhclient_script, script_content, mode=0o755)
- lease_content = dedent("""
- lease {
- interface "eth9";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """)
- lease_file = os.path.join(tmpdir, 'dhcp.leases')
- write_file(lease_file, lease_content)
- pid_file = os.path.join(tmpdir, 'dhclient.pid')
- my_pid = 1
- write_file(pid_file, "%d\n" % my_pid)
- m_getppid.return_value = 1 # Indicate that dhclient has daemonized
-
- with mock.patch('os.access', return_value=False):
- self.assertCountEqual(
- [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
- 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
- dhcp_discovery(dhclient_script, 'eth9', tmpdir))
- # dhclient script got copied
- with open(os.path.join(tmpdir, 'dhclient.orig')) as stream:
- self.assertEqual(script_content, stream.read())
- # Interface was brought up before dhclient called from sandbox
- m_subp.assert_has_calls([
- mock.call(
- ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
- mock.call(
- [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf',
- lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
- 'eth9', '-sf', '/bin/true'], capture=True)])
- m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
-
- @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
- @mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
- """"dhcp_log_func is called with the output and error streams of
- dhclinet when the callable is passed."""
- dhclient_err = 'FAKE DHCLIENT ERROR'
- dhclient_out = 'FAKE DHCLIENT OUT'
- m_subp.return_value = (dhclient_out, dhclient_err)
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
- script_content = '#!/bin/bash\necho fake-dhclient'
- write_file(dhclient_script, script_content, mode=0o755)
- lease_content = dedent("""
- lease {
- interface "eth9";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """)
- lease_file = os.path.join(tmpdir, 'dhcp.leases')
- write_file(lease_file, lease_content)
- pid_file = os.path.join(tmpdir, 'dhclient.pid')
- my_pid = 1
- write_file(pid_file, "%d\n" % my_pid)
- m_getppid.return_value = 1 # Indicate that dhclient has daemonized
-
- def dhcp_log_func(out, err):
- self.assertEqual(out, dhclient_out)
- self.assertEqual(err, dhclient_err)
-
- dhcp_discovery(
- dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func)
-
-
-class TestSystemdParseLeases(CiTestCase):
-
- lxd_lease = dedent("""\
- # This is private data. Do not parse.
- ADDRESS=10.75.205.242
- NETMASK=255.255.255.0
- ROUTER=10.75.205.1
- SERVER_ADDRESS=10.75.205.1
- NEXT_SERVER=10.75.205.1
- BROADCAST=10.75.205.255
- T1=1580
- T2=2930
- LIFETIME=3600
- DNS=10.75.205.1
- DOMAINNAME=lxd
- HOSTNAME=a1
- CLIENTID=ffe617693400020000ab110c65a6a0866931c2
- """)
-
- lxd_parsed = {
- 'ADDRESS': '10.75.205.242',
- 'NETMASK': '255.255.255.0',
- 'ROUTER': '10.75.205.1',
- 'SERVER_ADDRESS': '10.75.205.1',
- 'NEXT_SERVER': '10.75.205.1',
- 'BROADCAST': '10.75.205.255',
- 'T1': '1580',
- 'T2': '2930',
- 'LIFETIME': '3600',
- 'DNS': '10.75.205.1',
- 'DOMAINNAME': 'lxd',
- 'HOSTNAME': 'a1',
- 'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
- }
-
- azure_lease = dedent("""\
- # This is private data. Do not parse.
- ADDRESS=10.132.0.5
- NETMASK=255.255.255.255
- ROUTER=10.132.0.1
- SERVER_ADDRESS=169.254.169.254
- NEXT_SERVER=10.132.0.1
- MTU=1460
- T1=43200
- T2=75600
- LIFETIME=86400
- DNS=169.254.169.254
- NTP=169.254.169.254
- DOMAINNAME=c.ubuntu-foundations.internal
- DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
- HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
- ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
- CLIENTID=ff405663a200020000ab11332859494d7a8b4c
- OPTION_245=624c3620
- """)
-
- azure_parsed = {
- 'ADDRESS': '10.132.0.5',
- 'NETMASK': '255.255.255.255',
- 'ROUTER': '10.132.0.1',
- 'SERVER_ADDRESS': '169.254.169.254',
- 'NEXT_SERVER': '10.132.0.1',
- 'MTU': '1460',
- 'T1': '43200',
- 'T2': '75600',
- 'LIFETIME': '86400',
- 'DNS': '169.254.169.254',
- 'NTP': '169.254.169.254',
- 'DOMAINNAME': 'c.ubuntu-foundations.internal',
- 'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
- 'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
- 'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
- 'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
- 'OPTION_245': '624c3620'}
-
- def setUp(self):
- super(TestSystemdParseLeases, self).setUp()
- self.lease_d = self.tmp_dir()
-
- def test_no_leases_returns_empty_dict(self):
- """A leases dir with no lease files should return empty dictionary."""
- self.assertEqual({}, networkd_load_leases(self.lease_d))
-
- def test_no_leases_dir_returns_empty_dict(self):
- """A non-existing leases dir should return empty dict."""
- enodir = os.path.join(self.lease_d, 'does-not-exist')
- self.assertEqual({}, networkd_load_leases(enodir))
-
- def test_single_leases_file(self):
- """A leases dir with one leases file."""
- populate_dir(self.lease_d, {'2': self.lxd_lease})
- self.assertEqual(
- {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))
-
- def test_single_azure_leases_file(self):
- """On Azure, option 245 should be present, verify it specifically."""
- populate_dir(self.lease_d, {'1': self.azure_lease})
- self.assertEqual(
- {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))
-
- def test_multiple_files(self):
- """Multiple leases files on azure with one found return that value."""
- self.maxDiff = None
- populate_dir(self.lease_d, {'1': self.azure_lease,
- '9': self.lxd_lease})
- self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
- networkd_load_leases(self.lease_d))
-
-
-class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
-
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
- """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
- url = 'http://example.org/index.html'
-
- httpretty.register_uri(httpretty.GET, url)
- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
- self.assertIsNone(lease)
- # Ensure that no teardown happens:
- m_dhcp.assert_not_called()
-
- @mock.patch('cloudinit.net.dhcp.subp.subp')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_ephemeral_dhcp_setup_network_if_url_connectivity(
- self, m_dhcp, m_subp):
- """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
- url = 'http://example.org/index.html'
- fake_lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.2',
- 'subnet-mask': '255.255.0.0'}
- m_dhcp.return_value = [fake_lease]
- m_subp.return_value = ('', '')
-
- httpretty.register_uri(httpretty.GET, url, body={}, status=404)
- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
- self.assertEqual(fake_lease, lease)
- # Ensure that dhcp discovery occurs
- m_dhcp.assert_called_once()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
deleted file mode 100644
index 0535387a..00000000
--- a/cloudinit/net/tests/test_init.py
+++ /dev/null
@@ -1,1270 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import errno
-import ipaddress
-import os
-import textwrap
-from unittest import mock
-
-import httpretty
-import pytest
-import requests
-
-import cloudinit.net as net
-from cloudinit import safeyaml as yaml
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.util import ensure_file, write_file
-
-
-class TestSysDevPath(CiTestCase):
-
- def test_sys_dev_path(self):
- """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
- dev = 'something'
- path = 'attribute'
- expected = net.SYS_CLASS_NET + dev + '/' + path
- self.assertEqual(expected, net.sys_dev_path(dev, path))
-
- def test_sys_dev_path_without_path(self):
- """When path param isn't provided it defaults to empty string."""
- dev = 'something'
- expected = net.SYS_CLASS_NET + dev + '/'
- self.assertEqual(expected, net.sys_dev_path(dev))
-
-
-class TestReadSysNet(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(TestReadSysNet, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
-
- def test_read_sys_net_strips_contents_of_sys_path(self):
- """read_sys_net strips whitespace from the contents of a sys file."""
- content = 'some stuff with trailing whitespace\t\r\n'
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
- self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr'))
-
- def test_read_sys_net_reraises_oserror(self):
- """read_sys_net raises OSError/IOError when file doesn't exist."""
- # Catch a broad Exception because OSError vs IOError differs across Python versions.
- with self.assertRaises(Exception) as context_manager: # noqa: H202
- net.read_sys_net('dev', 'attr')
- error = context_manager.exception
- self.assertIn('No such file or directory', str(error))
-
- def test_read_sys_net_handles_error_with_on_enoent(self):
- """read_sys_net handles OSError/IOError with on_enoent if provided."""
- handled_errors = []
-
- def on_enoent(e):
- handled_errors.append(e)
-
- net.read_sys_net('dev', 'attr', on_enoent=on_enoent)
- error = handled_errors[0]
- self.assertIsInstance(error, Exception)
- self.assertIn('No such file or directory', str(error))
-
- def test_read_sys_net_translates_content(self):
- """read_sys_net translates content when translate dict is provided."""
- content = "you're welcome\n"
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
- translate = {"you're welcome": 'de nada'}
- self.assertEqual(
- 'de nada',
- net.read_sys_net('dev', 'attr', translate=translate))
-
- def test_read_sys_net_errors_on_translation_failures(self):
- """read_sys_net raises a KeyError and logs details on failure."""
- content = "you're welcome\n"
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
- with self.assertRaises(KeyError) as context_manager:
- net.read_sys_net('dev', 'attr', translate={})
- error = context_manager.exception
- self.assertEqual('"you\'re welcome"', str(error))
- self.assertIn(
- "Found unexpected (not translatable) value 'you're welcome' in "
- "'{0}dev/attr".format(self.sysdir),
- self.logs.getvalue())
-
- def test_read_sys_net_handles_handles_with_onkeyerror(self):
- """read_sys_net handles translation errors calling on_keyerror."""
- content = "you're welcome\n"
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
- handled_errors = []
-
- def on_keyerror(e):
- handled_errors.append(e)
-
- net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror)
- error = handled_errors[0]
- self.assertIsInstance(error, KeyError)
- self.assertEqual('"you\'re welcome"', str(error))
-
- def test_read_sys_net_safe_false_on_translate_failure(self):
- """read_sys_net_safe returns False on translation failures."""
- content = "you're welcome\n"
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
- self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={}))
-
- def test_read_sys_net_safe_returns_false_on_noent_failure(self):
- """read_sys_net_safe returns False on file not found failures."""
- self.assertFalse(net.read_sys_net_safe('dev', 'attr'))
-
- def test_read_sys_net_int_returns_none_on_error(self):
- """read_sys_net_safe returns None on failures."""
- self.assertFalse(net.read_sys_net_int('dev', 'attr'))
-
- def test_read_sys_net_int_returns_none_on_valueerror(self):
- """read_sys_net_safe returns None when content is not an int."""
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n')
- self.assertFalse(net.read_sys_net_int('dev', 'attr'))
-
- def test_read_sys_net_int_returns_integer_from_content(self):
- """read_sys_net_safe returns None on failures."""
- write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n')
- self.assertEqual(1, net.read_sys_net_int('dev', 'attr'))
-
- def test_is_up_true(self):
- """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
- for state in ['up', 'unknown']:
- write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
- self.assertTrue(net.is_up('eth0'))
-
- def test_is_up_false(self):
- """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
- for state in ['down', 'incomprehensible']:
- write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
- self.assertFalse(net.is_up('eth0'))
-
- def test_is_bridge(self):
- """is_bridge is True when /sys/net/devname/bridge exists."""
- self.assertFalse(net.is_bridge('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
- self.assertTrue(net.is_bridge('eth0'))
-
- def test_is_bond(self):
- """is_bond is True when /sys/net/devname/bonding exists."""
- self.assertFalse(net.is_bond('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
- self.assertTrue(net.is_bond('eth0'))
-
- def test_get_master(self):
- """get_master returns the path when /sys/net/devname/master exists."""
- self.assertIsNone(net.get_master('enP1s1'))
- master_path = os.path.join(self.sysdir, 'enP1s1', 'master')
- ensure_file(master_path)
- self.assertEqual(master_path, net.get_master('enP1s1'))
-
- def test_master_is_bridge_or_bond(self):
- bridge_mac = 'aa:bb:cc:aa:bb:cc'
- bond_mac = 'cc:bb:aa:cc:bb:aa'
-
- # No master => False
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
- write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
-
- self.assertFalse(net.master_is_bridge_or_bond('eth1'))
- self.assertFalse(net.master_is_bridge_or_bond('eth2'))
-
- # masters without bridge/bonding => False
- write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
- write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
-
- os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
- os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
-
- self.assertFalse(net.master_is_bridge_or_bond('eth1'))
- self.assertFalse(net.master_is_bridge_or_bond('eth2'))
-
- # masters with bridge/bonding => True
- write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
- write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
-
- self.assertTrue(net.master_is_bridge_or_bond('eth1'))
- self.assertTrue(net.master_is_bridge_or_bond('eth2'))
-
- def test_master_is_openvswitch(self):
- ovs_mac = 'bb:cc:aa:bb:cc:aa'
-
- # No master => False
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), ovs_mac)
-
- self.assertFalse(net.master_is_bridge_or_bond('eth1'))
-
- # masters without ovs-system => False
- write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), ovs_mac)
-
- os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1',
- 'master'))
-
- self.assertFalse(net.master_is_openvswitch('eth1'))
-
- # masters with ovs-system => True
- os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1',
- 'upper_ovs-system'))
-
- self.assertTrue(net.master_is_openvswitch('eth1'))
-
- def test_is_vlan(self):
- """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
- ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
- self.assertFalse(net.is_vlan('eth0'))
- content = 'junk\nDEVTYPE=vlan\njunk\n'
- write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
- self.assertTrue(net.is_vlan('eth0'))
-
-
-class TestGenerateFallbackConfig(CiTestCase):
-
- def setUp(self):
- super(TestGenerateFallbackConfig, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
- self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
- return_value=False)
- self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
- self.add_patch('cloudinit.net.is_netfailover', 'm_netfail',
- return_value=False)
- self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master',
- return_value=False)
-
- def test_generate_fallback_finds_connected_eth_with_mac(self):
- """generate_fallback_config finds any connected device with a mac."""
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
- write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
- expected = {
- 'ethernets': {'eth1': {'match': {'macaddress': mac},
- 'dhcp4': True, 'set-name': 'eth1'}},
- 'version': 2}
- self.assertEqual(expected, net.generate_fallback_config())
-
- def test_generate_fallback_finds_dormant_eth_with_mac(self):
- """generate_fallback_config finds any dormant device with a mac."""
- write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1')
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
- expected = {
- 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True,
- 'set-name': 'eth0'}},
- 'version': 2}
- self.assertEqual(expected, net.generate_fallback_config())
-
- def test_generate_fallback_finds_eth_by_operstate(self):
- """generate_fallback_config finds any dormant device with a mac."""
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
- expected = {
- 'ethernets': {
- 'eth0': {'dhcp4': True, 'match': {'macaddress': mac},
- 'set-name': 'eth0'}},
- 'version': 2}
- valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
- for state in valid_operstates:
- write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
- self.assertEqual(expected, net.generate_fallback_config())
- write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky')
- self.assertIsNone(net.generate_fallback_config())
-
- def test_generate_fallback_config_skips_veth(self):
- """generate_fallback_config will skip any veth interfaces."""
- # A connected veth which gets ignored
- write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1')
- self.assertIsNone(net.generate_fallback_config())
-
- def test_generate_fallback_config_skips_bridges(self):
- """generate_fallback_config will skip any bridges interfaces."""
- # A connected veth which gets ignored
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
- ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
- self.assertIsNone(net.generate_fallback_config())
-
- def test_generate_fallback_config_skips_bonds(self):
- """generate_fallback_config will skip any bonded interfaces."""
- # A connected bond device which gets ignored
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
- ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
- self.assertIsNone(net.generate_fallback_config())
-
- def test_generate_fallback_config_skips_netfail_devs(self):
- """gen_fallback_config ignores netfail primary,sby no mac on master."""
- mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac
- for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
- write_file(os.path.join(self.sysdir, iface, 'carrier'), '1')
- write_file(
- os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
- write_file(
- os.path.join(self.sysdir, iface, 'address'), mac)
-
- def is_netfail(iface, _driver=None):
- # ens3 is the master
- if iface == 'ens3':
- return False
- return True
- self.m_netfail.side_effect = is_netfail
-
- def is_netfail_master(iface, _driver=None):
- # ens3 is the master
- if iface == 'ens3':
- return True
- return False
- self.m_netfail_master.side_effect = is_netfail_master
- expected = {
- 'ethernets': {
- 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'},
- 'set-name': 'ens3'}},
- 'version': 2}
- result = net.generate_fallback_config()
- self.assertEqual(expected, result)
-
-
-class TestNetFindFallBackNic(CiTestCase):
-
- def setUp(self):
- super(TestNetFindFallBackNic, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
- self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
- return_value=False)
- self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
-
- def test_generate_fallback_finds_first_connected_eth_with_mac(self):
- """find_fallback_nic finds any connected device with a mac."""
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
- write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
- self.assertEqual('eth1', net.find_fallback_nic())
-
-
-class TestGetDeviceList(CiTestCase):
-
- def setUp(self):
- super(TestGetDeviceList, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
-
- def test_get_devicelist_raise_oserror(self):
- """get_devicelist raise any non-ENOENT OSerror."""
- error = OSError('Can not do it')
- error.errno = errno.EPERM # Set non-ENOENT
- self.m_sys_path.side_effect = error
- with self.assertRaises(OSError) as context_manager:
- net.get_devicelist()
- exception = context_manager.exception
- self.assertEqual('Can not do it', str(exception))
-
- def test_get_devicelist_empty_without_sys_net(self):
- """get_devicelist returns empty list when missing SYS_CLASS_NET."""
- self.m_sys_path.return_value = 'idontexist'
- self.assertEqual([], net.get_devicelist())
-
- def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
- """get_devicelist returns empty directoty listing for SYS_CLASS_NET."""
- self.assertEqual([], net.get_devicelist())
-
- def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
- """get_devicelist returns a directory listing for SYS_CLASS_NET."""
- write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
- write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
- self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
-
-
-class TestGetInterfaceMAC(CiTestCase):
-
- def setUp(self):
- super(TestGetInterfaceMAC, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
-
- def test_get_interface_mac_false_with_no_mac(self):
- """get_device_list returns False when no mac is reported."""
- ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
- mac_path = os.path.join(self.sysdir, 'eth0', 'address')
- self.assertFalse(os.path.exists(mac_path))
- self.assertFalse(net.get_interface_mac('eth0'))
-
- def test_get_interface_mac(self):
- """get_interfaces returns the mac from SYS_CLASS_NET/dev/address."""
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
- self.assertEqual(mac, net.get_interface_mac('eth1'))
-
- def test_get_interface_mac_grabs_bonding_address(self):
- """get_interfaces returns the source device mac for bonded devices."""
- source_dev_mac = 'aa:bb:cc:aa:bb:cc'
- bonded_mac = 'dd:ee:ff:dd:ee:ff'
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac)
- write_file(
- os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'),
- source_dev_mac)
- self.assertEqual(source_dev_mac, net.get_interface_mac('eth1'))
-
- def test_get_interfaces_empty_list_without_sys_net(self):
- """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
- self.m_sys_path.return_value = 'idontexist'
- self.assertEqual([], net.get_interfaces())
-
- def test_get_interfaces_by_mac_skips_empty_mac(self):
- """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
- empty_mac = '00:00:00:00:00:00'
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac)
- write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
- write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
- write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
- expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
- self.assertEqual(expected, net.get_interfaces())
-
- def test_get_interfaces_by_mac_skips_missing_mac(self):
- """Ignore interfaces without an address from get_interfaces_by_mac."""
- write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
- address_path = os.path.join(self.sysdir, 'eth1', 'address')
- self.assertFalse(os.path.exists(address_path))
- mac = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
- write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
- expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
- self.assertEqual(expected, net.get_interfaces())
-
- def test_get_interfaces_by_mac_skips_master_devs(self):
- """Ignore interfaces with a master device which would have dup mac."""
- mac1 = mac2 = 'aa:bb:cc:aa:bb:cc'
- write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1)
- write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah")
- write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
- write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2)
- expected = [('eth2', mac2, None, None)]
- self.assertEqual(expected, net.get_interfaces())
-
- @mock.patch('cloudinit.net.is_netfailover')
- def test_get_interfaces_by_mac_skips_netfailvoer(self, m_netfail):
- """Ignore interfaces if netfailover primary or standby."""
- mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac
- for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
- write_file(
- os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
- write_file(
- os.path.join(self.sysdir, iface, 'address'), mac)
-
- def is_netfail(iface, _driver=None):
- # ens3 is the master
- if iface == 'ens3':
- return False
- else:
- return True
- m_netfail.side_effect = is_netfail
- expected = [('ens3', mac, None, None)]
- self.assertEqual(expected, net.get_interfaces())
-
- def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds(
- self
- ):
- bridge_mac = 'aa:bb:cc:aa:bb:cc'
- bond_mac = 'cc:bb:aa:cc:bb:aa'
- ovs_mac = 'bb:cc:aa:bb:cc:aa'
-
- write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
- write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
-
- write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
- write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
-
- write_file(os.path.join(self.sysdir, 'ovs-system', 'address'),
- ovs_mac)
-
- write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
- os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
-
- write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
- os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
-
- write_file(os.path.join(self.sysdir, 'eth3', 'address'), ovs_mac)
- os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3',
- 'master'))
- os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3',
- 'upper_ovs-system'))
-
- interface_names = [interface[0] for interface in net.get_interfaces()]
- self.assertEqual(['eth1', 'eth2', 'eth3', 'ovs-system'],
- sorted(interface_names))
-
-
-class TestInterfaceHasOwnMAC(CiTestCase):
-
- def setUp(self):
- super(TestInterfaceHasOwnMAC, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
-
- def test_interface_has_own_mac_false_when_stolen(self):
- """Return False from interface_has_own_mac when address is stolen."""
- write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2')
- self.assertFalse(net.interface_has_own_mac('eth1'))
-
- def test_interface_has_own_mac_true_when_not_stolen(self):
- """Return False from interface_has_own_mac when mac isn't stolen."""
- valid_assign_types = ['0', '1', '3']
- assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type')
- for _type in valid_assign_types:
- write_file(assign_path, _type)
- self.assertTrue(net.interface_has_own_mac('eth1'))
-
- def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
- """When addr_assign_type is absent, interface_has_own_mac errors."""
- with self.assertRaises(ValueError):
- net.interface_has_own_mac('eth1', strict=True)
-
-
-@mock.patch('cloudinit.net.subp.subp')
-class TestEphemeralIPV4Network(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestEphemeralIPV4Network, self).setUp()
- sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
- self.m_sys_path = sys_mock.start()
- self.sysdir = self.tmp_dir() + '/'
- self.m_sys_path.return_value = self.sysdir
- self.addCleanup(sys_mock.stop)
-
- def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
- """No required params for EphemeralIPv4Network can be None."""
- required_params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
- for key in required_params.keys():
- params = copy.deepcopy(required_params)
- params[key] = None
- with self.assertRaises(ValueError) as context_manager:
- net.EphemeralIPv4Network(**params)
- error = context_manager.exception
- self.assertIn('Cannot init network on', str(error))
- self.assertEqual(0, m_subp.call_count)
-
- def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
- """Raise an error when prefix_or_mask is not a netmask or prefix."""
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'broadcast': '192.168.2.255'}
- invalid_masks = ('invalid', 'invalid.', '123.123.123')
- for error_val in invalid_masks:
- params['prefix_or_mask'] = error_val
- with self.assertRaises(ValueError) as context_manager:
- with net.EphemeralIPv4Network(**params):
- pass
- error = context_manager.exception
- self.assertIn('Cannot setup network: netmask', str(error))
- self.assertEqual(0, m_subp.call_count)
-
- def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
- """EphemeralIPv4Network performs teardown on the device if setup."""
- expected_setup_calls = [
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'}),
- mock.call(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
- capture=True)]
- expected_teardown_calls = [
- mock.call(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0',
- 'down'], capture=True),
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24',
- 'dev', 'eth0'], capture=True)]
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
- with net.EphemeralIPv4Network(**params):
- self.assertEqual(expected_setup_calls, m_subp.call_args_list)
- m_subp.assert_has_calls(expected_teardown_calls)
-
- @mock.patch('cloudinit.net.readurl')
- def test_ephemeral_ipv4_no_network_if_url_connectivity(
- self, m_readurl, m_subp):
- """No network setup is performed if we can successfully connect to
- connectivity_url."""
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
- 'connectivity_url': 'http://example.org/index.html'}
-
- with net.EphemeralIPv4Network(**params):
- self.assertEqual([mock.call('http://example.org/index.html',
- timeout=5)], m_readurl.call_args_list)
- # Ensure that no teardown happens:
- m_subp.assert_has_calls([])
-
- def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
- """EphemeralIPv4Network handles exception when address is setup.
-
- It performs no cleanup as the interface was already setup.
- """
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
- m_subp.side_effect = ProcessExecutionError(
- '', 'RTNETLINK answers: File exists', 2)
- expected_calls = [
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'})]
- with net.EphemeralIPv4Network(**params):
- pass
- self.assertEqual(expected_calls, m_subp.call_args_list)
- self.assertIn(
- 'Skip ephemeral network setup, eth0 already has address',
- self.logs.getvalue())
-
- def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
- """EphemeralIPv4Network takes a valid prefix to setup the network."""
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '24', 'broadcast': '192.168.2.255'}
- for prefix_val in ['24', 16]: # prefix can be int or string
- params['prefix_or_mask'] = prefix_val
- with net.EphemeralIPv4Network(**params):
- pass
- m_subp.assert_has_calls([mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'})])
- m_subp.assert_has_calls([mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'})])
-
- def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
- """Add the route when router is set and no default route exists."""
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
- 'router': '192.168.2.1'}
- m_subp.return_value = '', '' # Empty response from ip route gw check
- expected_setup_calls = [
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'}),
- mock.call(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
- capture=True),
- mock.call(
- ['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
- mock.call(['ip', '-4', 'route', 'add', '192.168.2.1',
- 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
- mock.call(
- ['ip', '-4', 'route', 'add', 'default', 'via',
- '192.168.2.1', 'dev', 'eth0'], capture=True)]
- expected_teardown_calls = [
- mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
- capture=True),
- mock.call(['ip', '-4', 'route', 'del', '192.168.2.1',
- 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
- ]
-
- with net.EphemeralIPv4Network(**params):
- self.assertEqual(expected_setup_calls, m_subp.call_args_list)
- m_subp.assert_has_calls(expected_teardown_calls)
-
- def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
- 'static_routes': [('169.254.169.254/32', '192.168.2.1'),
- ('0.0.0.0/0', '192.168.2.1')],
- 'router': '192.168.2.1'}
- expected_setup_calls = [
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
- 'broadcast', '192.168.2.255', 'dev', 'eth0'],
- capture=True, update_env={'LANG': 'C'}),
- mock.call(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
- capture=True),
- mock.call(
- ['ip', '-4', 'route', 'add', '169.254.169.254/32',
- 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
- mock.call(
- ['ip', '-4', 'route', 'add', '0.0.0.0/0',
- 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)]
- expected_teardown_calls = [
- mock.call(
- ['ip', '-4', 'route', 'del', '0.0.0.0/0',
- 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
- mock.call(
- ['ip', '-4', 'route', 'del', '169.254.169.254/32',
- 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
- mock.call(
- ['ip', '-family', 'inet', 'link', 'set', 'dev',
- 'eth0', 'down'], capture=True),
- mock.call(
- ['ip', '-family', 'inet', 'addr', 'del',
- '192.168.2.2/24', 'dev', 'eth0'], capture=True)
- ]
- with net.EphemeralIPv4Network(**params):
- self.assertEqual(expected_setup_calls, m_subp.call_args_list)
- m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
-
-
-class TestApplyNetworkCfgNames(CiTestCase):
- V1_CONFIG = textwrap.dedent("""\
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: "52:54:00:12:34:00"
- subnets:
- - type: static
- address: 10.0.2.15
- netmask: 255.255.255.0
- gateway: 10.0.2.2
- """)
- V2_CONFIG = textwrap.dedent("""\
- version: 2
- ethernets:
- interface0:
- match:
- macaddress: "52:54:00:12:34:00"
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- set-name: interface0
- """)
-
- V2_CONFIG_NO_SETNAME = textwrap.dedent("""\
- version: 2
- ethernets:
- interface0:
- match:
- macaddress: "52:54:00:12:34:00"
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- """)
-
- V2_CONFIG_NO_MAC = textwrap.dedent("""\
- version: 2
- ethernets:
- interface0:
- match:
- driver: virtio-net
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- set-name: interface0
- """)
-
- @mock.patch('cloudinit.net.device_devid')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net._rename_interfaces')
- def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver,
- m_device_devid):
- m_device_driver.return_value = 'virtio_net'
- m_device_devid.return_value = '0x15d8'
-
- net.apply_network_config_names(yaml.load(self.V1_CONFIG))
-
- call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
- m_rename_interfaces.assert_called_with([call])
-
- @mock.patch('cloudinit.net.device_devid')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net._rename_interfaces')
- def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver,
- m_device_devid):
- m_device_driver.return_value = 'virtio_net'
- m_device_devid.return_value = '0x15d8'
-
- net.apply_network_config_names(yaml.load(self.V2_CONFIG))
-
- call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
- m_rename_interfaces.assert_called_with([call])
-
- @mock.patch('cloudinit.net._rename_interfaces')
- def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
- net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
- m_rename_interfaces.assert_called_with([])
-
- @mock.patch('cloudinit.net._rename_interfaces')
- def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
- net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
- m_rename_interfaces.assert_called_with([])
-
- def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
- with self.assertRaises(RuntimeError):
- net.apply_network_config_names(yaml.load("version: 3"))
-
-
-class TestHasURLConnectivity(HttprettyTestCase):
-
- def setUp(self):
- super(TestHasURLConnectivity, self).setUp()
- self.url = 'http://fake/'
- self.kwargs = {'allow_redirects': True, 'timeout': 5.0}
-
- @mock.patch('cloudinit.net.readurl')
- def test_url_timeout_on_connectivity_check(self, m_readurl):
- """A timeout of 5 seconds is provided when reading a url."""
- self.assertTrue(
- net.has_url_connectivity(self.url), 'Expected True on url connect')
-
- def test_true_on_url_connectivity_success(self):
- httpretty.register_uri(httpretty.GET, self.url)
- self.assertTrue(
- net.has_url_connectivity(self.url), 'Expected True on url connect')
-
- @mock.patch('requests.Session.request')
- def test_true_on_url_connectivity_timeout(self, m_request):
- """A timeout raised accessing the url will return False."""
- m_request.side_effect = requests.Timeout('Fake Connection Timeout')
- self.assertFalse(
- net.has_url_connectivity(self.url),
- 'Expected False on url timeout')
-
- def test_true_on_url_connectivity_failure(self):
- httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
- self.assertFalse(
- net.has_url_connectivity(self.url), 'Expected False on url fail')
-
-
-def _mk_v1_phys(mac, name, driver, device_id):
- v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
- params = {}
- if driver:
- params.update({'driver': driver})
- if device_id:
- params.update({'device_id': device_id})
-
- if params:
- v1_cfg.update({'params': params})
-
- return v1_cfg
-
-
-def _mk_v2_phys(mac, name, driver=None, device_id=None):
- v2_cfg = {'set-name': name, 'match': {'macaddress': mac}}
- if driver:
- v2_cfg['match'].update({'driver': driver})
- if device_id:
- v2_cfg['match'].update({'device_id': device_id})
-
- return v2_cfg
-
-
-class TestExtractPhysdevs(CiTestCase):
-
- def setUp(self):
- super(TestExtractPhysdevs, self).setUp()
- self.add_patch('cloudinit.net.device_driver', 'm_driver')
- self.add_patch('cloudinit.net.device_devid', 'm_devid')
-
- def test_extract_physdevs_looks_up_driver_v1(self):
- driver = 'virtio'
- self.m_driver.return_value = driver
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
- ]
- netcfg = {
- 'version': 1,
- 'config': [_mk_v1_phys(*args) for args in physdevs],
- }
- # insert the driver value for verification
- physdevs[0][2] = driver
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
- self.m_driver.assert_called_with('eth0')
-
- def test_extract_physdevs_looks_up_driver_v2(self):
- driver = 'virtio'
- self.m_driver.return_value = driver
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
- }
- # insert the driver value for verification
- physdevs[0][2] = driver
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
- self.m_driver.assert_called_with('eth0')
-
- def test_extract_physdevs_looks_up_devid_v1(self):
- devid = '0x1000'
- self.m_devid.return_value = devid
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
- ]
- netcfg = {
- 'version': 1,
- 'config': [_mk_v1_phys(*args) for args in physdevs],
- }
- # insert the devid value for verification
- physdevs[0][3] = devid
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
- self.m_devid.assert_called_with('eth0')
-
- def test_extract_physdevs_looks_up_devid_v2(self):
- devid = '0x1000'
- self.m_devid.return_value = devid
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
- }
- # insert the devid value for verification
- physdevs[0][3] = devid
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
- self.m_devid.assert_called_with('eth0')
-
- def test_get_v1_type_physical(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
- ]
- netcfg = {
- 'version': 1,
- 'config': [_mk_v1_phys(*args) for args in physdevs],
- }
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
-
- def test_get_v2_type_physical(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
- }
- self.assertEqual(sorted(physdevs),
- sorted(net.extract_physdevs(netcfg)))
-
- def test_get_v2_type_physical_skips_if_no_set_name(self):
- netcfg = {
- 'version': 2,
- 'ethernets': {
- 'ens3': {
- 'match': {'macaddress': '00:11:22:33:44:55'},
- }
- }
- }
- self.assertEqual([], net.extract_physdevs(netcfg))
-
- def test_runtime_error_on_unknown_netcfg_version(self):
- with self.assertRaises(RuntimeError):
- net.extract_physdevs({'version': 3, 'awesome_config': []})
-
-
-class TestNetFailOver(CiTestCase):
-
- def setUp(self):
- super(TestNetFailOver, self).setUp()
- self.add_patch('cloudinit.net.util', 'm_util')
- self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net')
- self.add_patch('cloudinit.net.device_driver', 'm_device_driver')
-
- def test_get_dev_features(self):
- devname = self.random_string()
- features = self.random_string()
- self.m_read_sys_net.return_value = features
-
- self.assertEqual(features, net.get_dev_features(devname))
- self.assertEqual(1, self.m_read_sys_net.call_count)
- self.assertEqual(mock.call(devname, 'device/features'),
- self.m_read_sys_net.call_args_list[0])
-
- def test_get_dev_features_none_returns_empty_string(self):
- devname = self.random_string()
- self.m_read_sys_net.side_effect = Exception('error')
- self.assertEqual('', net.get_dev_features(devname))
- self.assertEqual(1, self.m_read_sys_net.call_count)
- self.assertEqual(mock.call(devname, 'device/features'),
- self.m_read_sys_net.call_args_list[0])
-
- @mock.patch('cloudinit.net.get_dev_features')
- def test_has_netfail_standby_feature(self, m_dev_features):
- devname = self.random_string()
- standby_features = ('0' * 62) + '1' + '0'
- m_dev_features.return_value = standby_features
- self.assertTrue(net.has_netfail_standby_feature(devname))
-
- @mock.patch('cloudinit.net.get_dev_features')
- def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
- devname = self.random_string()
- standby_features = self.random_string()
- m_dev_features.return_value = standby_features
- self.assertFalse(net.has_netfail_standby_feature(devname))
-
- @mock.patch('cloudinit.net.get_dev_features')
- def test_has_netfail_standby_feature_not_present_is_false(self,
- m_dev_features):
- devname = self.random_string()
- standby_features = '0' * 64
- m_dev_features.return_value = standby_features
- self.assertFalse(net.has_netfail_standby_feature(devname))
-
- @mock.patch('cloudinit.net.get_dev_features')
- def test_has_netfail_standby_feature_no_features_is_false(self,
- m_dev_features):
- devname = self.random_string()
- standby_features = None
- m_dev_features.return_value = standby_features
- self.assertFalse(net.has_netfail_standby_feature(devname))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_master(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = False # no master sysfs attr
- m_standby.return_value = True # has standby feature flag
- self.assertTrue(net.is_netfail_master(devname, driver))
-
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_master_checks_master_attr(self, m_sysdev):
- devname = self.random_string()
- driver = 'virtio_net'
- m_sysdev.return_value = self.random_string()
- self.assertFalse(net.is_netfail_master(devname, driver))
- self.assertEqual(1, m_sysdev.call_count)
- self.assertEqual(mock.call(devname, path='master'),
- m_sysdev.call_args_list[0])
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
- devname = self.random_string()
- driver = self.random_string()
- self.assertFalse(net.is_netfail_master(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = True # has master sysfs attr
- self.assertFalse(net.is_netfail_master(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = False # no master sysfs attr
- m_standby.return_value = False # no standby feature flag
- self.assertFalse(net.is_netfail_master(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
- devname = self.random_string()
- driver = self.random_string() # device not virtio_net
- master_devname = self.random_string()
- m_sysdev.return_value = "%s/%s" % (self.random_string(),
- master_devname)
- m_exists.return_value = True # has master sysfs attr
- self.m_device_driver.return_value = 'virtio_net' # master virtio_net
- m_standby.return_value = True # has standby feature flag
- self.assertTrue(net.is_netfail_primary(devname, driver))
- self.assertEqual(1, self.m_device_driver.call_count)
- self.assertEqual(mock.call(master_devname),
- self.m_device_driver.call_args_list[0])
- self.assertEqual(1, m_standby.call_count)
- self.assertEqual(mock.call(master_devname),
- m_standby.call_args_list[0])
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists,
- m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- self.assertFalse(net.is_netfail_primary(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
- devname = self.random_string()
- driver = self.random_string() # device not virtio_net
- m_exists.return_value = False # no master sysfs attr
- self.assertFalse(net.is_netfail_primary(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists,
- m_standby):
- devname = self.random_string()
- driver = self.random_string() # device not virtio_net
- master_devname = self.random_string()
- m_sysdev.return_value = "%s/%s" % (self.random_string(),
- master_devname)
- m_exists.return_value = True # has master sysfs attr
- self.m_device_driver.return_value = 'XXXX' # master not virtio_net
- self.assertFalse(net.is_netfail_primary(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- @mock.patch('cloudinit.net.sys_dev_path')
- def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists,
- m_standby):
- devname = self.random_string()
- driver = self.random_string() # device not virtio_net
- master_devname = self.random_string()
- m_sysdev.return_value = "%s/%s" % (self.random_string(),
- master_devname)
- m_exists.return_value = True # has master sysfs attr
- self.m_device_driver.return_value = 'virtio_net' # master virtio_net
- m_standby.return_value = False # master has no standby feature flag
- self.assertFalse(net.is_netfail_primary(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_standby(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = True # has master sysfs attr
- m_standby.return_value = True # has standby feature flag
- self.assertTrue(net.is_netfail_standby(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
- devname = self.random_string()
- driver = self.random_string()
- self.assertFalse(net.is_netfail_standby(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_standby_no_master(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = False # no master sysfs attr
- self.assertFalse(net.is_netfail_standby(devname, driver))
-
- @mock.patch('cloudinit.net.has_netfail_standby_feature')
- @mock.patch('cloudinit.net.os.path.exists')
- def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
- devname = self.random_string()
- driver = 'virtio_net'
- m_exists.return_value = True # has master sysfs attr
- m_standby.return_value = False # no standby feature flag
- self.assertFalse(net.is_netfail_standby(devname, driver))
-
- @mock.patch('cloudinit.net.is_netfail_standby')
- @mock.patch('cloudinit.net.is_netfail_primary')
- def test_is_netfailover_primary(self, m_primary, m_standby):
- devname = self.random_string()
- driver = self.random_string()
- m_primary.return_value = True
- m_standby.return_value = False
- self.assertTrue(net.is_netfailover(devname, driver))
-
- @mock.patch('cloudinit.net.is_netfail_standby')
- @mock.patch('cloudinit.net.is_netfail_primary')
- def test_is_netfailover_standby(self, m_primary, m_standby):
- devname = self.random_string()
- driver = self.random_string()
- m_primary.return_value = False
- m_standby.return_value = True
- self.assertTrue(net.is_netfailover(devname, driver))
-
- @mock.patch('cloudinit.net.is_netfail_standby')
- @mock.patch('cloudinit.net.is_netfail_primary')
- def test_is_netfailover_returns_false(self, m_primary, m_standby):
- devname = self.random_string()
- driver = self.random_string()
- m_primary.return_value = False
- m_standby.return_value = False
- self.assertFalse(net.is_netfailover(devname, driver))
-
-
-class TestIsIpAddress:
- """Tests for net.is_ip_address.
-
- Instead of testing with values we rely on the ipaddress stdlib module to
- handle all values correctly, so simply test that is_ip_address defers to
- the ipaddress module correctly.
- """
-
- @pytest.mark.parametrize('ip_address_side_effect,expected_return', (
- (ValueError, False),
- (lambda _: ipaddress.IPv4Address('192.168.0.1'), True),
- (lambda _: ipaddress.IPv6Address('2001:db8::'), True),
- ))
- def test_is_ip_address(self, ip_address_side_effect, expected_return):
- with mock.patch('cloudinit.net.ipaddress.ip_address',
- side_effect=ip_address_side_effect) as m_ip_address:
- ret = net.is_ip_address(mock.sentinel.ip_address_in)
- assert expected_return == ret
- expected_call = mock.call(mock.sentinel.ip_address_in)
- assert [expected_call] == m_ip_address.call_args_list
-
-
-class TestIsIpv4Address:
- """Tests for net.is_ipv4_address.
-
- Instead of testing with values we rely on the ipaddress stdlib module to
- handle all values correctly, so simply test that is_ipv4_address defers to
- the ipaddress module correctly.
- """
-
- @pytest.mark.parametrize('ipv4address_mock,expected_return', (
- (mock.Mock(side_effect=ValueError), False),
- (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True),
- ))
- def test_is_ip_address(self, ipv4address_mock, expected_return):
- with mock.patch('cloudinit.net.ipaddress.IPv4Address',
- ipv4address_mock) as m_ipv4address:
- ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
- assert expected_return == ret
- expected_call = mock.call(mock.sentinel.ip_address_in)
- assert [expected_call] == m_ipv4address.call_args_list
-
-
-# vi: ts=4 expandtab
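The removed EphemeralIPv4Network tests above describe a context manager that adds the address (and optional routes) with `ip` commands on entry and removes them again on exit, skipping setup entirely when connectivity_url is already reachable. A minimal usage sketch, assuming only the keyword arguments exercised in those tests (addresses and URL are placeholders):

    from cloudinit import net

    params = {
        "interface": "eth0",
        "ip": "192.168.2.2",
        "prefix_or_mask": "255.255.255.0",
        "broadcast": "192.168.2.255",
        "router": "192.168.2.1",  # optional: also install a default route
        "connectivity_url": "http://169.254.169.254/",  # optional: skip setup if reachable
    }
    with net.EphemeralIPv4Network(**params):
        pass  # talk to the metadata service here; teardown happens on exit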
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
deleted file mode 100644
index 07d726e2..00000000
--- a/cloudinit/net/tests/test_network_state.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit.net import network_state
-from cloudinit.tests.helpers import CiTestCase
-
-netstate_path = 'cloudinit.net.network_state'
-
-
-class TestNetworkStateParseConfig(CiTestCase):
-
- def setUp(self):
- super(TestNetworkStateParseConfig, self).setUp()
- nsi_path = netstate_path + '.NetworkStateInterpreter'
- self.add_patch(nsi_path, 'm_nsi')
-
- def test_missing_version_returns_none(self):
- ncfg = {}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
-
- def test_unknown_versions_returns_none(self):
- ncfg = {'version': 13.2}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
-
- def test_version_2_passes_self_as_config(self):
- ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
- network_state.parse_net_config_data(ncfg)
- self.assertEqual([mock.call(version=2, config=ncfg)],
- self.m_nsi.call_args_list)
-
- def test_valid_config_gets_network_state(self):
- ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
- result = network_state.parse_net_config_data(ncfg)
- self.assertNotEqual(None, result)
-
- def test_empty_v1_config_gets_network_state(self):
- ncfg = {'version': 1, 'config': []}
- result = network_state.parse_net_config_data(ncfg)
- self.assertNotEqual(None, result)
-
- def test_empty_v2_config_gets_network_state(self):
- ncfg = {'version': 2}
- result = network_state.parse_net_config_data(ncfg)
- self.assertNotEqual(None, result)
-
-
-class TestNetworkStateParseConfigV2(CiTestCase):
-
- def test_version_2_ignores_renderer_key(self):
- ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}}
- nsi = network_state.NetworkStateInterpreter(version=ncfg['version'],
- config=ncfg)
- nsi.parse_config(skip_broken=False)
- self.assertEqual(ncfg, nsi.as_dict()['config'])
-
-
-# vi: ts=4 expandtab
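The deleted network_state tests reduce to the behaviour of parse_net_config_data(): a NetworkState object is returned for recognised version 1 or 2 configs, and None for a missing or unknown version. An illustrative call with a trivial v2 config:

    from cloudinit.net import network_state

    state = network_state.parse_net_config_data({"version": 2, "ethernets": {}})
    assert state is not None  # known versions parse; {"version": 13.2} would return None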
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
index 58c0a708..b79e4426 100644
--- a/cloudinit/net/udev.py
+++ b/cloudinit/net/udev.py
@@ -32,15 +32,18 @@ def generate_udev_rule(interface, mac, driver=None):
ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0"
"""
if not driver:
- driver = '?*'
-
- rule = ', '.join([
- compose_udev_equality('SUBSYSTEM', 'net'),
- compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', driver),
- compose_udev_attr_equality('address', mac),
- compose_udev_setting('NAME', interface),
- ])
- return '%s\n' % rule
+ driver = "?*"
+
+ rule = ", ".join(
+ [
+ compose_udev_equality("SUBSYSTEM", "net"),
+ compose_udev_equality("ACTION", "add"),
+ compose_udev_equality("DRIVERS", driver),
+ compose_udev_attr_equality("address", mac),
+ compose_udev_setting("NAME", interface),
+ ]
+ )
+ return "%s\n" % rule
+
# vi: ts=4 expandtab syntax=python
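generate_udev_rule() above is pure string composition; when no driver is given it matches any driver via the "?*" wildcard. A sketch of a call and the kind of rule it produces (the commented output is illustrative, not captured from a run):

    from cloudinit.net.udev import generate_udev_rule

    rule = generate_udev_rule("eth0", "ff:ee:dd:cc:bb:aa")
    # SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
    # ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
    print(rule, end="")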
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 628e2908..5eeeb967 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -8,30 +8,96 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy, deepcopy
+import json
import re
+from copy import copy, deepcopy
+from ipaddress import IPv4Network
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.net.network_state import net_prefix_to_ipv4_mask
-from cloudinit import subp
-from cloudinit import util
-
from cloudinit.simpletable import SimpleTable
LOG = logging.getLogger()
+# Example netdev format:
+# {'eth0': {'hwaddr': '00:16:3e:16:db:54',
+# 'ipv4': [{'bcast': '10.85.130.255',
+# 'ip': '10.85.130.116',
+# 'mask': '255.255.255.0',
+# 'scope': 'global'}],
+# 'ipv6': [{'ip': 'fd42:baa2:3dd:17a:216:3eff:fe16:db54/64',
+# 'scope6': 'global'},
+# {'ip': 'fe80::216:3eff:fe16:db54/64', 'scope6': 'link'}],
+# 'up': True},
+# 'lo': {'hwaddr': '',
+# 'ipv4': [{'bcast': '',
+# 'ip': '127.0.0.1',
+# 'mask': '255.0.0.0',
+# 'scope': 'host'}],
+# 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+# 'up': True}}
+DEFAULT_NETDEV_INFO = {"ipv4": [], "ipv6": [], "hwaddr": "", "up": False}
+
+
+def _netdev_info_iproute_json(ipaddr_json):
+ """Get network device dicts from ip route and ip link info.
+
+ ipaddr_json: Output string from 'ip --json addr' command.
+
+ Returns a dict of device info keyed by network device name containing
+ device configuration values.
+
+ Raises json.JSONDecodeError if json could not be decoded
+ """
+ ipaddr_data = json.loads(ipaddr_json)
+ devs = {}
-DEFAULT_NETDEV_INFO = {
- "ipv4": [],
- "ipv6": [],
- "hwaddr": "",
- "up": False
-}
+ for dev in ipaddr_data:
+ flags = dev["flags"] if "flags" in dev else []
+ address = dev["address"] if dev.get("link_type") == "ether" else ""
+ dev_info = {
+ "hwaddr": address,
+ "up": bool("UP" in flags and "LOWER_UP" in flags),
+ "ipv4": [],
+ "ipv6": [],
+ }
+ for addr in dev.get("addr_info", []):
+ if addr.get("family") == "inet":
+ mask = (
+ str(IPv4Network(f'0.0.0.0/{addr["prefixlen"]}').netmask)
+ if "prefixlen" in addr
+ else ""
+ )
+ parsed_addr = {
+ "ip": addr.get("local", ""),
+ "mask": mask,
+ "bcast": addr.get("broadcast", ""),
+ "scope": addr.get("scope", ""),
+ }
+ dev_info["ipv4"].append(parsed_addr)
+ elif addr["family"] == "inet6":
+ ip = addr.get("local", "")
+ # address here refers to a peer address, and according
+ # to "man 8 ip-address":
+ # If a peer address is specified, the local address cannot
+ # have a prefix length. The network prefix is associated
+ # with the peer rather than with the local address.
+ if ip and not addr.get("address"):
+ ip = f"{ip}/{addr.get('prefixlen', 64)}"
+ parsed_addr = {
+ "ip": ip,
+ "scope6": addr.get("scope", ""),
+ }
+ dev_info["ipv6"].append(parsed_addr)
+ devs[dev["ifname"]] = dev_info
+ return devs
def _netdev_info_iproute(ipaddr_out):
"""
- Get network device dicts from ip route and ip link info.
+ DEPRECATED: Only used on distros that don't support ip json output
+ Use _netdev_info_iproute_json() when possible.
@param ipaddr_out: Output string from 'ip addr show' command.
@@ -42,51 +108,68 @@ def _netdev_info_iproute(ipaddr_out):
devs = {}
dev_name = None
for num, line in enumerate(ipaddr_out.splitlines()):
- m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
+ m = re.match(r"^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*", line)
if m:
- dev_name = m.group('dev').lower().split('@')[0]
- flags = m.group('flags').split(',')
+ dev_name = m.group("dev").lower().split("@")[0]
+ flags = m.group("flags").split(",")
devs[dev_name] = {
- 'ipv4': [], 'ipv6': [], 'hwaddr': '',
- 'up': bool('UP' in flags and 'LOWER_UP' in flags),
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "",
+ "up": bool("UP" in flags and "LOWER_UP" in flags),
}
- elif 'inet6' in line:
+ elif "inet6" in line:
m = re.match(
- r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
+ r"\s+inet6\s(?P<ip>\S+)"
+ r"(\s(peer\s\S+))?"
+ r"\sscope\s(?P<scope6>\S+).*",
+ line,
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
- devs[dev_name]['ipv6'].append(m.groupdict())
- elif 'inet' in line:
+ devs[dev_name]["ipv6"].append(m.groupdict())
+ elif "inet" in line:
m = re.match(
- r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
- r'(?P<scope>\S+).*', line)
+ r"\s+inet\s(?P<cidr4>\S+)"
+ r"(\smetric\s(?P<metric>\d+))?"
+ r"(\sbrd\s(?P<bcast>\S+))?"
+ r"\sscope\s(?P<scope>\S+).*",
+ line,
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
match = m.groupdict()
- cidr4 = match.pop('cidr4')
- addr, _, prefix = cidr4.partition('/')
+ cidr4 = match.pop("cidr4")
+ addr, _, prefix = cidr4.partition("/")
if not prefix:
- prefix = '32'
- devs[dev_name]['ipv4'].append({
- 'ip': addr,
- 'bcast': match['bcast'] if match['bcast'] else '',
- 'mask': net_prefix_to_ipv4_mask(prefix),
- 'scope': match['scope']})
- elif 'link' in line:
+ prefix = "32"
+ devs[dev_name]["ipv4"].append(
+ {
+ "ip": addr,
+ "bcast": match["bcast"] if match["bcast"] else "",
+ "mask": net_prefix_to_ipv4_mask(prefix),
+ "scope": match["scope"],
+ }
+ )
+ elif "link" in line:
m = re.match(
- r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
+ r"\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*", line
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
- if m.group('link_type') == 'ether':
- devs[dev_name]['hwaddr'] = m.group('hwaddr')
+ if m.group("link_type") == "ether":
+ devs[dev_name]["hwaddr"] = m.group("hwaddr")
else:
- devs[dev_name]['hwaddr'] = ''
+ devs[dev_name]["hwaddr"] = ""
else:
continue
return devs
@@ -101,40 +184,41 @@ def _netdev_info_ifconfig_netbsd(ifconfig_data):
if line[0] not in ("\t", " "):
curdev = line.split()[0]
# current ifconfig pops a ':' on the end of the device
- if curdev.endswith(':'):
+ if curdev.endswith(":"):
curdev = curdev[:-1]
if curdev not in devs:
devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if len(toks) > 1:
if re.search(r"flags=[x\d]+<up.*>", toks[1]):
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
for i in range(len(toks)):
if toks[i] == "inet": # Create new ipv4 addr entry
- network, net_bits = toks[i + 1].split('/')
- devs[curdev]['ipv4'].append(
- {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+ network, net_bits = toks[i + 1].split("/")
+ devs[curdev]["ipv4"].append(
+ {"ip": network, "mask": net_prefix_to_ipv4_mask(net_bits)}
+ )
elif toks[i] == "broadcast":
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i + 1]
elif toks[i] == "address:":
- devs[curdev]['hwaddr'] = toks[i + 1]
+ devs[curdev]["hwaddr"] = toks[i + 1]
elif toks[i] == "inet6":
if toks[i + 1] == "addr:":
- devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 2]})
else:
- devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 1]})
elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
- addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
- devs[curdev]['ipv6'][-1]['ip'] = addr6
+ addr6 = devs[curdev]["ipv6"][-1]["ip"] + "/" + toks[i + 1]
+ devs[curdev]["ipv6"][-1]["ip"] = addr6
elif toks[i].startswith("scope:"):
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i].lstrip("scope:")
elif toks[i] == "scopeid":
- res = re.match(r'.*<(\S+)>', toks[i + 1])
+ res = re.match(r".*<(\S+)>", toks[i + 1])
if res:
- devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ devs[curdev]["ipv6"][-1]["scope6"] = res.group(1)
else:
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i + 1]
return devs
@@ -148,49 +232,50 @@ def _netdev_info_ifconfig(ifconfig_data):
if line[0] not in ("\t", " "):
curdev = line.split()[0]
# current ifconfig pops a ':' on the end of the device
- if curdev.endswith(':'):
+ if curdev.endswith(":"):
curdev = curdev[:-1]
if curdev not in devs:
devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if toks[0] == "up":
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
# If the output of ifconfig doesn't contain the required info in the
# obvious place, use a regex filter to be sure.
elif len(toks) > 1:
if re.search(r"flags=\d+<up,", toks[1]):
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
for i in range(len(toks)):
if toks[i] == "inet": # Create new ipv4 addr entry
- devs[curdev]['ipv4'].append(
- {'ip': toks[i + 1].lstrip("addr:")})
+ devs[curdev]["ipv4"].append(
+ {"ip": toks[i + 1].lstrip("addr:")}
+ )
elif toks[i].startswith("bcast:"):
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i].lstrip("bcast:")
elif toks[i] == "broadcast":
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i + 1]
elif toks[i].startswith("mask:"):
- devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
+ devs[curdev]["ipv4"][-1]["mask"] = toks[i].lstrip("mask:")
elif toks[i] == "netmask":
- devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["mask"] = toks[i + 1]
elif toks[i] == "hwaddr" or toks[i] == "ether":
- devs[curdev]['hwaddr'] = toks[i + 1]
+ devs[curdev]["hwaddr"] = toks[i + 1]
elif toks[i] == "inet6":
if toks[i + 1] == "addr:":
- devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 2]})
else:
- devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 1]})
elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
- addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
- devs[curdev]['ipv6'][-1]['ip'] = addr6
+ addr6 = devs[curdev]["ipv6"][-1]["ip"] + "/" + toks[i + 1]
+ devs[curdev]["ipv6"][-1]["ip"] = addr6
elif toks[i].startswith("scope:"):
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i].lstrip("scope:")
elif toks[i] == "scopeid":
- res = re.match(r'.*<(\S+)>', toks[i + 1])
+ res = re.match(r".*<(\S+)>", toks[i + 1])
if res:
- devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ devs[curdev]["ipv6"][-1]["scope6"] = res.group(1)
else:
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i + 1]
return devs
@@ -200,17 +285,23 @@ def netdev_info(empty=""):
if util.is_NetBSD():
(ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
- elif subp.which('ip'):
+ elif subp.which("ip"):
# Try iproute first of all
- (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
- devs = _netdev_info_iproute(ipaddr_out)
- elif subp.which('ifconfig'):
+ try:
+ (ipaddr_out, _err) = subp.subp(["ip", "--json", "addr"])
+ devs = _netdev_info_iproute_json(ipaddr_out)
+ except subp.ProcessExecutionError:
+ # Can be removed when "ip --json" is available everywhere
+ (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
+ devs = _netdev_info_iproute(ipaddr_out)
+ elif subp.which("ifconfig"):
# Fall back to net-tools if iproute2 is not present
(ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig(ifcfg_out)
else:
LOG.warning(
- "Could not print networks: missing 'ip' and 'ifconfig' commands")
+ "Could not print networks: missing 'ip' and 'ifconfig' commands"
+ )
if empty == "":
return devs
@@ -219,7 +310,7 @@ def netdev_info(empty=""):
def fill(data, new_val="", empty_vals=("", b"")):
"""Recursively replace 'empty_vals' in data (dict, tuple, list)
- with new_val"""
+ with new_val"""
if isinstance(data, dict):
myiter = data.items()
elif isinstance(data, (tuple, list)):
@@ -249,46 +340,52 @@ def _netdev_route_info_iproute(iproute_data):
"""
routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
+ routes["ipv4"] = []
+ routes["ipv6"] = []
entries = iproute_data.splitlines()
default_route_entry = {
- 'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
- 'iface': '', 'metric': ''}
+ "destination": "",
+ "flags": "",
+ "gateway": "",
+ "genmask": "",
+ "iface": "",
+ "metric": "",
+ }
for line in entries:
entry = copy(default_route_entry)
if not line:
continue
toks = line.split()
- flags = ['U']
+ flags = ["U"]
if toks[0] == "default":
- entry['destination'] = "0.0.0.0"
- entry['genmask'] = "0.0.0.0"
+ entry["destination"] = "0.0.0.0"
+ entry["genmask"] = "0.0.0.0"
else:
- if '/' in toks[0]:
+ if "/" in toks[0]:
(addr, cidr) = toks[0].split("/")
else:
addr = toks[0]
- cidr = '32'
+ cidr = "32"
flags.append("H")
- entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
- entry['destination'] = addr
- entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
- entry['gateway'] = "0.0.0.0"
+ entry["genmask"] = net_prefix_to_ipv4_mask(cidr)
+ entry["destination"] = addr
+ entry["genmask"] = net_prefix_to_ipv4_mask(cidr)
+ entry["gateway"] = "0.0.0.0"
for i in range(len(toks)):
if toks[i] == "via":
- entry['gateway'] = toks[i + 1]
+ entry["gateway"] = toks[i + 1]
flags.insert(1, "G")
if toks[i] == "dev":
entry["iface"] = toks[i + 1]
if toks[i] == "metric":
- entry['metric'] = toks[i + 1]
- entry['flags'] = ''.join(flags)
- routes['ipv4'].append(entry)
+ entry["metric"] = toks[i + 1]
+ entry["flags"] = "".join(flags)
+ routes["ipv4"].append(entry)
try:
(iproute_data6, _err6) = subp.subp(
["ip", "--oneline", "-6", "route", "list", "table", "all"],
- rcs=[0, 1])
+ rcs=[0, 1],
+ )
except subp.ProcessExecutionError:
pass
else:
@@ -299,30 +396,30 @@ def _netdev_route_info_iproute(iproute_data):
continue
toks = line.split()
if toks[0] == "default":
- entry['destination'] = "::/0"
- entry['flags'] = "UG"
+ entry["destination"] = "::/0"
+ entry["flags"] = "UG"
else:
- entry['destination'] = toks[0]
- entry['gateway'] = "::"
- entry['flags'] = "U"
+ entry["destination"] = toks[0]
+ entry["gateway"] = "::"
+ entry["flags"] = "U"
for i in range(len(toks)):
if toks[i] == "via":
- entry['gateway'] = toks[i + 1]
- entry['flags'] = "UG"
+ entry["gateway"] = toks[i + 1]
+ entry["flags"] = "UG"
if toks[i] == "dev":
entry["iface"] = toks[i + 1]
if toks[i] == "metric":
- entry['metric'] = toks[i + 1]
+ entry["metric"] = toks[i + 1]
if toks[i] == "expires":
- entry['flags'] = entry['flags'] + 'e'
- routes['ipv6'].append(entry)
+ entry["flags"] = entry["flags"] + "e"
+ routes["ipv6"].append(entry)
return routes
def _netdev_route_info_netstat(route_data):
routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
+ routes["ipv4"] = []
+ routes["ipv6"] = []
entries = route_data.splitlines()
for line in entries:
@@ -336,9 +433,14 @@ def _netdev_route_info_netstat(route_data):
# Linux netstat shows 2 more:
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Internet6" or toks[0] == "Routing"):
+ if (
+ len(toks) < 6
+ or toks[0] == "Kernel"
+ or toks[0] == "Destination"
+ or toks[0] == "Internet"
+ or toks[0] == "Internet6"
+ or toks[0] == "Routing"
+ ):
continue
if len(toks) < 8:
toks.append("-")
@@ -346,20 +448,21 @@ def _netdev_route_info_netstat(route_data):
toks[7] = toks[5]
toks[5] = "-"
entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'genmask': toks[2],
- 'flags': toks[3],
- 'metric': toks[4],
- 'ref': toks[5],
- 'use': toks[6],
- 'iface': toks[7],
+ "destination": toks[0],
+ "gateway": toks[1],
+ "genmask": toks[2],
+ "flags": toks[3],
+ "metric": toks[4],
+ "ref": toks[5],
+ "use": toks[6],
+ "iface": toks[7],
}
- routes['ipv4'].append(entry)
+ routes["ipv4"].append(entry)
try:
(route_data6, _err6) = subp.subp(
- ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
+ ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]
+ )
except subp.ProcessExecutionError:
pass
else:
@@ -368,44 +471,52 @@ def _netdev_route_info_netstat(route_data):
if not line:
continue
toks = line.split()
- if (len(toks) < 7 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Proto" or toks[0] == "Active"):
+ if (
+ len(toks) < 7
+ or toks[0] == "Kernel"
+ or toks[0] == "Destination"
+ or toks[0] == "Internet"
+ or toks[0] == "Proto"
+ or toks[0] == "Active"
+ ):
continue
entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'flags': toks[2],
- 'metric': toks[3],
- 'ref': toks[4],
- 'use': toks[5],
- 'iface': toks[6],
+ "destination": toks[0],
+ "gateway": toks[1],
+ "flags": toks[2],
+ "metric": toks[3],
+ "ref": toks[4],
+ "use": toks[5],
+ "iface": toks[6],
}
# skip lo interface on ipv6
- if entry['iface'] == "lo":
+ if entry["iface"] == "lo":
continue
# strip /128 from address if it's included
- if entry['destination'].endswith('/128'):
- entry['destination'] = re.sub(
- r'\/128$', '', entry['destination'])
- routes['ipv6'].append(entry)
+ if entry["destination"].endswith("/128"):
+ entry["destination"] = re.sub(
+ r"\/128$", "", entry["destination"]
+ )
+ routes["ipv6"].append(entry)
return routes
def route_info():
routes = {}
- if subp.which('ip'):
+ if subp.which("ip"):
# Try iproute first of all
(iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"])
routes = _netdev_route_info_iproute(iproute_out)
- elif subp.which('netstat'):
+ elif subp.which("netstat"):
# Fall back to net-tools if iproute2 is not present
(route_out, _err) = subp.subp(
- ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
+ ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]
+ )
routes = _netdev_route_info_netstat(route_out)
else:
LOG.warning(
- "Could not print routes: missing 'ip' and 'netstat' commands")
+ "Could not print routes: missing 'ip' and 'netstat' commands"
+ )
return routes
@@ -418,24 +529,42 @@ def netdev_pformat():
lines.append(
util.center(
"Net device info failed ({error})".format(error=str(e)),
- '!', 80))
+ "!",
+ 80,
+ )
+ )
else:
if not netdev:
- return '\n'
- fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
+ return "\n"
+ fields = ["Device", "Up", "Address", "Mask", "Scope", "Hw-Address"]
tbl = SimpleTable(fields)
for (dev, data) in sorted(netdev.items()):
- for addr in data.get('ipv4'):
+ for addr in data.get("ipv4"):
tbl.add_row(
- (dev, data["up"], addr["ip"], addr["mask"],
- addr.get('scope', empty), data["hwaddr"]))
- for addr in data.get('ipv6'):
+ (
+ dev,
+ data["up"],
+ addr["ip"],
+ addr["mask"],
+ addr.get("scope", empty),
+ data["hwaddr"],
+ )
+ )
+ for addr in data.get("ipv6"):
tbl.add_row(
- (dev, data["up"], addr["ip"], empty,
- addr.get("scope6", empty), data["hwaddr"]))
- if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
- tbl.add_row((dev, data["up"], empty, empty, empty,
- data["hwaddr"]))
+ (
+ dev,
+ data["up"],
+ addr["ip"],
+ empty,
+ addr.get("scope6", empty),
+ data["hwaddr"],
+ )
+ )
+ if len(data.get("ipv6")) + len(data.get("ipv4")) == 0:
+ tbl.add_row(
+ (dev, data["up"], empty, empty, empty, data["hwaddr"])
+ )
netdev_s = tbl.get_string()
max_len = len(max(netdev_s.splitlines(), key=len))
header = util.center("Net device info", "+", max_len)
@@ -450,33 +579,59 @@ def route_pformat():
except Exception as e:
lines.append(
util.center(
- 'Route info failed ({error})'.format(error=str(e)),
- '!', 80))
+ "Route info failed ({error})".format(error=str(e)), "!", 80
+ )
+ )
util.logexc(LOG, "Route info failed: %s" % e)
else:
- if routes.get('ipv4'):
- fields_v4 = ['Route', 'Destination', 'Gateway',
- 'Genmask', 'Interface', 'Flags']
+ if routes.get("ipv4"):
+ fields_v4 = [
+ "Route",
+ "Destination",
+ "Gateway",
+ "Genmask",
+ "Interface",
+ "Flags",
+ ]
tbl_v4 = SimpleTable(fields_v4)
- for (n, r) in enumerate(routes.get('ipv4')):
+ for (n, r) in enumerate(routes.get("ipv4")):
route_id = str(n)
- tbl_v4.add_row([route_id, r['destination'],
- r['gateway'], r['genmask'],
- r['iface'], r['flags']])
+ tbl_v4.add_row(
+ [
+ route_id,
+ r["destination"],
+ r["gateway"],
+ r["genmask"],
+ r["iface"],
+ r["flags"],
+ ]
+ )
route_s = tbl_v4.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv4 info", "+", max_len)
lines.extend([header, route_s])
- if routes.get('ipv6'):
- fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
- 'Flags']
+ if routes.get("ipv6"):
+ fields_v6 = [
+ "Route",
+ "Destination",
+ "Gateway",
+ "Interface",
+ "Flags",
+ ]
tbl_v6 = SimpleTable(fields_v6)
- for (n, r) in enumerate(routes.get('ipv6')):
+ for (n, r) in enumerate(routes.get("ipv6")):
route_id = str(n)
- if r['iface'] == 'lo':
+ if r["iface"] == "lo":
continue
- tbl_v6.add_row([route_id, r['destination'],
- r['gateway'], r['iface'], r['flags']])
+ tbl_v6.add_row(
+ [
+ route_id,
+ r["destination"],
+ r["gateway"],
+ r["iface"],
+ r["flags"],
+ ]
+ )
route_s = tbl_v6.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv6 info", "+", max_len)
@@ -484,7 +639,7 @@ def route_pformat():
return "\n".join(lines) + "\n"
-def debug_info(prefix='ci-info: '):
+def debug_info(prefix="ci-info: "):
lines = []
netdev_lines = netdev_pformat().splitlines()
if prefix:
@@ -500,4 +655,5 @@ def debug_info(prefix='ci-info: '):
lines.extend(route_lines)
return "\n".join(lines)
+
# vi: ts=4 expandtab
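The new JSON code path in netdev_info() feeds the raw output of `ip --json addr` straight into _netdev_info_iproute_json(). A small sketch of the structure it parses, using a hand-written payload rather than real command output:

    import json
    from cloudinit.netinfo import _netdev_info_iproute_json

    sample = json.dumps([{
        "ifname": "eth0",
        "flags": ["UP", "LOWER_UP"],
        "link_type": "ether",
        "address": "00:16:3e:16:db:54",
        "addr_info": [
            {"family": "inet", "local": "10.85.130.116", "prefixlen": 24,
             "broadcast": "10.85.130.255", "scope": "global"},
            {"family": "inet6", "local": "fe80::216:3eff:fe16:db54",
             "prefixlen": 64, "scope": "link"},
        ],
    }])
    devs = _netdev_info_iproute_json(sample)
    # devs["eth0"] -> {"hwaddr": "00:16:3e:16:db:54", "up": True,
    #   "ipv4": [{"ip": "10.85.130.116", "mask": "255.255.255.0",
    #             "bcast": "10.85.130.255", "scope": "global"}],
    #   "ipv6": [{"ip": "fe80::216:3eff:fe16:db54/64", "scope6": "link"}]}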
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 2df9441a..516be22c 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -6,13 +6,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import imp
import logging
import sys
# Default fallback format
-FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' +
- '%(message)s')
+FALL_FORMAT = (
+ "FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: " + "%(message)s"
+)
class QuietStreamHandler(logging.StreamHandler):
@@ -20,7 +20,7 @@ class QuietStreamHandler(logging.StreamHandler):
pass
-def _patch_logging():
+def patch_logging():
# Replace 'handleError' with one that will be more
# tolerant of errors in that it can avoid
# re-notifying on exceptions and when errors
@@ -35,14 +35,8 @@ def _patch_logging():
fallback_handler.flush()
except IOError:
pass
- setattr(logging.Handler, 'handleError', handleError)
+ setattr(logging.Handler, "handleError", handleError)
-def patch():
- imp.acquire_lock()
- try:
- _patch_logging()
- finally:
- imp.release_lock()
# vi: ts=4 expandtab
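patch_logging() now installs the tolerant handleError directly, without the old imp-based locking around a private helper. The underlying pattern is plain attribute replacement on logging.Handler; a standalone sketch of the same idea:

    import logging

    def quiet_handle_error(self, record):
        # Swallow errors raised while emitting a record instead of letting
        # logging print tracebacks; cloud-init additionally tries a
        # fallback handler before giving up.
        pass

    setattr(logging.Handler, "handleError", quiet_handle_error)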
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
index 8e495641..5044e760 100644
--- a/cloudinit/registry.py
+++ b/cloudinit/registry.py
@@ -18,7 +18,8 @@ class DictRegistry(object):
"""Add item to the registry."""
if key in self._items:
raise ValueError(
- 'Item already registered with key {0}'.format(key))
+ "Item already registered with key {0}".format(key)
+ )
self._items[key] = item
def unregister_item(self, key, force=True):
@@ -36,4 +37,5 @@ class DictRegistry(object):
"""
return copy.copy(self._items)
+
# vi: ts=4 expandtab
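DictRegistry behaviour is unchanged by the reformat: register_item() refuses duplicate keys with a ValueError, and unregister_item() removes them. A minimal usage sketch:

    from cloudinit.registry import DictRegistry

    registry = DictRegistry()
    registry.register_item("logging", object())
    try:
        registry.register_item("logging", object())
    except ValueError as err:
        print(err)  # Item already registered with key logging
    registry.unregister_item("logging", force=True)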
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index ed5c7038..06b5b49f 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -13,7 +13,7 @@ from ..registry import DictRegistry
from .handlers import available_handlers
DEFAULT_CONFIG = {
- 'logging': {'type': 'log'},
+ "logging": {"type": "log"},
}
@@ -28,10 +28,11 @@ def update_configuration(config):
for handler_name, handler_config in config.items():
if not handler_config:
instantiated_handler_registry.unregister_item(
- handler_name, force=True)
+ handler_name, force=True
+ )
continue
handler_config = handler_config.copy()
- cls = available_handlers.registered_items[handler_config.pop('type')]
+ cls = available_handlers.registered_items[handler_config.pop("type")]
instantiated_handler_registry.unregister_item(handler_name)
instance = cls(**handler_config)
instantiated_handler_registry.register_item(handler_name, instance)
@@ -39,7 +40,7 @@ def update_configuration(config):
def flush_events():
for _, handler in instantiated_handler_registry.registered_items.items():
- if hasattr(handler, 'flush'):
+ if hasattr(handler, "flush"):
handler.flush()
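update_configuration() treats each top-level key as a handler name, pops "type" to pick the handler class from available_handlers, and unregisters the handler when its value is falsy. A hedged example of the expected config shape, reusing the built-in "log" type from DEFAULT_CONFIG (the handler name "console" is made up for illustration):

    from cloudinit import reporting

    reporting.update_configuration({
        "console": {"type": "log"},  # instantiate/replace a "log"-type handler under this name
        "logging": None,             # falsy value: unregister an existing handler
    })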
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index b8677c8b..e53186a3 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,12 +12,12 @@ import base64
import os.path
import time
-from . import instantiated_handler_registry, available_handlers
+from . import available_handlers, instantiated_handler_registry
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
+FINISH_EVENT_TYPE = "finish"
+START_EVENT_TYPE = "start"
-DEFAULT_EVENT_ORIGIN = 'cloudinit'
+DEFAULT_EVENT_ORIGIN = "cloudinit"
class _nameset(set):
@@ -33,8 +33,14 @@ status = _nameset(("SUCCESS", "WARN", "FAIL"))
class ReportingEvent(object):
"""Encapsulation of event formatting."""
- def __init__(self, event_type, name, description,
- origin=DEFAULT_EVENT_ORIGIN, timestamp=None):
+ def __init__(
+ self,
+ event_type,
+ name,
+ description,
+ origin=DEFAULT_EVENT_ORIGIN,
+ timestamp=None,
+ ):
self.event_type = event_type
self.name = name
self.description = description
@@ -45,22 +51,28 @@ class ReportingEvent(object):
def as_string(self):
"""The event represented as a string."""
- return '{0}: {1}: {2}'.format(
- self.event_type, self.name, self.description)
+ return "{0}: {1}: {2}".format(
+ self.event_type, self.name, self.description
+ )
def as_dict(self):
"""The event represented as a dictionary."""
- return {'name': self.name, 'description': self.description,
- 'event_type': self.event_type, 'origin': self.origin,
- 'timestamp': self.timestamp}
+ return {
+ "name": self.name,
+ "description": self.description,
+ "event_type": self.event_type,
+ "origin": self.origin,
+ "timestamp": self.timestamp,
+ }
class FinishReportingEvent(ReportingEvent):
-
- def __init__(self, name, description, result=status.SUCCESS,
- post_files=None):
+ def __init__(
+ self, name, description, result=status.SUCCESS, post_files=None
+ ):
super(FinishReportingEvent, self).__init__(
- FINISH_EVENT_TYPE, name, description)
+ FINISH_EVENT_TYPE, name, description
+ )
self.result = result
if post_files is None:
post_files = []
@@ -69,15 +81,16 @@ class FinishReportingEvent(ReportingEvent):
raise ValueError("Invalid result: %s" % result)
def as_string(self):
- return '{0}: {1}: {2}: {3}'.format(
- self.event_type, self.name, self.result, self.description)
+ return "{0}: {1}: {2}: {3}".format(
+ self.event_type, self.name, self.result, self.description
+ )
def as_dict(self):
"""The event represented as json friendly."""
data = super(FinishReportingEvent, self).as_dict()
- data['result'] = self.result
+ data["result"] = self.result
if self.post_files:
- data['files'] = _collect_file_info(self.post_files)
+ data["files"] = _collect_file_info(self.post_files)
return data
@@ -110,14 +123,16 @@ def report_event(event, excluded_handler_types=None):
handler.publish_event(event)
-def report_finish_event(event_name, event_description,
- result=status.SUCCESS, post_files=None):
+def report_finish_event(
+ event_name, event_description, result=status.SUCCESS, post_files=None
+):
"""Report a "finish" event.
See :py:func:`.report_event` for parameter details.
"""
- event = FinishReportingEvent(event_name, event_description, result,
- post_files=post_files)
+ event = FinishReportingEvent(
+ event_name, event_description, result, post_files=post_files
+ )
return report_event(event)
@@ -165,10 +180,25 @@ class ReportEventStack(object):
:param result_on_exception:
The result value to set if an exception is caught. default
value is FAIL.
+
+ :param post_files:
+ Can hold filepaths of files that are to get posted/created
+ regarding a given event. Something like success or failure information
+        in a given log file. For each filepath, if it's a valid regular file,
+        it will be read and encoded as base64 at the close of the event.
+ Default value, if None, is an empty list.
"""
- def __init__(self, name, description, message=None, parent=None,
- reporting_enabled=None, result_on_exception=status.FAIL,
- post_files=None):
+
+ def __init__(
+ self,
+ name,
+ description,
+ message=None,
+ parent=None,
+ reporting_enabled=None,
+ result_on_exception=status.FAIL,
+ post_files=None,
+ ):
self.parent = parent
self.name = name
self.description = description
@@ -188,14 +218,22 @@ class ReportEventStack(object):
self.reporting_enabled = reporting_enabled
if parent:
- self.fullname = '/'.join((parent.fullname, name,))
+ self.fullname = "/".join(
+ (
+ parent.fullname,
+ name,
+ )
+ )
else:
self.fullname = self.name
self.children = {}
def __repr__(self):
- return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
- (self.name, self.description, self.reporting_enabled))
+ return "ReportEventStack(%s, %s, reporting_enabled=%s)" % (
+ self.name,
+ self.description,
+ self.reporting_enabled,
+ )
def __enter__(self):
self.result = status.SUCCESS
@@ -243,8 +281,9 @@ class ReportEventStack(object):
if self.parent:
self.parent.children[self.name] = (result, msg)
if self.reporting_enabled:
- report_finish_event(self.fullname, msg, result,
- post_files=self.post_files)
+ report_finish_event(
+ self.fullname, msg, result, post_files=self.post_files
+ )
def _collect_file_info(files):
@@ -257,8 +296,8 @@ def _collect_file_info(files):
else:
with open(fname, "rb") as fp:
content = base64.b64encode(fp.read()).decode()
- ret.append({'path': fname, 'content': content,
- 'encoding': 'base64'})
+ ret.append({"path": fname, "content": content, "encoding": "base64"})
return ret
+
# vi: ts=4 expandtab
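A brief usage sketch for the stack-based reporting defined above; the step name, description and log path are placeholders, and the body of the block is hypothetical.

from cloudinit.reporting import events

# Entering the stack publishes a "start" event; leaving it publishes the
# matching "finish" event. Files listed in post_files are read and
# base64-encoded into the finish event, and an exception raised inside the
# block records result_on_exception (FAIL by default).
with events.ReportEventStack(
    name="example-step",
    description="illustrative provisioning step",
    post_files=["/var/log/example-step.log"],
):
    pass  # hypothetical work would go here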
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 0a8c7af3..e163e168 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -12,8 +12,8 @@ import uuid
from datetime import datetime
from cloudinit import log as logging
+from cloudinit import url_helper, util
from cloudinit.registry import DictRegistry
-from cloudinit import (url_helper, util)
LOG = logging.getLogger(__name__)
@@ -55,7 +55,8 @@ class LogHandler(ReportingHandler):
def publish_event(self, event):
logger = logging.getLogger(
- '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
+ ".".join(["cloudinit", "reporting", event.event_type, event.name])
+ )
logger.log(self.level, event.as_string())
@@ -67,15 +68,25 @@ class PrintHandler(ReportingHandler):
class WebHookHandler(ReportingHandler):
- def __init__(self, endpoint, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None, timeout=None,
- retries=None):
+ def __init__(
+ self,
+ endpoint,
+ consumer_key=None,
+ token_key=None,
+ token_secret=None,
+ consumer_secret=None,
+ timeout=None,
+ retries=None,
+ ):
super(WebHookHandler, self).__init__()
if any([consumer_key, token_key, token_secret, consumer_secret]):
self.oauth_helper = url_helper.OauthUrlHelper(
- consumer_key=consumer_key, token_key=token_key,
- token_secret=token_secret, consumer_secret=consumer_secret)
+ consumer_key=consumer_key,
+ token_key=token_key,
+ token_secret=token_secret,
+ consumer_secret=consumer_secret,
+ )
else:
self.oauth_helper = None
self.endpoint = endpoint
@@ -90,9 +101,12 @@ class WebHookHandler(ReportingHandler):
readurl = url_helper.readurl
try:
return readurl(
- self.endpoint, data=json.dumps(event.as_dict()),
+ self.endpoint,
+ data=json.dumps(event.as_dict()),
timeout=self.timeout,
- retries=self.retries, ssl_details=self.ssl_details)
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ )
except Exception:
LOG.warning("failed posting event: %s", event.as_string())
@@ -112,33 +126,35 @@ class HyperVKvpReportingHandler(ReportingHandler):
For more information, see
https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
"""
+
HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
# The maximum value size expected in Azure
HV_KVP_AZURE_MAX_VALUE_SIZE = 1024
HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
- HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
- HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
- EVENT_PREFIX = 'CLOUD_INIT'
- MSG_KEY = 'msg'
- RESULT_KEY = 'result'
- DESC_IDX_KEY = 'msg_i'
- JSON_SEPARATORS = (',', ':')
- KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+ HV_KVP_RECORD_SIZE = (
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+ )
+ EVENT_PREFIX = "CLOUD_INIT"
+ MSG_KEY = "msg"
+ RESULT_KEY = "result"
+ DESC_IDX_KEY = "msg_i"
+ JSON_SEPARATORS = (",", ":")
+ KVP_POOL_FILE_GUEST = "/var/lib/hyperv/.kvp_pool_1"
_already_truncated_pool_file = False
- def __init__(self,
- kvp_file_path=KVP_POOL_FILE_GUEST,
- event_types=None):
+ def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None):
super(HyperVKvpReportingHandler, self).__init__()
self._kvp_file_path = kvp_file_path
HyperVKvpReportingHandler._truncate_guest_pool_file(
- self._kvp_file_path)
+ self._kvp_file_path
+ )
self._event_types = event_types
self.q = queue.Queue()
self.incarnation_no = self._get_incarnation_no()
- self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
- self.incarnation_no)
+ self.event_key_prefix = "{0}|{1}".format(
+ self.EVENT_PREFIX, self.incarnation_no
+ )
self.publish_thread = threading.Thread(
target=self._publish_event_routine
)
@@ -184,7 +200,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _iterate_kvps(self, offset):
"""iterate the kvp file from the current offset."""
- with open(self._kvp_file_path, 'rb') as f:
+ with open(self._kvp_file_path, "rb") as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(offset)
record_data = f.read(self.HV_KVP_RECORD_SIZE)
@@ -200,9 +216,9 @@ class HyperVKvpReportingHandler(ReportingHandler):
CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
[|subevent_index]
"""
- return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
- event.event_type, event.name,
- uuid.uuid4())
+ return "{0}|{1}|{2}|{3}".format(
+ self.event_key_prefix, event.event_type, event.name, uuid.uuid4()
+ )
def _encode_kvp_item(self, key, value):
data = struct.pack(
@@ -220,19 +236,27 @@ class HyperVKvpReportingHandler(ReportingHandler):
record_data_len = len(record_data)
if record_data_len != self.HV_KVP_RECORD_SIZE:
raise ReportException(
- "record_data len not correct {0} {1}."
- .format(record_data_len, self.HV_KVP_RECORD_SIZE))
- k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
- .strip('\x00'))
+ "record_data len not correct {0} {1}.".format(
+ record_data_len, self.HV_KVP_RECORD_SIZE
+ )
+ )
+ k = (
+ record_data[0 : self.HV_KVP_EXCHANGE_MAX_KEY_SIZE]
+ .decode("utf-8")
+ .strip("\x00")
+ )
v = (
record_data[
- self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE : self.HV_KVP_RECORD_SIZE
+ ]
+ .decode("utf-8")
+ .strip("\x00")
+ )
- return {'key': k, 'value': v}
+ return {"key": k, "value": v}
def _append_kvp_item(self, record_data):
- with open(self._kvp_file_path, 'ab') as f:
+ with open(self._kvp_file_path, "ab") as f:
fcntl.flock(f, fcntl.LOCK_EX)
for data in record_data:
f.write(data)
@@ -242,22 +266,25 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _break_down(self, key, meta_data, description):
del meta_data[self.MSG_KEY]
des_in_json = json.dumps(description)
- des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+ des_in_json = des_in_json[1 : (len(des_in_json) - 1)]
i = 0
result_array = []
- message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+ message_place_holder = '"' + self.MSG_KEY + '":""'
while True:
meta_data[self.DESC_IDX_KEY] = i
- meta_data[self.MSG_KEY] = ''
- data_without_desc = json.dumps(meta_data,
- separators=self.JSON_SEPARATORS)
+ meta_data[self.MSG_KEY] = ""
+ data_without_desc = json.dumps(
+ meta_data, separators=self.JSON_SEPARATORS
+ )
room_for_desc = (
- self.HV_KVP_AZURE_MAX_VALUE_SIZE -
- len(data_without_desc) - 8)
+ self.HV_KVP_AZURE_MAX_VALUE_SIZE - len(data_without_desc) - 8
+ )
value = data_without_desc.replace(
message_place_holder,
'"{key}":"{desc}"'.format(
- key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]
+ ),
+ )
subkey = "{}|{}".format(key, i)
result_array.append(self._encode_kvp_item(subkey, value))
i += 1
@@ -276,8 +303,9 @@ class HyperVKvpReportingHandler(ReportingHandler):
meta_data = {
"name": event.name,
"type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
+ "ts": (
+ datetime.utcfromtimestamp(event.timestamp).isoformat() + "Z"
+ ),
}
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
@@ -327,14 +355,14 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.q.put(event)
def flush(self):
- LOG.debug('HyperVReportingHandler flushing remaining events')
+ LOG.debug("HyperVReportingHandler flushing remaining events")
self.q.join()
available_handlers = DictRegistry()
-available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', PrintHandler)
-available_handlers.register_item('webhook', WebHookHandler)
-available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
+available_handlers.register_item("log", LogHandler)
+available_handlers.register_item("print", PrintHandler)
+available_handlers.register_item("webhook", WebHookHandler)
+available_handlers.register_item("hyperv", HyperVKvpReportingHandler)
# vi: ts=4 expandtab
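As a rough sketch, the webhook handler registered above can also be constructed and exercised directly; the endpoint is a placeholder, and a failed POST only logs a warning.

from cloudinit.reporting import events
from cloudinit.reporting.handlers import WebHookHandler

handler = WebHookHandler(
    endpoint="https://203.0.113.10/cloud-init-events",  # placeholder URL
    timeout=5,
    retries=2,
)
# publish_event() JSON-encodes event.as_dict() and POSTs it via readurl();
# any exception is caught and logged as "failed posting event".
handler.publish_event(
    events.ReportingEvent("start", "example/step", "illustrative event")
)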
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index d6f5f95b..ba0e88c8 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -15,8 +15,9 @@ class _CustomSafeLoader(yaml.SafeLoader):
_CustomSafeLoader.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- _CustomSafeLoader.construct_python_unicode)
+ "tag:yaml.org,2002:python/unicode",
+ _CustomSafeLoader.construct_python_unicode,
+)
class NoAliasSafeDumper(yaml.dumper.SafeDumper):
@@ -27,19 +28,21 @@ class NoAliasSafeDumper(yaml.dumper.SafeDumper):
def load(blob):
- return(yaml.load(blob, Loader=_CustomSafeLoader))
+ return yaml.load(blob, Loader=_CustomSafeLoader)
def dumps(obj, explicit_start=True, explicit_end=True, noalias=False):
"""Return data in nicely formatted yaml."""
- return yaml.dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- default_flow_style=False,
- Dumper=(NoAliasSafeDumper
- if noalias else yaml.dumper.Dumper))
+ return yaml.dump(
+ obj,
+ line_break="\n",
+ indent=4,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ default_flow_style=False,
+ Dumper=(NoAliasSafeDumper if noalias else yaml.dumper.Dumper),
+ )
+
# vi: ts=4 expandtab
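A short round-trip example for the helpers above; the sample data is illustrative.

from cloudinit import safeyaml

blob = safeyaml.dumps({"datasource_list": ["NoCloud", "None"]}, noalias=True)
data = safeyaml.load(blob)
assert data["datasource_list"] == ["NoCloud", "None"]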
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
index 67486e09..a6f710ef 100644
--- a/cloudinit/serial.py
+++ b/cloudinit/serial.py
@@ -16,22 +16,31 @@ except ImportError:
@staticmethod
def write(data):
- raise IOError("Unable to perform serial `write` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `write` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def readline():
- raise IOError("Unable to perform serial `readline` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `readline` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def flush():
- raise IOError("Unable to perform serial `flush` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `flush` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def read(size=1):
- raise IOError("Unable to perform serial `read` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `read` operation,"
+ " pyserial not installed."
+ )
+
# vi: ts=4 expandtab
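The same graceful-degradation pattern in miniature, as a standalone sketch: when an optional dependency is missing, expose a stub whose methods only fail once they are actually used.

try:
    from serial import Serial  # pyserial is optional at runtime
except ImportError:
    class Serial:  # fallback stub, mirroring the approach above
        def __init__(self, *args, **kwargs):
            pass

        def write(self, data):
            raise IOError(
                "Unable to perform serial `write` operation,"
                " pyserial not installed."
            )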
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index ca4ffa8e..ecc1403b 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -12,50 +12,55 @@
CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
-CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
+CLOUD_CONFIG = "/etc/cloud/cloud.cfg"
-RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg'
+RUN_CLOUD_CONFIG = "/run/cloud-init/cloud.cfg"
# What u get if no config is provided
CFG_BUILTIN = {
- 'datasource_list': [
- 'NoCloud',
- 'ConfigDrive',
- 'OpenNebula',
- 'DigitalOcean',
- 'Azure',
- 'AltCloud',
- 'OVF',
- 'MAAS',
- 'GCE',
- 'OpenStack',
- 'AliYun',
- 'Ec2',
- 'CloudSigma',
- 'CloudStack',
- 'SmartOS',
- 'Bigstep',
- 'Scaleway',
- 'Hetzner',
- 'IBMCloud',
- 'Oracle',
- 'Exoscale',
- 'RbxCloud',
+ "datasource_list": [
+ "NoCloud",
+ "ConfigDrive",
+ "LXD",
+ "OpenNebula",
+ "DigitalOcean",
+ "Azure",
+ "AltCloud",
+ "OVF",
+ "MAAS",
+ "GCE",
+ "OpenStack",
+ "AliYun",
+ "Vultr",
+ "Ec2",
+ "CloudSigma",
+ "CloudStack",
+ "SmartOS",
+ "Bigstep",
+ "Scaleway",
+ "Hetzner",
+ "IBMCloud",
+ "Oracle",
+ "Exoscale",
+ "RbxCloud",
+ "UpCloud",
+ "VMware",
# At the end to act as a 'catch' when none of the above work...
- 'None',
+ "None",
],
- 'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
- 'system_info': {
- 'paths': {
- 'cloud_dir': '/var/lib/cloud',
- 'templates_dir': '/etc/cloud/templates/',
+ "def_log_file": "/var/log/cloud-init.log",
+ "log_cfgs": [],
+ "syslog_fix_perms": ["syslog:adm", "root:adm", "root:wheel", "root:root"],
+ "system_info": {
+ "paths": {
+ "cloud_dir": "/var/lib/cloud",
+ "templates_dir": "/etc/cloud/templates/",
},
- 'distro': 'ubuntu',
- 'network': {'renderers': None},
+ "distro": "ubuntu",
+ "network": {"renderers": None},
},
- 'vendor_data': {'enabled': True, 'prefix': []},
+ "vendor_data": {"enabled": True, "prefix": []},
+ "vendor_data2": {"enabled": True, "prefix": []},
}
# Valid frequencies of handlers/modules
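For orientation, a small read-only sketch of the defaults defined above; the printed values simply restate what this hunk adds.

from cloudinit.settings import CFG_BUILTIN, CLOUD_CONFIG, RUN_CLOUD_CONFIG

print(CLOUD_CONFIG)                        # /etc/cloud/cloud.cfg
print(RUN_CLOUD_CONFIG)                    # /run/cloud-init/cloud.cfg
print(CFG_BUILTIN["datasource_list"][-1])  # "None" acts as the catch-all
print(CFG_BUILTIN["vendor_data2"])         # added alongside vendor_data here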
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 9272d22d..382c4616 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -20,11 +20,11 @@ LOG = logging.getLogger(__name__)
BACK_FRAME_TRACE_DEPTH = 3
EXIT_FOR = {
- signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1),
- signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1),
+ signal.SIGINT: ("Cloud-init %(version)s received SIGINT, exiting...", 1),
+ signal.SIGTERM: ("Cloud-init %(version)s received SIGTERM, exiting...", 1),
# Can't be caught...
# signal.SIGKILL: ('Cloud-init killed, exiting...', 1),
- signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1),
+ signal.SIGABRT: ("Cloud-init %(version)s received SIGABRT, exiting...", 1),
}
@@ -41,12 +41,11 @@ def _pprint_frame(frame, depth, max_depth, contents):
def _handle_exit(signum, frame):
(msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version_string()})
+ msg = msg % ({"version": vr.version_string()})
contents = StringIO()
contents.write("%s\n" % (msg))
_pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
- util.multi_log(contents.getvalue(),
- console=True, stderr=False, log=LOG)
+ util.multi_log(contents.getvalue(), console=True, stderr=False, log=LOG)
sys.exit(rc)
@@ -57,4 +56,5 @@ def attach_handlers():
sigs_attached += len(EXIT_FOR)
return sigs_attached
+
# vi: ts=4 expandtab
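A tiny illustration of the message formatting used by _handle_exit() above.

import signal

from cloudinit import version as vr
from cloudinit.signal_handler import EXIT_FOR

msg, rc = EXIT_FOR[signal.SIGTERM]
print(msg % {"version": vr.version_string()}, "-> exit code", rc)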
diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py
index ca663cce..90281e06 100644
--- a/cloudinit/simpletable.py
+++ b/cloudinit/simpletable.py
@@ -22,27 +22,33 @@ class SimpleTable(object):
def update_column_widths(self, values):
for i, value in enumerate(values):
- self.column_widths[i] = max(
- len(value),
- self.column_widths[i])
+ self.column_widths[i] = max(len(value), self.column_widths[i])
def add_row(self, values):
if len(values) > len(self.fields):
- raise TypeError('too many values')
+ raise TypeError("too many values")
values = [str(value) for value in values]
self.rows.append(values)
self.update_column_widths(values)
def _hdiv(self):
"""Returns a horizontal divider for the table."""
- return '+' + '+'.join(
- ['-' * (w + 2) for w in self.column_widths]) + '+'
+ return (
+ "+" + "+".join(["-" * (w + 2) for w in self.column_widths]) + "+"
+ )
def _row(self, row):
"""Returns a formatted row."""
- return '|' + '|'.join(
- [col.center(self.column_widths[i] + 2)
- for i, col in enumerate(row)]) + '|'
+ return (
+ "|"
+ + "|".join(
+ [
+ col.center(self.column_widths[i] + 2)
+ for i, col in enumerate(row)
+ ]
+ )
+ + "|"
+ )
def __str__(self):
"""Returns a string representation of the table with lines around.
@@ -56,7 +62,7 @@ class SimpleTable(object):
"""
lines = [self._hdiv(), self._row(self.fields), self._hdiv()]
lines += [self._row(r) for r in self.rows] + [self._hdiv()]
- return '\n'.join(lines)
+ return "\n".join(lines)
def get_string(self):
return self.__str__()
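A quick sketch of the renderer above, assuming the constructor takes the list of field names (as the attribute usage suggests); the rows are made up.

from cloudinit.simpletable import SimpleTable

table = SimpleTable(["Device", "Mount", "Size"])
table.add_row(["/dev/sda1", "/", "30G"])
table.add_row(["/dev/sdb1", "/mnt", "4G"])
print(table.get_string())  # bordered, center-aligned text table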
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 09052873..37f512e3 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import sources
+from cloudinit import dmi, sources
from cloudinit.sources import DataSourceEc2 as EC2
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
@@ -9,18 +8,18 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- dsname = 'AliYun'
- metadata_urls = ['http://100.100.100.200']
+ dsname = "AliYun"
+ metadata_urls = ["http://100.100.100.200"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2016-01-01'
+ min_metadata_version = "2016-01-01"
extended_metadata_versions = []
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata.get('hostname', 'localhost.localdomain')
+ return self.metadata.get("hostname", "localhost.localdomain")
def get_public_ssh_keys(self):
- return parse_public_keys(self.metadata.get('public-keys', {}))
+ return parse_public_keys(self.metadata.get("public-keys", {}))
def _get_cloud_name(self):
if _is_aliyun():
@@ -30,7 +29,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def _is_aliyun():
- return dmi.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
+ return dmi.read_dmi_data("system-product-name") == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
@@ -41,7 +40,7 @@ def parse_public_keys(public_keys):
elif isinstance(key_body, list):
keys.extend(key_body)
elif isinstance(key_body, dict):
- key = key_body.get('openssh-key', [])
+ key = key_body.get("openssh-key", [])
if isinstance(key, str):
keys.append(key.strip())
elif isinstance(key, list):
@@ -59,4 +58,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
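A sketch of the key flattening shown above; the metadata shape mirrors the EC2-style public-keys tree and the key material is a placeholder.

from cloudinit.sources.DataSourceAliYun import parse_public_keys

public_keys = {
    "0": {"openssh-key": "ssh-rsa AAAA...example user@host\n"},
    "1": ["ssh-ed25519 BBBB...example admin@host"],
}
print(parse_public_keys(public_keys))
# ['ssh-rsa AAAA...example user@host', 'ssh-ed25519 BBBB...example admin@host']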
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index cd93412a..9029b535 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -7,10 +7,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This file contains code used to gather the user data passed to an
instance on RHEVm and vSphere.
-'''
+"""
import errno
import os
@@ -18,29 +18,26 @@ import os.path
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
LOG = logging.getLogger(__name__)
# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
# Shell command lists
-CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_PROBE_FLOPPY = ["modprobe", "floppy"]
META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
+ "block-device-mapping": {},
+ "instance-id": 455,
+ "local-hostname": "localhost",
+ "placement": {},
}
def read_user_data_callback(mount_dir):
- '''
+ """
Description:
This callback will be applied by util.mount_cb() on the mounted
file.
@@ -55,10 +52,10 @@ def read_user_data_callback(mount_dir):
Returns:
User Data
- '''
+ """
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# First try deltacloud_user_data_file. On failure try user_data_file.
try:
@@ -67,7 +64,7 @@ def read_user_data_callback(mount_dir):
try:
user_data = util.load_file(user_data_file).strip()
except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
+ util.logexc(LOG, "Failed accessing user data file.")
return None
return user_data
@@ -75,7 +72,7 @@ def read_user_data_callback(mount_dir):
class DataSourceAltCloud(sources.DataSource):
- dsname = 'AltCloud'
+ dsname = "AltCloud"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -87,7 +84,7 @@ class DataSourceAltCloud(sources.DataSource):
return "%s [seed=%s]" % (root, self.seed)
def get_cloud_type(self):
- '''
+ """
Description:
Get the type for the cloud back end this instance is running on
by examining the string returned by reading either:
@@ -101,31 +98,34 @@ class DataSourceAltCloud(sources.DataSource):
One of the following strings:
'RHEV', 'VSPHERE' or 'UNKNOWN'
- '''
+ """
if os.path.exists(CLOUD_INFO_FILE):
try:
cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return 'UNKNOWN'
+ util.logexc(
+ LOG,
+ "Unable to access cloud info file at %s.",
+ CLOUD_INFO_FILE,
+ )
+ return "UNKNOWN"
return cloud_type
system_name = dmi.read_dmi_data("system-product-name")
if not system_name:
- return 'UNKNOWN'
+ return "UNKNOWN"
sys_name = system_name.upper()
- if sys_name.startswith('RHEV'):
- return 'RHEV'
+ if sys_name.startswith("RHEV"):
+ return "RHEV"
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
+ if sys_name.startswith("VMWARE"):
+ return "VSPHERE"
- return 'UNKNOWN'
+ return "UNKNOWN"
def _get_data(self):
- '''
+ """
Description:
User Data is passed to the launching instance which
is used to perform instance configuration.
@@ -140,18 +140,18 @@ class DataSourceAltCloud(sources.DataSource):
Images not built with Imagefactory will try to
determine what the cloud provider is based on system
information.
- '''
+ """
- LOG.debug('Invoked get_data()')
+ LOG.debug("Invoked get_data()")
cloud_type = self.get_cloud_type()
- LOG.debug('cloud_type: %s', str(cloud_type))
+ LOG.debug("cloud_type: %s", str(cloud_type))
- if 'RHEV' in cloud_type:
+ if "RHEV" in cloud_type:
if self.user_data_rhevm():
return True
- elif 'VSPHERE' in cloud_type:
+ elif "VSPHERE" in cloud_type:
if self.user_data_vsphere():
return True
else:
@@ -160,20 +160,20 @@ class DataSourceAltCloud(sources.DataSource):
return False
# No user data found
- util.logexc(LOG, 'Failed accessing user data.')
+ util.logexc(LOG, "Failed accessing user data.")
return False
def _get_subplatform(self):
"""Return the subplatform metadata details."""
cloud_type = self.get_cloud_type()
- if not hasattr(self, 'source'):
+ if not hasattr(self, "source"):
self.source = sources.METADATA_UNKNOWN
- if cloud_type == 'RHEV':
- self.source = '/dev/fd0'
- return '%s (%s)' % (cloud_type.lower(), self.source)
+ if cloud_type == "RHEV":
+ self.source = "/dev/fd0"
+ return "%s (%s)" % (cloud_type.lower(), self.source)
def user_data_rhevm(self):
- '''
+ """
RHEVM specific userdata read
If on RHEV-M the user data will be contained on the
@@ -186,7 +186,7 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
@@ -194,16 +194,16 @@ class DataSourceAltCloud(sources.DataSource):
try:
modprobe_floppy()
except subp.ProcessExecutionError as e:
- util.logexc(LOG, 'Failed modprobe: %s', e)
+ util.logexc(LOG, "Failed modprobe: %s", e)
return False
- floppy_dev = '/dev/fd0'
+ floppy_dev = "/dev/fd0"
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
except (subp.ProcessExecutionError, OSError) as e:
- util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
+ util.logexc(LOG, "Failed udevadm_settle: %s\n", e)
return False
try:
@@ -212,8 +212,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ floppy_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -224,7 +227,7 @@ class DataSourceAltCloud(sources.DataSource):
return False
def user_data_vsphere(self):
- '''
+ """
vSphere specific userdata read
If on vSphere the user data will be contained on the
@@ -235,10 +238,10 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
+ cdrom_list = util.find_devs_with("LABEL=CDROM")
for cdrom_dev in cdrom_list:
try:
return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
@@ -249,8 +252,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ cdrom_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -263,7 +269,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
out, _err = subp.subp(CMD_PROBE_FLOPPY)
- LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
+ LOG.debug("Command: %s\nOutput%s", " ".join(CMD_PROBE_FLOPPY), out)
# Used to match classes to dependencies
@@ -279,4 +285,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
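The DMI-based branch of get_cloud_type() above, restated as a standalone sketch; the helper name is made up, and on a real system the override file at /etc/sysconfig/cloud-info is consulted first.

from cloudinit import dmi

def guess_altcloud_type():  # illustrative restatement, not the shipped helper
    name = dmi.read_dmi_data("system-product-name") or ""
    upper = name.upper()
    if upper.startswith("RHEV"):
        return "RHEV"
    if upper.startswith("VMWARE"):
        return "VSPHERE"
    return "UNKNOWN"

print(guess_altcloud_type())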
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 04ff2131..359dfbde 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,101 +5,93 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import contextlib
import crypt
-from functools import partial
+import datetime
import os
import os.path
import re
-from time import time
-from time import sleep
-from xml.dom import minidom
import xml.etree.ElementTree as ET
from enum import Enum
+from time import sleep, time
+from typing import Any, Dict, List, Optional
+from xml.dom import minidom
+
+import requests
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.event import EventType
+from cloudinit import net, sources, ssh_util, subp, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
-from cloudinit.net.dhcp import EphemeralDHCPv4
-from cloudinit import sources
-from cloudinit.sources.helpers import netlink
-from cloudinit import subp
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
-from cloudinit import util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit.reporting import events
-
+from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
+ DEFAULT_WIRESERVER_ENDPOINT,
azure_ds_reporter,
azure_ds_telemetry_reporter,
- get_metadata_from_fabric,
+ build_minimal_ovf,
+ dhcp_log_cb,
get_boot_telemetry,
+ get_metadata_from_fabric,
get_system_info,
- report_diagnostic_event,
- EphemeralDHCPv4WithReporting,
is_byte_swapped,
- dhcp_log_cb,
push_log_to_kvp,
- report_failure_to_fabric)
+ report_diagnostic_event,
+ report_failure_to_fabric,
+)
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
LOG = logging.getLogger(__name__)
-DS_NAME = 'Azure'
+DS_NAME = "Azure"
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
-AGENT_START_BUILTIN = "__builtin__"
-BOUNCE_COMMAND_IFUP = [
- 'sh', '-xc',
- "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
-]
-BOUNCE_COMMAND_FREEBSD = [
- 'sh', '-xc',
- ("i=$interface; x=0; ifconfig down $i || x=$?; "
- "ifconfig up $i || x=$?; exit $x")
-]
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
-RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
-DEFAULT_PRIMARY_NIC = 'eth0'
-LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
-DEFAULT_FS = 'ext4'
+RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
+LEASE_FILE = "/var/lib/dhcp/dhclient.eth0.leases"
+DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
-AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
-REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-AGENT_SEED_DIR = '/var/lib/waagent'
-
+AGENT_SEED_DIR = "/var/lib/waagent"
+DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
IMDS_TIMEOUT_IN_SECONDS = 2
-IMDS_URL = "http://169.254.169.254/metadata/"
-IMDS_VER = "2019-06-01"
-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER)
+IMDS_URL = "http://169.254.169.254/metadata"
+IMDS_VER_MIN = "2019-06-01"
+IMDS_VER_WANT = "2021-08-01"
+IMDS_EXTENDED_VER_MIN = "2021-03-01"
+
+class MetadataType(Enum):
+ ALL = "{}/instance".format(IMDS_URL)
+ NETWORK = "{}/instance/network".format(IMDS_URL)
+ REPROVISION_DATA = "{}/reprovisiondata".format(IMDS_URL)
-class metadata_type(Enum):
- compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM)
- network = "{}instance/network?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
- reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
+class PPSType(Enum):
+ NONE = "None"
+ RUNNING = "Running"
+ SAVABLE = "Savable"
+ UNKNOWN = "Unknown"
-PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+
+PLATFORM_ENTROPY_SOURCE: Optional[str] = "/sys/firmware/acpi/tables/OEM0"
# List of static scripts and network config artifacts created by
# stock ubuntu supported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
- '/etc/netplan/90-hotplug-azure.yaml',
- '/usr/local/sbin/ephemeral_eth.sh',
- '/etc/udev/rules.d/10-net-device-added.rules',
- '/run/network/interfaces.ephemeral.d',
+ "/etc/netplan/90-hotplug-azure.yaml",
+ "/usr/local/sbin/ephemeral_eth.sh",
+ "/etc/udev/rules.d/10-net-device-added.rules",
+ "/run/network/interfaces.ephemeral.d",
]
# This list is used to blacklist devices that will be considered
@@ -119,7 +111,7 @@ UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv2-dsv2-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv3-dsv3-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/ev3-esv3-series
-BLACKLIST_DRIVERS = ['mlx4_core', 'mlx5_core']
+BLACKLIST_DRIVERS = ["mlx4_core", "mlx5_core"]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -133,11 +125,13 @@ def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
if re.search(r"pnpinfo", line):
fields = line.split()
if len(fields) >= 3:
- columns = fields[2].split('=')
- if (len(columns) >= 2 and
- columns[0] == "deviceid" and
- columns[1].startswith(deviceid)):
- comps = fields[0].split('.')
+ columns = fields[2].split("=")
+ if (
+ len(columns) >= 2
+ and columns[0] == "deviceid"
+ and columns[1].startswith(deviceid)
+ ):
+ comps = fields[0].split(".")
return comps[2]
return None
@@ -161,7 +155,7 @@ def find_busdev_from_disk(camcontrol_out, disk_drv):
return None
-def find_dev_from_busdev(camcontrol_out, busdev):
+def find_dev_from_busdev(camcontrol_out: str, busdev: str) -> Optional[str]:
# find the daX from 'camcontrol devlist' output
# if busdev matches the specified value, i.e. 'scbus2'
"""
@@ -171,18 +165,38 @@ def find_dev_from_busdev(camcontrol_out, busdev):
"""
for line in camcontrol_out.splitlines():
if re.search(busdev, line):
- items = line.split('(')
+ items = line.split("(")
if len(items) == 2:
- dev_pass = items[1].split(',')
+ dev_pass = items[1].split(",")
return dev_pass[0]
return None
-def execute_or_debug(cmd, fail_ret=None):
+def normalize_mac_address(mac: str) -> str:
+ """Normalize mac address with colons and lower-case."""
+ if len(mac) == 12:
+ mac = ":".join(
+ [mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12]]
+ )
+
+ return mac.lower()
+
+
+@azure_ds_telemetry_reporter
+def get_hv_netvsc_macs_normalized() -> List[str]:
+ """Get Hyper-V NICs as normalized MAC addresses."""
+ return [
+ normalize_mac_address(n[1])
+ for n in net.get_interfaces()
+ if n[2] == "hv_netvsc"
+ ]
+
+
+def execute_or_debug(cmd, fail_ret=None) -> str:
try:
- return subp.subp(cmd)[0]
+ return subp.subp(cmd)[0] # type: ignore
except subp.ProcessExecutionError:
- LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
@@ -191,14 +205,14 @@ def get_dev_storvsc_sysctl():
def get_camcontrol_dev_bus():
- return execute_or_debug(['camcontrol', 'devlist', '-b'])
+ return execute_or_debug(["camcontrol", "devlist", "-b"])
def get_camcontrol_dev():
- return execute_or_debug(['camcontrol', 'devlist'])
+ return execute_or_debug(["camcontrol", "devlist"])
-def get_resource_disk_on_freebsd(port_id):
+def get_resource_disk_on_freebsd(port_id) -> Optional[str]:
g0 = "00000000"
if port_id > 1:
g0 = "00000001"
@@ -242,9 +256,8 @@ def get_resource_disk_on_freebsd(port_id):
# update the FreeBSD specific information
if util.is_FreeBSD():
- DEFAULT_PRIMARY_NIC = 'hn0'
- LEASE_FILE = '/var/db/dhclient.leases.hn0'
- DEFAULT_FS = 'freebsd-ufs'
+ LEASE_FILE = "/var/db/dhclient.leases.hn0"
+ DEFAULT_FS = "freebsd-ufs"
res_disk = get_resource_disk_on_freebsd(1)
if res_disk is not None:
LOG.debug("resource disk is not None")
@@ -255,186 +268,152 @@ if util.is_FreeBSD():
PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START_BUILTIN,
- 'data_dir': AGENT_SEED_DIR,
- 'set_hostname': True,
- 'hostname_bounce': {
- 'interface': DEFAULT_PRIMARY_NIC,
- 'policy': True,
- 'command': 'builtin',
- 'hostname_command': 'hostname',
- },
- 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
- 'dhclient_lease_file': LEASE_FILE,
- 'apply_network_config': True, # Use IMDS published network configuration
+ "data_dir": AGENT_SEED_DIR,
+ "disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
+ "dhclient_lease_file": LEASE_FILE,
+ "apply_network_config": True, # Use IMDS published network configuration
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
+BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "gpt",
+ "layout": [100],
+ "overwrite": True,
+ },
},
- 'fs_setup': [{'filesystem': DEFAULT_FS,
- 'device': 'ephemeral0.1'}],
+ "fs_setup": [{"filesystem": DEFAULT_FS, "device": "ephemeral0.1"}],
}
-DS_CFG_PATH = ['datasource', DS_NAME]
-DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+DS_CFG_PATH = ["datasource", DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = "never_destroy_ntfs"
+DEF_EPHEMERAL_LABEL = "Temporary Storage"
# The redacted password fails to meet password complexity requirements
# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
-
-
-def get_hostname(hostname_command='hostname'):
- if not isinstance(hostname_command, (list, tuple)):
- hostname_command = (hostname_command,)
- return subp.subp(hostname_command, capture=True)[0].strip()
-
-
-def set_hostname(hostname, hostname_command='hostname'):
- subp.subp([hostname_command, hostname])
-
-
-@azure_ds_telemetry_reporter
-@contextlib.contextmanager
-def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
- """
- Set a temporary hostname, restoring the previous hostname on exit.
-
- Will have the value of the previous hostname when used as a context
- manager, or None if the hostname was not changed.
- """
- policy = cfg['hostname_bounce']['policy']
- previous_hostname = get_hostname(hostname_command)
- if (not util.is_true(cfg.get('set_hostname')) or
- util.is_false(policy) or
- (previous_hostname == temp_hostname and policy != 'force')):
- yield None
- return
- try:
- set_hostname(temp_hostname, hostname_command)
- except Exception as e:
- report_diagnostic_event(
- 'Failed setting temporary hostname: %s' % e,
- logger_func=LOG.warning)
- yield None
- return
- try:
- yield previous_hostname
- finally:
- set_hostname(previous_hostname, hostname_command)
+DEF_PASSWD_REDACTION = "REDACTED"
class DataSourceAzure(sources.DataSource):
- dsname = 'Azure'
+ dsname = "Azure"
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ }
+ }
_negotiated = False
_metadata_imds = sources.UNSET
+ _ci_pkl_version = 1
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
+ self.seed_dir = os.path.join(paths.seed_dir, "azure")
self.cfg = {}
self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
- self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self.ds_cfg = util.mergemanydict(
+ [util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]
+ )
+ self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
+ self._iso_dev = None
self._network_config = None
- # Regenerate network config new_instance boot and every boot
- self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
+ self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ super()._unpickle(ci_pkl_version)
+
+ self._ephemeral_dhcp_ctx = None
+ self._iso_dev = None
+ self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
- @azure_ds_telemetry_reporter
- def bounce_network_with_azure_hostname(self):
- # When using cloud-init to provision, we have to set the hostname from
- # the metadata and "bounce" the network to force DDNS to update via
- # dhclient
- azure_hostname = self.metadata.get('local-hostname')
- LOG.debug("Hostname in metadata is %s", azure_hostname)
- hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
-
- with temporary_hostname(azure_hostname, self.ds_cfg,
- hostname_command=hostname_command) \
- as previous_hn:
- if (previous_hn is not None and
- util.is_true(self.ds_cfg.get('set_hostname'))):
- cfg = self.ds_cfg['hostname_bounce']
-
- # "Bouncing" the network
- try:
- return perform_hostname_bounce(hostname=azure_hostname,
- cfg=cfg,
- prev_hostname=previous_hn)
- except Exception as e:
- report_diagnostic_event(
- "Failed publishing hostname: %s" % e,
- logger_func=LOG.warning)
- util.logexc(LOG, "handling set_hostname failed")
- return False
+ def _get_subplatform(self):
+ """Return the subplatform metadata source details."""
+ if self.seed is None:
+ subplatform_type = "unknown"
+ elif self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
+ elif self.seed.lower() == "imds":
+ subplatform_type = "imds"
+ else:
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
@azure_ds_telemetry_reporter
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
- agent_cmd = self.ds_cfg['agent_command']
- LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
- temp_hostname, agent_cmd)
+ def _setup_ephemeral_networking(
+ self, *, iface: Optional[str] = None, timeout_minutes: int = 5
+ ) -> None:
+ """Setup ephemeral networking.
- self.bounce_network_with_azure_hostname()
+ Keep retrying DHCP up to specified number of minutes. This does
+ not kill dhclient, so the timeout in practice may be up to
+ timeout_minutes + the system-configured timeout for dhclient.
- try:
- invoke_agent(agent_cmd)
- except subp.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("SSH authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("SSH authentication: "
- "using fingerprint from fabric")
+ :param timeout_minutes: Number of minutes to keep retrying for.
+
+ :raises NoDHCPLeaseError: If unable to obtain DHCP lease.
+ """
+ if self._ephemeral_dhcp_ctx is not None:
+ raise RuntimeError(
+ "Bringing up networking when already configured."
+ )
+
+ LOG.debug("Requested ephemeral networking (iface=%s)", iface)
+
+ start = datetime.datetime.utcnow()
+ timeout = start + datetime.timedelta(minutes=timeout_minutes)
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ iface=iface, dhcp_log_func=dhcp_log_cb
+ )
+
+ lease = None
with events.ReportEventStack(
- name="waiting-for-ssh-public-key",
- description="wait for agents to retrieve SSH keys",
- parent=azure_ds_reporter):
- # wait very long for public SSH keys to arrive
- # https://bugs.launchpad.net/cloud-init/+bug/1717611
- missing = util.log_time(logfunc=LOG.debug,
- msg="waiting for SSH public key files",
- func=util.wait_for_files,
- args=(fp_files, 900))
- if len(missing):
- LOG.warning("Did not find files, but going on: %s", missing)
-
- metadata = {}
- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
- return metadata
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter,
+ ):
+ while datetime.datetime.utcnow() < timeout:
+ try:
+ lease = self._ephemeral_dhcp_ctx.obtain_lease()
+ break
+ except NoDHCPLeaseError:
+ continue
- def _get_subplatform(self):
- """Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
- else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ if lease is None:
+ msg = "Failed to obtain DHCP lease (iface=%s)" % iface
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ self._ephemeral_dhcp_ctx = None
+ raise NoDHCPLeaseError()
+ else:
+ # Ensure iface is set.
+ self._ephemeral_dhcp_ctx.iface = lease["interface"]
+
+ # Update wireserver IP from DHCP options.
+ if "unknown-245" in lease:
+ self._wireserver_endpoint = lease["unknown-245"]
+
+ @azure_ds_telemetry_reporter
+ def _teardown_ephemeral_networking(self) -> None:
+ """Teardown ephemeral networking."""
+ if self._ephemeral_dhcp_ctx is None:
+ return
+
+ self._ephemeral_dhcp_ctx.clean_network()
+ self._ephemeral_dhcp_ctx = None
+
+ def _is_ephemeral_networking_up(self) -> bool:
+ """Check if networking is configured."""
+ return not (
+ self._ephemeral_dhcp_ctx is None
+ or self._ephemeral_dhcp_ctx.lease is None
+ )
@azure_ds_telemetry_reporter
def crawl_metadata(self):
@@ -448,126 +427,205 @@ class DataSourceAzure(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
+ ddir = self.ds_cfg["data_dir"]
# The order in which the candidates are inserted matters here, because
# it determines the value of ret. More specifically, the first one in
# the candidate list determines the path to take in order to get the
# metadata we need.
- candidates = [self.seed_dir]
+ ovf_is_accessible = False
+ metadata_source = None
+ md = {}
+ userdata_raw = ""
+ cfg = {}
+ files = {}
+
if os.path.isfile(REPROVISION_MARKER_FILE):
- candidates.insert(0, "IMDS")
- report_diagnostic_event("Reprovision marker file already present "
- "before crawling Azure metadata: %s" %
- REPROVISION_MARKER_FILE,
- logger_func=LOG.debug)
- elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
- candidates.insert(0, "NIC_ATTACH_MARKER_PRESENT")
- report_diagnostic_event("Reprovision nic attach marker file "
- "already present before crawling Azure "
- "metadata: %s" %
- REPROVISION_NIC_ATTACH_MARKER_FILE,
- logger_func=LOG.debug)
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
- reprovision = False
- reprovision_after_nic_attach = False
- for cdev in candidates:
- try:
- if cdev == "IMDS":
- ret = None
- reprovision = True
- elif cdev == "NIC_ATTACH_MARKER_PRESENT":
- ret = None
- reprovision_after_nic_attach = True
- elif cdev.startswith("/dev/"):
- if util.is_FreeBSD():
- ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf")
+ metadata_source = "IMDS"
+ report_diagnostic_event(
+ "Reprovision marker file already present "
+ "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
+ else:
+ for src in list_possible_azure_ds(self.seed_dir, ddir):
+ try:
+ if src.startswith("/dev/"):
+ if util.is_FreeBSD():
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir, mtype="udf"
+ )
+ else:
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir
+ )
+ # save the device for ejection later
+ self._iso_dev = src
else:
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
+ md, userdata_raw, cfg, files = load_azure_ds_dir(src)
+ ovf_is_accessible = True
+ metadata_source = src
+ break
+ except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % src,
+ logger_func=LOG.debug,
+ )
+ continue
+ except util.MountFailedError:
+ report_diagnostic_event(
+ "%s was not mountable" % src, logger_func=LOG.debug
+ )
+ md = {"local-hostname": ""}
+ cfg = {"system_info": {"default_user": {"name": ""}}}
+ metadata_source = "IMDS"
+ continue
+ except BrokenAzureDataSource as exc:
+ msg = "BrokenAzureDataSource: %s" % exc
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
- except NonAzureDataSource:
- report_diagnostic_event(
- "Did not find Azure data source in %s" % cdev,
- logger_func=LOG.debug)
- continue
- except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(
+ "Found provisioning metadata in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
+
+ # If we read OVF from attached media, we are provisioning. If OVF
+ # is not found, we are probably provisioning on a system which does
+ # not have UDF support. In either case, require IMDS metadata.
+ # If we require IMDS metadata, try harder to obtain networking, waiting
+ # for at least 20 minutes. Otherwise only wait 5 minutes.
+ requires_imds_metadata = bool(self._iso_dev) or not ovf_is_accessible
+ timeout_minutes = 5 if requires_imds_metadata else 20
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=timeout_minutes)
+ except NoDHCPLeaseError:
+ pass
+
+ if self._is_ephemeral_networking_up():
+ imds_md = self.get_imds_data_with_api_fallback(retries=10)
+ else:
+ imds_md = {}
+
+ if not imds_md and not ovf_is_accessible:
+ msg = "No OVF or IMDS available"
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
+
+ # Refresh PPS type using metadata.
+ pps_type = self._determine_pps_type(cfg, imds_md)
+ if pps_type != PPSType.NONE:
+ if util.is_FreeBSD():
+ msg = "Free BSD is not supported for PPS VMs"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
- except util.MountFailedError:
- report_diagnostic_event(
- '%s was not mountable' % cdev, logger_func=LOG.warning)
- continue
- perform_reprovision = reprovision or self._should_reprovision(ret)
- perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret))
+ self._write_reprovision_marker()
+
+ if pps_type == PPSType.SAVABLE:
+ self._wait_for_all_nics_ready()
+
+ md, userdata_raw, cfg, files = self._reprovision()
+ # fetch metadata again as it has changed after reprovisioning
+ imds_md = self.get_imds_data_with_api_fallback(retries=10)
+
+ # Report errors if IMDS network configuration is missing data.
+ self.validate_imds_network_metadata(imds_md=imds_md)
+
+ self.seed = metadata_source
+ crawled_data.update(
+ {
+ "cfg": cfg,
+ "files": files,
+ "metadata": util.mergemanydict([md, {"imds": imds_md}]),
+ "userdata_raw": userdata_raw,
+ }
+ )
+ imds_username = _username_from_imds(imds_md)
+ imds_hostname = _hostname_from_imds(imds_md)
+ imds_disable_password = _disable_password_from_imds(imds_md)
+ if imds_username:
+ LOG.debug("Username retrieved from IMDS: %s", imds_username)
+ cfg["system_info"]["default_user"]["name"] = imds_username
+ if imds_hostname:
+ LOG.debug("Hostname retrieved from IMDS: %s", imds_hostname)
+ crawled_data["metadata"]["local-hostname"] = imds_hostname
+ if imds_disable_password:
+ LOG.debug(
+ "Disable password retrieved from IMDS: %s",
+ imds_disable_password,
+ )
+ crawled_data["metadata"][
+ "disable_password"
+ ] = imds_disable_password
- if perform_reprovision or perform_reprovision_after_nic_attach:
- if util.is_FreeBSD():
- msg = "Free BSD is not supported for PPS VMs"
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- if perform_reprovision_after_nic_attach:
- self._wait_for_all_nics_ready()
- ret = self._reprovision()
-
- imds_md = get_metadata_from_imds(
- self.fallback_interface, retries=10)
- (md, userdata_raw, cfg, files) = ret
- self.seed = cdev
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
- found = cdev
+ if metadata_source == "IMDS" and not crawled_data["files"]:
+ try:
+ contents = build_minimal_ovf(
+ username=imds_username, # type: ignore
+ hostname=imds_hostname, # type: ignore
+ disableSshPwd=imds_disable_password, # type: ignore
+ )
+ crawled_data["files"] = {"ovf-env.xml": contents}
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to construct OVF from IMDS data %s" % e,
+ logger_func=LOG.debug,
+ )
- report_diagnostic_event(
- 'found datasource in %s' % cdev, logger_func=LOG.debug)
- break
+ # only use userdata from imds if OVF did not provide custom data
+ # userdata provided by IMDS is always base64 encoded
+ if not userdata_raw:
+ imds_userdata = _userdata_from_imds(imds_md)
+ if imds_userdata:
+ LOG.debug("Retrieved userdata from IMDS")
+ try:
+ crawled_data["userdata_raw"] = base64.b64decode(
+ "".join(imds_userdata.split())
+ )
+ except Exception:
+ report_diagnostic_event(
+ "Bad userdata in IMDS", logger_func=LOG.warning
+ )
- if not found:
- msg = 'No Azure metadata found'
+ if not metadata_source:
+ msg = "No Azure metadata found"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ else:
+ report_diagnostic_event(
+ "found datasource in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
- if found == ddir:
+ if metadata_source == ddir:
report_diagnostic_event(
- "using files cached in %s" % ddir, logger_func=LOG.debug)
+ "using files cached in %s" % ddir, logger_func=LOG.debug
+ )
seed = _get_random_seed()
if seed:
- crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = self._iid()
-
- if perform_reprovision or perform_reprovision_after_nic_attach:
- LOG.info("Reporting ready to Azure after getting ReprovisionData")
- use_cached_ephemeral = (
- self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None))
- if use_cached_ephemeral:
- self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
- self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
+ crawled_data["metadata"]["random_seed"] = seed
+ crawled_data["metadata"]["instance-id"] = self._iid()
+
+ if self._negotiated is False and self._is_ephemeral_networking_up():
+ # Report ready and fetch public-keys from Wireserver, if required.
+ pubkey_info = self._determine_wireserver_pubkey_info(
+ cfg=cfg, imds_md=imds_md
+ )
+ try:
+ ssh_keys = self._report_ready(pubkey_info=pubkey_info)
+ except Exception:
+ # Failed to report ready, but continue with best effort.
+ pass
else:
- try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter) as lease:
- self._report_ready(lease=lease)
- except Exception as e:
- report_diagnostic_event(
- "exception while reporting ready: %s" % e,
- logger_func=LOG.error)
- raise
+ LOG.debug("negotiating returned %s", ssh_keys)
+ if ssh_keys:
+ crawled_data["metadata"]["public-keys"] = ssh_keys
+
+ self._cleanup_markers()
+ self._negotiated = True
+
return crawled_data
def _is_platform_viable(self):
@@ -602,28 +660,57 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
)
except Exception as e:
report_diagnostic_event(
- 'Could not crawl Azure metadata: %s' % e,
- logger_func=LOG.error)
+ "Could not crawl Azure metadata: %s" % e, logger_func=LOG.error
+ )
self._report_failure(
- description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
return False
+ finally:
+ self._teardown_ephemeral_networking()
- if (self.distro and self.distro.name == 'ubuntu' and
- self.ds_cfg.get('apply_network_config')):
+ if (
+ self.distro
+ and self.distro.name == "ubuntu"
+ and self.ds_cfg.get("apply_network_config")
+ ):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
- self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
- self._metadata_imds = crawled_data['metadata']['imds']
+
+ # Only merge in default cloud config related to the ephemeral disk
+ # if the ephemeral disk exists
+ devpath = RESOURCE_DISK_PATH
+ if os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug,
+ )
+ self.cfg = util.mergemanydict(
+ [crawled_data["cfg"], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]
+ )
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist. "
+ "Not merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug,
+ )
+ self.cfg = crawled_data["cfg"]
+
+ self._metadata_imds = crawled_data["metadata"]["imds"]
self.metadata = util.mergemanydict(
- [crawled_data['metadata'], DEFAULT_METADATA])
- self.userdata_raw = crawled_data['userdata_raw']
+ [crawled_data["metadata"], DEFAULT_METADATA]
+ )
+ self.userdata_raw = crawled_data["userdata_raw"]
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -631,41 +718,108 @@ class DataSourceAzure(sources.DataSource):
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(
- self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
+ self.ds_cfg["data_dir"], crawled_data["files"], dirmode=0o700
+ )
return True
+ @azure_ds_telemetry_reporter
+ def get_imds_data_with_api_fallback(
+ self,
+ *,
+ retries,
+ md_type=MetadataType.ALL,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+ ):
+ """
+        Wrapper for get_metadata_from_imds that adds flexibility in which
+        IMDS api-version is used. If a particular IMDS instance does not
+        support the desired api-version, fall back to a known-good minimum
+        api-version so the request remains fault tolerant.
+ """
+ for _ in range(retries):
+ try:
+ LOG.info("Attempting IMDS api-version: %s", IMDS_VER_WANT)
+ return get_metadata_from_imds(
+ retries=0,
+ md_type=md_type,
+ api_version=IMDS_VER_WANT,
+ exc_cb=exc_cb,
+ )
+ except UrlError as err:
+ LOG.info("UrlError with IMDS api-version: %s", IMDS_VER_WANT)
+ if err.code == 400:
+ log_msg = "Fall back to IMDS api-version: {}".format(
+ IMDS_VER_MIN
+ )
+ report_diagnostic_event(log_msg, logger_func=LOG.info)
+ break
+
+ LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN)
+ return get_metadata_from_imds(
+ retries=retries,
+ md_type=md_type,
+ api_version=IMDS_VER_MIN,
+ exc_cb=exc_cb,
+ infinite=infinite,
+ )
+
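In short, the wrapper above tries the preferred api-version a bounded number of times, drops to the known-good minimum as soon as IMDS answers 400 for it, and only then issues the (possibly infinite) request at the minimum version. A minimal standalone sketch of that pattern, assuming a hypothetical fetch() callable in place of get_metadata_from_imds:

    def fetch_with_api_fallback(fetch, retries):
        # Probe the preferred api-version; HTTP 400 means this IMDS
        # instance does not support it, so stop probing early.
        for _ in range(retries):
            try:
                return fetch(api_version=IMDS_VER_WANT, retries=0)
            except UrlError as err:
                if err.code == 400:
                    break
        # Fall back to the known-good minimum api-version.
        return fetch(api_version=IMDS_VER_MIN, retries=retries)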
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
@azure_ds_telemetry_reporter
- def get_public_ssh_keys(self):
+ def get_public_ssh_keys(self) -> List[str]:
+ """
+ Retrieve public SSH keys.
"""
- Try to get the ssh keys from IMDS first, and if that fails
- (i.e. IMDS is unavailable) then fallback to getting the ssh
- keys from OVF.
+ try:
+ return self._get_public_keys_from_imds(self.metadata["imds"])
+ except (KeyError, ValueError):
+ pass
+
+ return self._get_public_keys_from_ovf()
+
+ def _get_public_keys_from_imds(self, imds_md: dict) -> List[str]:
+ """Get SSH keys from IMDS metadata.
- The benefit to getting keys from IMDS is a large performance
- advantage, so this is a strong preference. But we must keep
- OVF as a second option for environments that don't have IMDS.
+ :raises KeyError: if IMDS metadata is malformed/missing.
+ :raises ValueError: if key format is not supported.
+
+ :returns: List of keys.
"""
- LOG.debug('Retrieving public SSH keys')
- ssh_keys = []
try:
ssh_keys = [
- public_key['keyData']
- for public_key
- in self.metadata['imds']['compute']['publicKeys']
+ public_key["keyData"]
+ for public_key in imds_md["compute"]["publicKeys"]
]
- LOG.debug('Retrieved SSH keys from IMDS')
except KeyError:
- log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+ log_msg = "No SSH keys found in IMDS metadata"
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ raise
+
+ if any(not _key_is_openssh_formatted(key=key) for key in ssh_keys):
+ log_msg = "Key(s) not in OpenSSH format"
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ raise ValueError(log_msg)
+
+ log_msg = "Retrieved {} keys from IMDS".format(len(ssh_keys))
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ return ssh_keys
+
+ def _get_public_keys_from_ovf(self) -> List[str]:
+ """Get SSH keys that were fetched from wireserver.
+
+ :returns: List of keys.
+ """
+ ssh_keys = []
+ try:
+ ssh_keys = self.metadata["public-keys"]
+ log_msg = "Retrieved {} keys from OVF".format(len(ssh_keys))
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ except KeyError:
+ log_msg = "No keys available from OVF"
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- try:
- ssh_keys = self.metadata['public-keys']
- LOG.debug('Retrieved keys from OVF')
- except KeyError:
- log_msg = 'No keys available from OVF'
- report_diagnostic_event(log_msg, logger_func=LOG.debug)
return ssh_keys
@@ -678,33 +832,32 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
- self.paths.get_cpath('data'), 'instance-id')
- iid = dmi.read_dmi_data('system-uuid')
+ self.paths.get_cpath("data"), "instance-id"
+ )
+        # Kernels older than 4.15 report an UPPERCASE product_uuid.
+        # We don't want Azure to treat an UPPER/lower difference as a new
+        # instance id, since that would rewrite SSH host keys.
+ # LP: #1835584
+ system_uuid = dmi.read_dmi_data("system-uuid")
+ if system_uuid is None:
+ raise RuntimeError("failed to read system-uuid")
+
+ iid = system_uuid.lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
- if is_byte_swapped(previous, iid):
+ if previous.lower() == iid:
+ # If uppercase/lowercase equivalent, return the previous value
+ # to avoid new instance id.
+ return previous
+ if is_byte_swapped(previous.lower(), iid):
return previous
return iid
@azure_ds_telemetry_reporter
- def setup(self, is_new_instance):
- if self._negotiated is False:
- LOG.debug("negotiating for %s (new_instance=%s)",
- self.get_instance_id(), is_new_instance)
- fabric_data = self._negotiate()
- LOG.debug("negotiating returned %s", fabric_data)
- if fabric_data:
- self.metadata.update(fabric_data)
- self._negotiated = True
- else:
- LOG.debug("negotiating already done for %s",
- self.get_instance_id())
-
- @azure_ds_telemetry_reporter
def _wait_for_nic_detach(self, nl_sock):
"""Use the netlink socket provided to wait for nic detach event.
- NOTE: The function doesn't close the socket. The caller owns closing
- the socket and disposing it safely.
+ NOTE: The function doesn't close the socket. The caller owns closing
+ the socket and disposing it safely.
"""
try:
ifname = None
@@ -712,106 +865,124 @@ class DataSourceAzure(sources.DataSource):
# Preprovisioned VM will only have one NIC, and it gets
# detached immediately after deployment.
with events.ReportEventStack(
- name="wait-for-nic-detach",
- description=("wait for nic detach"),
- parent=azure_ds_reporter):
+ name="wait-for-nic-detach",
+ description="wait for nic detach",
+ parent=azure_ds_reporter,
+ ):
ifname = netlink.wait_for_nic_detach_event(nl_sock)
if ifname is None:
- msg = ("Preprovisioned nic not detached as expected. "
- "Proceeding without failing.")
+ msg = (
+ "Preprovisioned nic not detached as expected. "
+ "Proceeding without failing."
+ )
report_diagnostic_event(msg, logger_func=LOG.warning)
else:
- report_diagnostic_event("The preprovisioned nic %s is detached"
- % ifname, logger_func=LOG.warning)
+ report_diagnostic_event(
+ "The preprovisioned nic %s is detached" % ifname,
+ logger_func=LOG.warning,
+ )
path = REPROVISION_NIC_DETACHED_MARKER_FILE
LOG.info("Creating a marker file for nic detached: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
except AssertionError as error:
- report_diagnostic_event(error, logger_func=LOG.error)
+ report_diagnostic_event(str(error), logger_func=LOG.error)
raise
@azure_ds_telemetry_reporter
def wait_for_link_up(self, ifname):
"""In cases where the link state is still showing down after a nic is
- hot-attached, we can attempt to bring it up by forcing the hv_netvsc
- drivers to query the link state by unbinding and then binding the
- device. This function attempts infinitely until the link is up,
- because we cannot proceed further until we have a stable link."""
+ hot-attached, we can attempt to bring it up by forcing the hv_netvsc
+ drivers to query the link state by unbinding and then binding the
+        device. This function retries indefinitely until the link is up,
+ because we cannot proceed further until we have a stable link."""
if self.distro.networking.try_set_link_up(ifname):
- report_diagnostic_event("The link %s is already up." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "The link %s is already up." % ifname, logger_func=LOG.info
+ )
return
- LOG.info("Attempting to bring %s up", ifname)
+ LOG.debug("Attempting to bring %s up", ifname)
attempts = 0
+ LOG.info("Unbinding and binding the interface %s", ifname)
while True:
-
- LOG.info("Unbinding and binding the interface %s", ifname)
- devicename = net.read_sys_net(ifname,
- 'device/device_id').strip('{}')
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind',
- devicename)
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/bind',
- devicename)
+ device_id = net.read_sys_net(ifname, "device/device_id")
+ if device_id is False or not isinstance(device_id, str):
+ raise RuntimeError("Unable to read device ID: %s" % device_id)
+ devicename = device_id.strip("{}")
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename
+ )
attempts = attempts + 1
if self.distro.networking.try_set_link_up(ifname):
- msg = "The link %s is up after %s attempts" % (ifname,
- attempts)
+ msg = "The link %s is up after %s attempts" % (
+ ifname,
+ attempts,
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
return
- sleep_duration = 1
- msg = ("Link is not up after %d attempts with %d seconds sleep "
- "between attempts." % (attempts, sleep_duration))
-
if attempts % 10 == 0:
+ msg = "Link is not up after %d attempts to rebind" % attempts
report_diagnostic_event(msg, logger_func=LOG.info)
- else:
LOG.info(msg)
- sleep(sleep_duration)
+ # It could take some time after rebind for the interface to be up.
+ # So poll for the status for some time before attempting to rebind
+ # again.
+ sleep_duration = 0.5
+ max_status_polls = 20
+ LOG.debug(
+ "Polling %d seconds for primary NIC link up after rebind.",
+ sleep_duration * max_status_polls,
+ )
+
+ for i in range(0, max_status_polls):
+ if self.distro.networking.is_up(ifname):
+ msg = (
+ "After %d attempts to rebind, link is up after "
+ "polling the link status %d times" % (attempts, i)
+ )
+ report_diagnostic_event(msg, logger_func=LOG.info)
+ LOG.debug(msg)
+ return
+ else:
+ sleep(sleep_duration)
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
path = REPORTED_READY_MARKER_FILE
- LOG.info(
- "Creating a marker file to report ready: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to report ready: %s", path)
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
report_diagnostic_event(
- 'Successfully created reported ready marker file '
- 'while in the preprovisioning pool.',
- logger_func=LOG.debug)
+ "Successfully created reported ready marker file "
+ "while in the preprovisioning pool.",
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
- def _report_ready_if_needed(self):
- """Report ready to the platform if the marker file is not present,
- and create the marker file.
+ def _report_ready_for_pps(self) -> None:
+ """Report ready for PPS, creating the marker file upon completion.
+
+ :raises sources.InvalidMetaDataException: On error reporting ready.
"""
- have_not_reported_ready = (
- not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ try:
+ self._report_ready()
+ except Exception as error:
+ msg = "Failed reporting ready while in the preprovisioning pool."
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg) from error
- if have_not_reported_ready:
- report_diagnostic_event("Reporting ready before nic detach",
- logger_func=LOG.info)
- try:
- with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
- self._report_ready(lease=lease)
- except Exception as e:
- report_diagnostic_event("Exception reporting ready during "
- "preprovisioning before nic detach: %s"
- % e, logger_func=LOG.error)
- raise
- self._create_report_ready_marker()
- else:
- report_diagnostic_event("Already reported ready before nic detach."
- " The marker file already exists: %s" %
- REPORTED_READY_MARKER_FILE,
- logger_func=LOG.error)
+ self._create_report_ready_marker()
@azure_ds_telemetry_reporter
def _check_if_nic_is_primary(self, ifname):
@@ -822,61 +993,89 @@ class DataSourceAzure(sources.DataSource):
is_primary = False
expected_nic_count = -1
imds_md = None
+ metadata_poll_count = 0
+ metadata_logging_threshold = 1
+ expected_errors_count = 0
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
- # primary or secondary. In this case, the desired behavior is to fail
- # VM provisioning if there is any DHCP failure when trying to determine
- # the primary NIC.
- try:
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description=("obtain dhcp lease for %s when attempting to "
- "determine primary NIC during reprovision of "
- "a pre-provisioned VM" % ifname),
- parent=azure_ds_reporter):
- dhcp_ctx = EphemeralDHCPv4(
- iface=ifname,
- dhcp_log_func=dhcp_log_cb)
- dhcp_ctx.obtain_lease()
- except Exception as e:
- report_diagnostic_event("Giving up. Failed to obtain dhcp lease "
- "for %s when attempting to determine "
- "primary NIC during reprovision due to %s"
- % (ifname, e), logger_func=LOG.error)
- raise
+ # primary or secondary. In this case, retry DHCP until successful.
+ self._setup_ephemeral_networking(iface=ifname, timeout_minutes=20)
+
+ # Retry polling network metadata for a limited duration only when the
+ # calls fail due to network unreachable error or timeout.
+ # This is because the platform drops packets going towards IMDS
+ # when it is not a primary nic. If the calls fail due to other issues
+ # like 410, 503 etc, then it means we are primary but IMDS service
+ # is unavailable at the moment. Retry indefinitely in those cases
+ # since we cannot move on without the network metadata. In the future,
+ # all this will not be necessary, as a new dhcp option would tell
+ # whether the nic is primary or not.
+ def network_metadata_exc_cb(msg, exc):
+ nonlocal expected_errors_count, metadata_poll_count
+ nonlocal metadata_logging_threshold
+
+ metadata_poll_count = metadata_poll_count + 1
+
+            # Log when needed, but back off exponentially to avoid flooding
+            # the log file.
+ if metadata_poll_count >= metadata_logging_threshold:
+ metadata_logging_threshold *= 2
+ report_diagnostic_event(
+ "Ran into exception when attempting to reach %s "
+ "after %d polls." % (msg, metadata_poll_count),
+ logger_func=LOG.error,
+ )
+
+ if isinstance(exc, UrlError):
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exc.cause, exc.code),
+ logger_func=LOG.error,
+ )
+
+ # Retry up to a certain limit for both timeout and network
+ # unreachable errors.
+ if exc.cause and isinstance(
+ exc.cause, (requests.Timeout, requests.ConnectionError)
+ ):
+ expected_errors_count = expected_errors_count + 1
+ return expected_errors_count <= 10
+ return True
# Primary nic detection will be optimized in the future. The fact that
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
try:
- imds_md = get_metadata_from_imds(
- ifname,
- 5,
- metadata_type.network)
+ imds_md = self.get_imds_data_with_api_fallback(
+ retries=0,
+ md_type=MetadataType.NETWORK,
+ exc_cb=network_metadata_exc_cb,
+ infinite=True,
+ )
except Exception as e:
LOG.warning(
"Failed to get network metadata using nic %s. Attempt to "
"contact IMDS failed with error %s. Assuming this is not the "
- "primary nic.", ifname, e)
- finally:
- # If we are not the primary nic, then clean the dhcp context.
- if imds_md is None:
- dhcp_ctx.clean_network()
+ "primary nic.",
+ ifname,
+ e,
+ )
- if imds_md is not None:
+ if imds_md:
# Only primary NIC will get a response from IMDS.
LOG.info("%s is the primary nic", ifname)
is_primary = True
- # If primary, set ephemeral dhcp ctx so we can report ready
- self._ephemeral_dhcp_ctx = dhcp_ctx
-
# Set the expected nic count based on the response received.
- expected_nic_count = len(
- imds_md['interface'])
- report_diagnostic_event("Expected nic count: %d" %
- expected_nic_count, logger_func=LOG.info)
+ expected_nic_count = len(imds_md["interface"])
+ report_diagnostic_event(
+ "Expected nic count: %d" % expected_nic_count,
+ logger_func=LOG.info,
+ )
+ else:
+ # If we are not the primary nic, then clean the dhcp context.
+ self._teardown_ephemeral_networking()
return is_primary, expected_nic_count
@@ -901,17 +1100,22 @@ class DataSourceAzure(sources.DataSource):
while True:
ifname = None
with events.ReportEventStack(
- name="wait-for-nic-attach",
- description=("wait for nic attach after %d nics have "
- "been attached" % len(nics_found)),
- parent=azure_ds_reporter):
- ifname = netlink.wait_for_nic_attach_event(nl_sock,
- nics_found)
+ name="wait-for-nic-attach",
+ description=(
+ "wait for nic attach after %d nics have been attached"
+ % len(nics_found)
+ ),
+ parent=azure_ds_reporter,
+ ):
+ ifname = netlink.wait_for_nic_attach_event(
+ nl_sock, nics_found
+ )
            # wait_for_nic_attach_event guarantees that ifname is not None
nics_found.append(ifname)
- report_diagnostic_event("Detected nic %s attached." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Detected nic %s attached." % ifname, logger_func=LOG.info
+ )
# Attempt to bring the interface's operating state to
# UP in case it is not already.
@@ -921,26 +1125,29 @@ class DataSourceAzure(sources.DataSource):
# platform will attach the primary nic first so we
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
- LOG.info("Checking if %s is the primary nic",
- ifname)
- (primary_nic_found, expected_nic_count) = (
- self._check_if_nic_is_primary(ifname))
+ LOG.info("Checking if %s is the primary nic", ifname)
+ (
+ primary_nic_found,
+ expected_nic_count,
+ ) = self._check_if_nic_is_primary(ifname)
# Exit criteria: check if we've discovered all nics
- if (expected_nic_count != -1
- and len(nics_found) >= expected_nic_count):
+ if (
+ expected_nic_count != -1
+ and len(nics_found) >= expected_nic_count
+ ):
LOG.info("Found all the nics for this VM.")
break
except AssertionError as error:
- report_diagnostic_event(error, logger_func=LOG.error)
+ report_diagnostic_event(str(error), logger_func=LOG.error)
@azure_ds_telemetry_reporter
def _wait_for_all_nics_ready(self):
"""Wait for nic(s) to be hot-attached. There may be multiple nics
- depending on the customer request.
- But only primary nic would be able to communicate with wireserver
- and IMDS. So we detect and save the primary nic to be used later.
+ depending on the customer request.
+        But only the primary nic can communicate with wireserver
+        and IMDS, so we detect and save the primary nic to be used later.
"""
nl_sock = None
@@ -948,18 +1155,22 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
report_ready_marker_present = bool(
- os.path.isfile(REPORTED_READY_MARKER_FILE))
+ os.path.isfile(REPORTED_READY_MARKER_FILE)
+ )
# Report ready if the marker file is not already present.
# The nic of the preprovisioned vm gets hot-detached as soon as
# we report ready. So no need to save the dhcp context.
- self._report_ready_if_needed()
+ if not os.path.isfile(REPORTED_READY_MARKER_FILE):
+ self._report_ready_for_pps()
has_nic_been_detached = bool(
- os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE))
+ os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE)
+ )
if not has_nic_been_detached:
LOG.info("NIC has not been detached yet.")
+ self._teardown_ephemeral_networking()
self._wait_for_nic_detach(nl_sock)
# If we know that the preprovisioned nic has been detached, and we
@@ -970,31 +1181,35 @@ class DataSourceAzure(sources.DataSource):
if not self.fallback_interface:
self._wait_for_hot_attached_nics(nl_sock)
else:
- report_diagnostic_event("Skipping waiting for nic attach "
- "because we already have a fallback "
- "interface. Report Ready marker "
- "present before detaching nics: %s" %
- report_ready_marker_present,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Skipping waiting for nic attach "
+ "because we already have a fallback "
+ "interface. Report Ready marker "
+ "present before detaching nics: %s"
+ % report_ready_marker_present,
+ logger_func=LOG.info,
+ )
except netlink.NetlinkCreateSocketError as e:
- report_diagnostic_event(e, logger_func=LOG.warning)
+ report_diagnostic_event(str(e), logger_func=LOG.warning)
raise
finally:
if nl_sock:
nl_sock.close()
+ @azure_ds_telemetry_reporter
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = metadata_type.reprovisiondata.value
+ url = "{}?api-version={}".format(
+ MetadataType.REPROVISION_DATA.value, IMDS_VER_MIN
+ )
headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
self.imds_logging_threshold = 1
self.imds_poll_counter = 1
dhcp_attempts = 0
- vnet_switched = False
- return_val = None
+ reprovision_data = None
def exc_cb(msg, exception):
if isinstance(exception, UrlError):
@@ -1002,339 +1217,328 @@ class DataSourceAzure(sources.DataSource):
if self.imds_poll_counter == self.imds_logging_threshold:
# Reducing the logging frequency as we are polling IMDS
self.imds_logging_threshold *= 2
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d",
- self.imds_logging_threshold)
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.debug)
+ LOG.debug(
+ "Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold,
+ )
+ report_diagnostic_event(
+ "poll IMDS with %s failed. "
+ "Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.debug,
+ )
self.imds_poll_counter += 1
return True
else:
# If we get an exception while trying to call IMDS, we call
# DHCP and setup the ephemeral network to acquire a new IP.
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.warning,
+ )
return False
report_diagnostic_event(
- "poll IMDS failed with an "
- "unexpected exception: %s" % exception,
- logger_func=LOG.warning)
+ "poll IMDS failed with an unexpected exception: %s"
+ % exception,
+ logger_func=LOG.warning,
+ )
return False
- # When the interface is hot-attached, we would have already
- # done dhcp and set the dhcp context. In that case, skip
- # the attempt to do dhcp.
- is_ephemeral_ctx_present = self._ephemeral_dhcp_ctx is not None
- msg = ("Unexpected error. Dhcp context is not expected to be already "
- "set when we need to wait for vnet switch")
- if is_ephemeral_ctx_present and report_ready:
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise RuntimeError(msg)
+ if report_ready:
+ # Networking must be up for netlink to detect
+            # media disconnect/connect. It may be down due to an
+            # initial DHCP failure; if so, check for it and retry,
+            # ensuring we flag it as required.
+ if not self._is_ephemeral_networking_up():
+ self._setup_ephemeral_networking(timeout_minutes=20)
- while True:
try:
- # Since is_ephemeral_ctx_present is set only once, this ensures
- # that with regular reprovisioning, dhcp is always done every
- # time the loop runs.
- if not is_ephemeral_ctx_present:
- # Save our EphemeralDHCPv4 context to avoid repeated dhcp
- # later when we report ready
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
- dhcp_log_func=dhcp_log_cb)
- lease = self._ephemeral_dhcp_ctx.obtain_lease()
-
- if vnet_switched:
- dhcp_attempts += 1
- if report_ready:
+ if (
+ self._ephemeral_dhcp_ctx is None
+ or self._ephemeral_dhcp_ctx.iface is None
+ ):
+ raise RuntimeError("Missing ephemeral context")
+ iface = self._ephemeral_dhcp_ctx.iface
+
+ nl_sock = netlink.create_bound_netlink_socket()
+ self._report_ready_for_pps()
+
+ LOG.debug(
+ "Wait for vnetswitch to happen on %s",
+ iface,
+ )
+ with events.ReportEventStack(
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter,
+ ):
try:
- nl_sock = netlink.create_bound_netlink_socket()
- except netlink.NetlinkCreateSocketError as e:
+ netlink.wait_for_media_disconnect_connect(
+ nl_sock, iface
+ )
+ except AssertionError as e:
report_diagnostic_event(
- 'Failed to create bound netlink socket: %s' % e,
- logger_func=LOG.warning)
- self._ephemeral_dhcp_ctx.clean_network()
- break
-
- report_ready_succeeded = self._report_ready(lease=lease)
- if not report_ready_succeeded:
- msg = ('Failed reporting ready while in '
- 'the preprovisioning pool.')
- report_diagnostic_event(msg, logger_func=LOG.error)
- self._ephemeral_dhcp_ctx.clean_network()
- raise sources.InvalidMetaDataException(msg)
-
- self._create_report_ready_marker()
- report_ready = False
-
- LOG.debug("Wait for vnetswitch to happen")
- with events.ReportEventStack(
- name="wait-for-media-disconnect-connect",
- description="wait for vnet switch",
- parent=azure_ds_reporter):
- try:
- netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
- except AssertionError as e:
- report_diagnostic_event(
- 'Error while waiting for vnet switch: %s' % e,
- logger_func=LOG.error)
- break
-
- vnet_switched = True
- self._ephemeral_dhcp_ctx.clean_network()
- else:
- with events.ReportEventStack(
- name="get-reprovision-data-from-imds",
- description="get reprovision data from imds",
- parent=azure_ds_reporter):
- return_val = readurl(url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- exception_cb=exc_cb,
- infinite=True,
- log_req_resp=False).contents
- break
- except UrlError:
- # Teardown our EphemeralDHCPv4 context on failure as we retry
- self._ephemeral_dhcp_ctx.clean_network()
+ "Error while waiting for vnet switch: %s" % e,
+ logger_func=LOG.error,
+ )
+ except netlink.NetlinkCreateSocketError as e:
+ report_diagnostic_event(
+ "Failed to create bound netlink socket: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise sources.InvalidMetaDataException(
+ "Failed to report ready while in provisioning pool."
+ ) from e
+ except NoDHCPLeaseError as e:
+ report_diagnostic_event(
+ "DHCP failed while in provisioning pool",
+ logger_func=LOG.warning,
+ )
+ raise sources.InvalidMetaDataException(
+ "Failed to report ready while in provisioning pool."
+ ) from e
finally:
if nl_sock:
nl_sock.close()
- if vnet_switched:
- report_diagnostic_event("attempted dhcp %d times after reuse" %
- dhcp_attempts,
- logger_func=LOG.debug)
- report_diagnostic_event("polled imds %d times after reuse" %
- self.imds_poll_counter,
- logger_func=LOG.debug)
+ # Teardown old network configuration.
+ self._teardown_ephemeral_networking()
- return return_val
+ while not reprovision_data:
+ if not self._is_ephemeral_networking_up():
+ dhcp_attempts += 1
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=5)
+ except NoDHCPLeaseError:
+ continue
+
+ with events.ReportEventStack(
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter,
+ ):
+ try:
+ reprovision_data = readurl(
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False,
+ ).contents
+ except UrlError:
+ self._teardown_ephemeral_networking()
+ continue
+
+ report_diagnostic_event(
+ "attempted dhcp %d times after reuse" % dhcp_attempts,
+ logger_func=LOG.debug,
+ )
+ report_diagnostic_event(
+ "polled imds %d times after reuse" % self.imds_poll_counter,
+ logger_func=LOG.debug,
+ )
+
+ return reprovision_data
@azure_ds_telemetry_reporter
- def _report_failure(self, description=None) -> bool:
+ def _report_failure(self, description: Optional[str] = None) -> bool:
"""Tells the Azure fabric that provisioning has failed.
@param description: A description of the error encountered.
@return: The success status of sending the failure signal.
"""
- unknown_245_key = 'unknown-245'
-
- try:
- if (self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None) and
- getattr(self._ephemeral_dhcp_ctx, 'lease', None) and
- unknown_245_key in self._ephemeral_dhcp_ctx.lease):
+ if self._is_ephemeral_networking_up():
+ try:
report_diagnostic_event(
- 'Using cached ephemeral dhcp context '
- 'to report failure to Azure', logger_func=LOG.debug)
+ "Using cached ephemeral dhcp context "
+ "to report failure to Azure",
+ logger_func=LOG.debug,
+ )
report_failure_to_fabric(
- dhcp_opts=self._ephemeral_dhcp_ctx.lease[unknown_245_key],
- description=description)
- self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
+ dhcp_opts=self._wireserver_endpoint,
+ description=description,
+ )
return True
- except Exception as e:
- report_diagnostic_event(
- 'Failed to report failure using '
- 'cached ephemeral dhcp context: %s' % e,
- logger_func=LOG.error)
-
- try:
- report_diagnostic_event(
- 'Using new ephemeral dhcp to report failure to Azure',
- logger_func=LOG.debug)
- with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
- report_failure_to_fabric(
- dhcp_opts=lease[unknown_245_key],
- description=description)
- return True
- except Exception as e:
- report_diagnostic_event(
- 'Failed to report failure using new ephemeral dhcp: %s' % e,
- logger_func=LOG.debug)
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to report failure using "
+ "cached ephemeral dhcp context: %s" % e,
+ logger_func=LOG.error,
+ )
try:
report_diagnostic_event(
- 'Using fallback lease to report failure to Azure')
+ "Using new ephemeral dhcp to report failure to Azure",
+ logger_func=LOG.debug,
+ )
+ self._teardown_ephemeral_networking()
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=20)
+ except NoDHCPLeaseError:
+ # Reporting failure will fail, but it will emit telemetry.
+ pass
report_failure_to_fabric(
- fallback_lease_file=self.dhclient_lease_file,
- description=description)
+ dhcp_opts=self._wireserver_endpoint, description=description
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using fallback lease: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using new ephemeral dhcp: %s" % e,
+ logger_func=LOG.debug,
+ )
return False
- def _report_ready(self, lease: dict) -> bool:
+ @azure_ds_telemetry_reporter
+ def _report_ready(
+ self, *, pubkey_info: Optional[List[str]] = None
+ ) -> Optional[List[str]]:
"""Tells the fabric provisioning has completed.
- @param lease: dhcp lease to use for sending the ready signal.
- @return: The success status of sending the ready signal.
+ :param pubkey_info: Fingerprints of keys to request from Wireserver.
+
+ :raises Exception: if failed to report.
+
+ :returns: List of SSH keys, if requested.
"""
try:
- get_metadata_from_fabric(None, lease['unknown-245'])
- return True
+ data = get_metadata_from_fabric(
+ fallback_lease_file=None,
+ dhcp_opts=self._wireserver_endpoint,
+ iso_dev=self._iso_dev,
+ pubkey_info=pubkey_info,
+ )
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
- return False
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise
- def _should_reprovision_after_nic_attach(self, candidate_metadata) -> bool:
- """Whether or not we should wait for nic attach and then poll
- IMDS for reprovisioning data. Also sets a marker file to poll IMDS.
+ # Reporting ready ejected OVF media, no need to do so again.
+ self._iso_dev = None
+ return data
- The marker file is used for the following scenario: the VM boots into
- wait for nic attach, which we expect to be proceeding infinitely until
- the nic is attached. If for whatever reason the platform moves us to a
- new host (for instance a hardware issue), we need to keep waiting.
- However, since the VM reports ready to the Fabric, we will not attach
- the ISO, thus cloud-init needs to have a way of knowing that it should
- jump back into the waiting mode in order to retrieve the ovf_env.
+ def _ppstype_from_imds(self, imds_md: dict) -> Optional[str]:
+ try:
+ return imds_md["extended"]["compute"]["ppsType"]
+ except Exception as e:
+ report_diagnostic_event(
+ "Could not retrieve pps configuration from IMDS: %s" % e,
+ logger_func=LOG.debug,
+ )
+ return None
- @param candidate_metadata: Metadata obtained from reading ovf-env.
- @return: Whether to reprovision after waiting for nics to be attached.
- """
- if not candidate_metadata:
- return False
- (_md, _userdata_raw, cfg, _files) = candidate_metadata
- path = REPROVISION_NIC_ATTACH_MARKER_FILE
- if (cfg.get('PreprovisionedVMType', None) == "Savable" or
- os.path.isfile(path)):
- if not os.path.isfile(path):
- LOG.info("Creating a marker file to wait for nic attach: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
- return True
- return False
+ def _determine_pps_type(self, ovf_cfg: dict, imds_md: dict) -> PPSType:
+ """Determine PPS type using OVF, IMDS data, and reprovision marker."""
+ if os.path.isfile(REPROVISION_MARKER_FILE):
+ pps_type = PPSType.UNKNOWN
+ elif (
+ ovf_cfg.get("PreprovisionedVMType", None) == PPSType.SAVABLE.value
+ or self._ppstype_from_imds(imds_md) == PPSType.SAVABLE.value
+ ):
+ pps_type = PPSType.SAVABLE
+ elif (
+ ovf_cfg.get("PreprovisionedVm") is True
+ or ovf_cfg.get("PreprovisionedVMType", None)
+ == PPSType.RUNNING.value
+ or self._ppstype_from_imds(imds_md) == PPSType.RUNNING.value
+ ):
+ pps_type = PPSType.RUNNING
+ else:
+ pps_type = PPSType.NONE
- def _should_reprovision(self, ret):
- """Whether or not we should poll IMDS for reprovisioning data.
- Also sets a marker file to poll IMDS.
-
- The marker file is used for the following scenario: the VM boots into
- this polling loop, which we expect to be proceeding infinitely until
- the VM is picked. If for whatever reason the platform moves us to a
- new host (for instance a hardware issue), we need to keep polling.
- However, since the VM reports ready to the Fabric, we will not attach
- the ISO, thus cloud-init needs to have a way of knowing that it should
- jump back into the polling loop in order to retrieve the ovf_env."""
- if not ret:
- return False
- (_md, _userdata_raw, cfg, _files) = ret
- path = REPROVISION_MARKER_FILE
- if (cfg.get('PreprovisionedVm') is True or
- cfg.get('PreprovisionedVMType', None) == 'Running' or
- os.path.isfile(path)):
- if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
- return True
- return False
+ report_diagnostic_event(
+ "PPS type: %s" % pps_type.value, logger_func=LOG.info
+ )
+ return pps_type
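The precedence implemented above is: an existing reprovision marker always wins (UNKNOWN), then any "Savable" hint from the OVF config or IMDS, then a "Running"/PreprovisionedVm hint, and finally NONE. Illustrative calls, assuming no reprovision marker file is present and ds is a DataSourceAzure instance:

    ds._determine_pps_type({"PreprovisionedVMType": "Savable"}, {})  # PPSType.SAVABLE
    ds._determine_pps_type({"PreprovisionedVm": True}, {})           # PPSType.RUNNING
    ds._determine_pps_type({}, {})                                   # PPSType.NONE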
+ def _write_reprovision_marker(self):
+ """Write reprovision marker file in case system is rebooted."""
+ LOG.info(
+ "Creating a marker file to poll imds: %s", REPROVISION_MARKER_FILE
+ )
+ util.write_file(
+ REPROVISION_MARKER_FILE,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
+
+ @azure_ds_telemetry_reporter
def _reprovision(self):
- """Initiate the reprovisioning workflow."""
+ """Initiate the reprovisioning workflow.
+
+ Ephemeral networking is up upon successful reprovisioning.
+ """
contents = self._poll_imds()
with events.ReportEventStack(
- name="reprovisioning-read-azure-ovf",
- description="read azure ovf during reprovisioning",
- parent=azure_ds_reporter):
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter,
+ ):
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
- def _negotiate(self):
- """Negotiate with fabric and return data from it.
+ def _determine_wireserver_pubkey_info(
+ self, *, cfg: dict, imds_md: dict
+ ) -> Optional[List[str]]:
+ """Determine the fingerprints we need to retrieve from Wireserver.
- On success, returns a dictionary including 'public_keys'.
- On failure, returns False.
+ :return: List of keys to request from Wireserver, if any, else None.
"""
-
- if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
- self.bounce_network_with_azure_hostname()
-
- pubkey_info = None
- try:
- public_keys = self.metadata['imds']['compute']['publicKeys']
- LOG.debug(
- 'Successfully retrieved %s key(s) from IMDS',
- len(public_keys)
- if public_keys is not None
- else 0
- )
- except KeyError:
- LOG.debug(
- 'Unable to retrieve SSH keys from IMDS during '
- 'negotiation, falling back to OVF'
- )
- pubkey_info = self.cfg.get('_pubkeys', None)
-
- metadata_func = partial(get_metadata_from_fabric,
- fallback_lease_file=self.
- dhclient_lease_file,
- pubkey_info=pubkey_info)
- else:
- metadata_func = self.get_metadata_from_agent
-
- LOG.debug("negotiating with fabric via agent command %s",
- self.ds_cfg['agent_command'])
+ pubkey_info: Optional[List[str]] = None
try:
- fabric_data = metadata_func()
- except Exception as e:
- report_diagnostic_event(
- "Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
- return False
+ self._get_public_keys_from_imds(imds_md)
+ except (KeyError, ValueError):
+ pubkey_info = cfg.get("_pubkeys", None)
+ log_msg = "Retrieved {} fingerprints from OVF".format(
+ len(pubkey_info) if pubkey_info is not None else 0
+ )
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ return pubkey_info
+ def _cleanup_markers(self):
+ """Cleanup any marker files."""
util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
- util.del_file(REPROVISION_NIC_ATTACH_MARKER_FILE)
util.del_file(REPROVISION_NIC_DETACHED_MARKER_FILE)
- return fabric_data
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
try:
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ address_ephemeral_resize(
+ is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False),
+ )
finally:
- push_log_to_kvp(self.sys_cfg['def_log_file'])
+ push_log_to_kvp(self.sys_cfg["def_log_file"])
return
@property
def availability_zone(self):
- return self.metadata.get(
- 'imds', {}).get('compute', {}).get('platformFaultDomain')
+ return (
+ self.metadata.get("imds", {})
+ .get("compute", {})
+ .get("platformFaultDomain")
+ )
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following exceptions.
+ the following exceptions.
- 1. Probe the drivers of the net-devices present and inject them in
- the network configuration under params: driver: <driver> value
- 2. Generate a fallback network config that does not include any of
- the blacklisted devices.
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. Generate a fallback network config that does not include any of
+ the blacklisted devices.
"""
if not self._network_config or self._network_config == sources.UNSET:
- if self.ds_cfg.get('apply_network_config'):
+ if self.ds_cfg.get("apply_network_config"):
nc_src = self._metadata_imds
else:
nc_src = None
@@ -1343,7 +1547,103 @@ class DataSourceAzure(sources.DataSource):
@property
def region(self):
- return self.metadata.get('imds', {}).get('compute', {}).get('location')
+ return self.metadata.get("imds", {}).get("compute", {}).get("location")
+
+ @azure_ds_telemetry_reporter
+ def validate_imds_network_metadata(self, imds_md: dict) -> bool:
+ """Validate IMDS network config and report telemetry for errors."""
+ local_macs = get_hv_netvsc_macs_normalized()
+
+ try:
+ network_config = imds_md["network"]
+ imds_macs = [
+ normalize_mac_address(i["macAddress"])
+ for i in network_config["interface"]
+ ]
+ except KeyError:
+ report_diagnostic_event(
+ "IMDS network metadata has incomplete configuration: %r"
+ % imds_md.get("network"),
+ logger_func=LOG.warning,
+ )
+ return False
+
+ missing_macs = [m for m in local_macs if m not in imds_macs]
+ if not missing_macs:
+ return True
+
+ report_diagnostic_event(
+ "IMDS network metadata is missing configuration for NICs %r: %r"
+ % (missing_macs, network_config),
+ logger_func=LOG.warning,
+ )
+
+ if not self._ephemeral_dhcp_ctx or not self._ephemeral_dhcp_ctx.iface:
+ # No primary interface to check against.
+ return False
+
+ primary_mac = net.get_interface_mac(self._ephemeral_dhcp_ctx.iface)
+ if not primary_mac or not isinstance(primary_mac, str):
+ # Unexpected data for primary interface.
+ return False
+
+ primary_mac = normalize_mac_address(primary_mac)
+ if primary_mac in missing_macs:
+ report_diagnostic_event(
+ "IMDS network metadata is missing primary NIC %r: %r"
+ % (primary_mac, network_config),
+ logger_func=LOG.warning,
+ )
+
+ return False
+
+
+def _username_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["osProfile"]["adminUsername"]
+ except KeyError:
+ return None
+
+
+def _userdata_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["userData"]
+ except KeyError:
+ return None
+
+
+def _hostname_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["osProfile"]["computerName"]
+ except KeyError:
+ return None
+
+
+def _disable_password_from_imds(imds_data):
+ try:
+ return (
+ imds_data["compute"]["osProfile"]["disablePasswordAuthentication"]
+ == "true"
+ )
+ except KeyError:
+ return None
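These helpers read individual fields from the compute section of the IMDS instance metadata (mostly compute.osProfile) and return None when a field is absent. A trimmed, illustrative payload showing the fields they look up (values are made up):

    example_imds_data = {
        "compute": {
            "osProfile": {
                "adminUsername": "azureuser",
                "computerName": "example-host",
                "disablePasswordAuthentication": "true",
            },
            "userData": "<opaque user data blob>",
        }
    }
    _username_from_imds(example_imds_data)          # "azureuser"
    _hostname_from_imds(example_imds_data)          # "example-host"
    _disable_password_from_imds(example_imds_data)  # True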
+
+
+def _key_is_openssh_formatted(key):
+ """
+ Validate whether or not the key is OpenSSH-formatted.
+ """
+ # See https://bugs.launchpad.net/cloud-init/+bug/1910835
+ if "\r\n" in key.strip():
+ return False
+
+ parser = ssh_util.AuthKeyLineParser()
+ try:
+ akl = parser.parse(key)
+ except TypeError:
+ return False
+
+ return akl.keytype is not None
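The check rejects keys whose material spans CRLF-separated lines (LP: #1910835) and anything AuthKeyLineParser cannot recognise as an OpenSSH public-key line. Illustrative inputs (key material shortened and made up):

    _key_is_openssh_formatted("ssh-rsa AAAAB3NzaC1yc2E... user@host")  # True
    _key_is_openssh_formatted("ssh-rsa AAAAB3Nza\r\nC1yc2E...")        # False: embedded CRLF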
def _partitions_on_device(devpath, maxnum=16):
@@ -1362,7 +1662,7 @@ def _partitions_on_device(devpath, maxnum=16):
@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
- LOG.debug('ntfs_devices found = %s', ntfs_devices)
+ LOG.debug("ntfs_devices found = %s", ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
@@ -1386,24 +1686,29 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
If cloud-init cannot mount the disk to check for data, destruction
will be allowed, unless the dscfg key is set."""
if preserve_ntfs:
- msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
- (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ msg = "config says to never destroy NTFS (%s.%s), skipping checks" % (
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
return False, msg
if not os.path.exists(devpath):
- return False, 'device %s does not exist' % devpath
+ return False, "device %s does not exist" % devpath
- LOG.debug('Resolving realpath of %s -> %s', devpath,
- os.path.realpath(devpath))
+ LOG.debug(
+ "Resolving realpath of %s -> %s", devpath, os.path.realpath(devpath)
+ )
# devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
# where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
partitions = _partitions_on_device(devpath)
if len(partitions) == 0:
- return False, 'device %s was not partitioned' % devpath
+ return False, "device %s was not partitioned" % devpath
elif len(partitions) > 2:
- msg = ('device %s had 3 or more partitions: %s' %
- (devpath, ' '.join([p[1] for p in partitions])))
+ msg = "device %s had 3 or more partitions: %s" % (
+ devpath,
+ " ".join([p[1] for p in partitions]),
+ )
return False, msg
elif len(partitions) == 2:
cand_part, cand_path = partitions[1]
@@ -1411,66 +1716,78 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
cand_part, cand_path = partitions[0]
if not _has_ntfs_filesystem(cand_path):
- msg = ('partition %s (%s) on device %s was not ntfs formatted' %
- (cand_part, cand_path, devpath))
+ msg = "partition %s (%s) on device %s was not ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
return False, msg
@azure_ds_telemetry_reporter
def count_files(mp):
- ignored = set(['dataloss_warning_readme.txt'])
+ ignored = set(["dataloss_warning_readme.txt"])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
- bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
- (cand_part, cand_path, devpath))
+ bmsg = "partition %s (%s) on device %s was ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
with events.ReportEventStack(
name="mount-ntfs-and-count",
description="mount-ntfs-and-count",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
+ file_count = util.mount_cb(
+ cand_path,
+ count_files,
+ mtype="ntfs",
+ update_env_for_mount={"LANG": "C"},
+ )
except util.MountFailedError as e:
evt.description = "cannot mount ntfs"
if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+ return (
+ True,
+ (
+ bmsg + " but this system cannot mount NTFS,"
+ " assuming there are no important files."
+ " Formatting allowed."
+ ),
+ )
+ return False, bmsg + " but mount of %s failed: %s" % (cand_part, e)
if file_count != 0:
evt.description = "mounted and counted %d files" % file_count
- LOG.warning("it looks like you're using NTFS on the ephemeral"
- " disk, to ensure that filesystem does not get wiped,"
- " set %s.%s in config", '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+ LOG.warning(
+ "it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config",
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
+ return False, bmsg + " but had %d files on it." % file_count
- return True, bmsg + ' and had no important files. Safe for reformatting.'
+ return True, bmsg + " and had no important files. Safe for reformatting."
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
- is_new_instance=False, preserve_ntfs=False):
- # wait for ephemeral disk to come up
- naplen = .2
- with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter
- ):
- missing = util.wait_for_files([devpath],
- maxwait=maxwait,
- naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- report_diagnostic_event(
- "ephemeral device '%s' did not appear after %d seconds." %
- (devpath, maxwait),
- logger_func=LOG.warning)
- return
+def address_ephemeral_resize(
+ devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False
+):
+ if not os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist." % devpath,
+ logger_func=LOG.debug,
+ )
+ return
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists." % devpath,
+ logger_func=LOG.debug,
+ )
result = False
msg = None
@@ -1483,94 +1800,32 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
if not result:
return
- for mod in ['disk_setup', 'mounts']:
- sempath = '/var/lib/cloud/instance/sem/config_' + mod
+ for mod in ["disk_setup", "mounts"]:
+ sempath = "/var/lib/cloud/instance/sem/config_" + mod
bmsg = 'Marker "%s" for module "%s"' % (sempath, mod)
if os.path.exists(sempath):
try:
os.unlink(sempath)
- LOG.debug('%s removed.', bmsg)
+ LOG.debug("%s removed.", bmsg)
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warning('%s: remove failed! (%s)', bmsg, e)
+ LOG.warning("%s: remove failed! (%s)", bmsg, e)
else:
- LOG.debug('%s did not exist.', bmsg)
+ LOG.debug("%s did not exist.", bmsg)
return
@azure_ds_telemetry_reporter
-def perform_hostname_bounce(hostname, cfg, prev_hostname):
- # set the hostname to 'hostname' if it is not already set to that.
- # then, if policy is not off, bounce the interface using command
- # Returns True if the network was bounced, False otherwise.
- command = cfg['command']
- interface = cfg['interface']
- policy = cfg['policy']
-
- msg = ("hostname=%s policy=%s interface=%s" %
- (hostname, policy, interface))
- env = os.environ.copy()
- env['interface'] = interface
- env['hostname'] = hostname
- env['old_hostname'] = prev_hostname
-
- if command == "builtin":
- if util.is_FreeBSD():
- command = BOUNCE_COMMAND_FREEBSD
- elif subp.which('ifup'):
- command = BOUNCE_COMMAND_IFUP
- else:
- LOG.debug(
- "Skipping network bounce: ifupdown utils aren't present.")
- # Don't bounce as networkd handles hostname DDNS updates
- return False
- LOG.debug("pubhname: publishing hostname [%s]", msg)
- shell = not isinstance(command, (list, tuple))
- # capture=False, see comments in bug 1202758 and bug 1206164.
- util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=subp.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
- return True
-
-
-@azure_ds_telemetry_reporter
-def crtfile_to_pubkey(fname, data=None):
- pipeline = ('openssl x509 -noout -pubkey < "$0" |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
- capture=True, data=data)
- return out.rstrip()
-
-
-@azure_ds_telemetry_reporter
-def pubkeys_from_crt_files(flist):
- pubkeys = []
- errors = []
- for fname in flist:
- try:
- pubkeys.append(crtfile_to_pubkey(fname))
- except subp.ProcessExecutionError:
- errors.append(fname)
-
- if errors:
- report_diagnostic_event(
- "failed to convert the crt files to pubkey: %s" % errors,
- logger_func=LOG.warning)
-
- return pubkeys
-
-
-@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
-
def _redact_password(cnt, fname):
"""Azure provides the UserPassword in plain text. So we redact it"""
try:
root = ET.fromstring(cnt)
for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
+ if (
+ "UserPassword" in elem.tag
+ and elem.text != DEF_PASSWD_REDACTION
+ ):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
@@ -1584,21 +1839,11 @@ def write_files(datadir, files, dirmode=None):
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
+ if "ovf-env.xml" in name:
content = _redact_password(content, fname)
util.write_file(filename=fname, content=content, mode=0o600)
-@azure_ds_telemetry_reporter
-def invoke_agent(cmd):
- # this is a function itself to simplify patching it for test
- if cmd:
- LOG.debug("invoking agent: %s", cmd)
- subp.subp(cmd, shell=(not isinstance(cmd, list)))
- else:
- LOG.debug("not invoking agent")
-
-
def find_child(node, filter_func):
ret = []
if not node.hasChildNodes():
@@ -1626,8 +1871,9 @@ def load_azure_ovf_pubkeys(sshnode):
if len(results) == 0:
return []
if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
+ raise BrokenAzureDataSource(
+ "Multiple 'PublicKeys'(%s) in SSH node" % len(results)
+ )
pubkeys_node = results[0]
pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
@@ -1642,7 +1888,7 @@ def load_azure_ovf_pubkeys(sshnode):
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': "", 'value': ""}
+ cur = {"fingerprint": "", "path": "", "value": ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -1652,8 +1898,10 @@ def load_azure_ovf_pubkeys(sshnode):
if name not in cur.keys():
continue
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
+ if (
+ len(child.childNodes) != 1
+ or child.childNodes[0].nodeType != text_node
+ ):
continue
cur[name] = child.childNodes[0].wholeText.strip()
@@ -1671,33 +1919,37 @@ def read_azure_ovf(contents):
report_diagnostic_event(error_str, logger_func=LOG.warning)
raise BrokenAzureDataSource(error_str) from e
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ results = find_child(
+ dom.documentElement, lambda n: n.localName == "ProvisioningSection"
+ )
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
+ raise BrokenAzureDataSource(
+ "found '%d' ProvisioningSection items" % len(results)
+ )
provSection = results[0]
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
+ lpcs_nodes = find_child(
+ provSection,
+ lambda n: n.localName == "LinuxProvisioningConfigurationSet",
+ )
if len(lpcs_nodes) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- (len(lpcs_nodes),
- "LinuxProvisioningConfigurationSet"))
+ raise BrokenAzureDataSource(
+ "found '%d' %ss"
+ % (len(lpcs_nodes), "LinuxProvisioningConfigurationSet")
+ )
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
raise BrokenAzureDataSource("no child nodes of configuration set")
- md_props = 'seedfrom'
- md = {'azure_data': {}}
+ md_props = "seedfrom"
+ md: Dict[str, Any] = {"azure_data": {}}
cfg = {}
ud = ""
password = None
@@ -1711,8 +1963,10 @@ def read_azure_ovf(contents):
simple = False
value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
+ if (
+ len(child.childNodes) == 1
+ and child.childNodes[0].nodeType == dom.TEXT_NODE
+ ):
simple = True
value = child.childNodes[0].wholeText
@@ -1721,8 +1975,8 @@ def read_azure_ovf(contents):
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ ud = base64.b64decode("".join(value.split()))
else:
ud = value
elif name == "username":
@@ -1730,36 +1984,36 @@ def read_azure_ovf(contents):
elif name == "userpassword":
password = value
elif name == "hostname":
- md['local-hostname'] = value
+ md["local-hostname"] = value
elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ dscfg = base64.b64decode("".join(value.split()))
else:
dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
+ cfg["datasource"] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
+ cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
+ cfg["ssh_pwauth"] = util.is_false(value)
elif simple:
if name in md_props:
md[name] = value
else:
- md['azure_data'][name] = value
+ md["azure_data"][name] = value
defuser = {}
if username:
- defuser['name'] = username
+ defuser["name"] = username
if password:
- defuser['lock_passwd'] = False
+ defuser["lock_passwd"] = False
if DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = cfg['password'] = encrypt_pass(password)
+ defuser["passwd"] = cfg["password"] = encrypt_pass(password)
if defuser:
- cfg['system_info'] = {'default_user': defuser}
+ cfg["system_info"] = {"default_user": defuser}
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
+ if "ssh_pwauth" not in cfg and password:
+ cfg["ssh_pwauth"] = True
preprovisioning_cfg = _get_preprovisioning_cfgs(dom)
cfg = util.mergemanydict([cfg, preprovisioning_cfg])
@@ -1785,20 +2039,18 @@ def _get_preprovisioning_cfgs(dom):
More specifically, this will never happen:
- PreprovisionedVm=True and PreprovisionedVMType=Savable
"""
- cfg = {
- "PreprovisionedVm": False,
- "PreprovisionedVMType": None
- }
+ cfg = {"PreprovisionedVm": False, "PreprovisionedVMType": None}
platform_settings_section = find_child(
- dom.documentElement,
- lambda n: n.localName == "PlatformSettingsSection")
+ dom.documentElement, lambda n: n.localName == "PlatformSettingsSection"
+ )
if not platform_settings_section or len(platform_settings_section) == 0:
LOG.debug("PlatformSettingsSection not found")
return cfg
platform_settings = find_child(
platform_settings_section[0],
- lambda n: n.localName == "PlatformSettings")
+ lambda n: n.localName == "PlatformSettings",
+ )
if not platform_settings or len(platform_settings) == 0:
LOG.debug("PlatformSettings not found")
return cfg
@@ -1807,10 +2059,12 @@ def _get_preprovisioning_cfgs(dom):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
cfg["PreprovisionedVm"] = _get_preprovisionedvm_cfg_value(
- platform_settings)
+ platform_settings
+ )
cfg["PreprovisionedVMType"] = _get_preprovisionedvmtype_cfg_value(
- platform_settings)
+ platform_settings
+ )
return cfg
@@ -1822,16 +2076,18 @@ def _get_preprovisionedvm_cfg_value(platform_settings):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
preprovisionedVmVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVm")
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVm"
+ )
if not preprovisionedVmVal or len(preprovisionedVmVal) == 0:
LOG.debug("PreprovisionedVm not found")
return preprovisionedVm
preprovisionedVm = util.translate_bool(
- preprovisionedVmVal[0].firstChild.nodeValue)
+ preprovisionedVmVal[0].firstChild.nodeValue
+ )
report_diagnostic_event(
- "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info)
+ "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info
+ )
return preprovisionedVm
@@ -1850,18 +2106,21 @@ def _get_preprovisionedvmtype_cfg_value(platform_settings):
# Once assigned to customer, the customer-requested nics are
# hot-attached to it and reprovision happens like today.
preprovisionedVMTypeVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVMType")
- if (not preprovisionedVMTypeVal or len(preprovisionedVMTypeVal) == 0 or
- preprovisionedVMTypeVal[0].firstChild is None):
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVMType"
+ )
+ if (
+ not preprovisionedVMTypeVal
+ or len(preprovisionedVMTypeVal) == 0
+ or preprovisionedVMTypeVal[0].firstChild is None
+ ):
LOG.debug("PreprovisionedVMType not found")
return preprovisionedVMType
preprovisionedVMType = preprovisionedVMTypeVal[0].firstChild.nodeValue
report_diagnostic_event(
- "PreprovisionedVMType: %s" % preprovisionedVMType,
- logger_func=LOG.info)
+ "PreprovisionedVMType: %s" % preprovisionedVMType, logger_func=LOG.info
+ )
return preprovisionedVMType
@@ -1885,7 +2144,7 @@ def _check_freebsd_cdrom(cdrom_dev):
@azure_ds_telemetry_reporter
def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
- return None."""
+ return None."""
# azure / hyper-v provides random data here
# now update ds_cfg to reflect contents pass in config
if source is None:
@@ -1901,24 +2160,22 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
# string. Same number of bits of entropy, just with 25% more zeroes.
# There's no need to undo this base64-encoding when the random seed is
# actually used in cc_seed_random.py.
- seed = base64.b64encode(seed).decode()
-
- return seed
+ return base64.b64encode(seed).decode() # type: ignore
@azure_ds_telemetry_reporter
-def list_possible_azure_ds_devs():
- devlist = []
+def list_possible_azure_ds(seed, cache_dir):
+ yield seed
+ yield DEFAULT_PROVISIONING_ISO_DEV
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
if _check_freebsd_cdrom(cdrom_dev):
- return [cdrom_dev]
+ yield cdrom_dev
else:
for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
+ yield from util.find_devs_with("TYPE=%s" % fstype)
+ if cache_dir:
+ yield cache_dir
@azure_ds_telemetry_reporter
@@ -1932,7 +2189,7 @@ def load_azure_ds_dir(source_dir):
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
@@ -1949,12 +2206,14 @@ def parse_network_config(imds_metadata) -> dict:
return _generate_network_config_from_imds_metadata(imds_metadata)
except Exception as e:
LOG.error(
- 'Failed generating network config '
- 'from IMDS network metadata: %s', str(e))
+ "Failed generating network config "
+ "from IMDS network metadata: %s",
+ str(e),
+ )
try:
return _generate_network_config_from_fallback_config()
except Exception as e:
- LOG.error('Failed generating fallback network config: %s', str(e))
+ LOG.error("Failed generating fallback network config: %s", str(e))
return {}
@@ -1966,51 +2225,69 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- netconfig = {'version': 2, 'ethernets': {}}
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
+ netconfig: Dict[str, Any] = {"version": 2, "ethernets": {}}
+ network_metadata = imds_metadata["network"]
+ for idx, intf in enumerate(network_metadata["interface"]):
+ has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
# Any additional IPs of each type will be set as static
# addresses.
- nicname = 'eth{idx}'.format(idx=idx)
- dhcp_override = {'route-metric': (idx + 1) * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False}
- for addr_type in ('ipv4', 'ipv6'):
- addresses = intf.get(addr_type, {}).get('ipAddress', [])
- if addr_type == 'ipv4':
- default_prefix = '24'
+ nicname = "eth{idx}".format(idx=idx)
+ dhcp_override = {"route-metric": (idx + 1) * 100}
+ dev_config: Dict[str, Any] = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ }
+ for addr_type in ("ipv4", "ipv6"):
+ addresses = intf.get(addr_type, {}).get("ipAddress", [])
+ # If there are no available IP addresses, then we don't
+ # want to add this interface to the generated config.
+ if not addresses:
+ LOG.debug("No %s addresses found for: %r", addr_type, intf)
+ continue
+ has_ip_address = True
+ if addr_type == "ipv4":
+ default_prefix = "24"
else:
- default_prefix = '128'
+ default_prefix = "128"
if addresses:
- dev_config['dhcp6'] = True
+ dev_config["dhcp6"] = True
# non-primary interfaces should have a higher
# route-metric (cost) so default routes prefer
# primary nic due to lower route-metric value
- dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config["dhcp6-overrides"] = dhcp_override
for addr in addresses[1:]:
# Append static address config for ip > 1
- netPrefix = intf[addr_type]['subnet'][0].get(
- 'prefix', default_prefix)
- privateIp = addr['privateIpAddress']
- if not dev_config.get('addresses'):
- dev_config['addresses'] = []
- dev_config['addresses'].append(
- '{ip}/{prefix}'.format(
- ip=privateIp, prefix=netPrefix))
- if dev_config:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update({
- 'match': {'macaddress': mac.lower()},
- 'set-name': nicname
- })
+ netPrefix = intf[addr_type]["subnet"][0].get(
+ "prefix", default_prefix
+ )
+ privateIp = addr["privateIpAddress"]
+ if not dev_config.get("addresses"):
+ dev_config["addresses"] = []
+ dev_config["addresses"].append(
+ "{ip}/{prefix}".format(ip=privateIp, prefix=netPrefix)
+ )
+ if dev_config and has_ip_address:
+ mac = normalize_mac_address(intf["macAddress"])
+ dev_config.update(
+ {"match": {"macaddress": mac.lower()}, "set-name": nicname}
+ )
# With netvsc, we can get two interfaces that
# share the same MAC, so we need to make sure
# our match condition also contains the driver
driver = device_driver(nicname)
- if driver and driver == 'hv_netvsc':
- dev_config['match']['driver'] = driver
- netconfig['ethernets'][nicname] = dev_config
+ if driver and driver == "hv_netvsc":
+ dev_config["match"]["driver"] = driver
+ netconfig["ethernets"][nicname] = dev_config
+ continue
+
+ LOG.debug(
+ "No configuration for: %s (dev_config=%r) (has_ip_address=%r)",
+ nicname,
+ dev_config,
+ has_ip_address,
+ )
return netconfig
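
Note (illustrative, not part of the patch): the hunk above assigns each NIC a DHCP route-metric of (idx + 1) * 100 so the primary interface keeps the cheapest default route, and a NIC only receives a config entry once IMDS reports at least one IP address for it. A minimal Python sketch of the metric scheme, using a made-up three-NIC loop:

for idx in range(3):
    nicname = "eth{idx}".format(idx=idx)
    dhcp_override = {"route-metric": (idx + 1) * 100}
    print(nicname, dhcp_override)
# eth0 {'route-metric': 100}  <- primary NIC wins default-route selection
# eth1 {'route-metric': 200}
# eth2 {'route-metric': 300}
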
@@ -2020,72 +2297,101 @@ def _generate_network_config_from_fallback_config() -> dict:
@return: Dictionary containing network version 2 standard configuration.
"""
- return net.generate_fallback_config(
- blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True)
+ cfg = net.generate_fallback_config(
+ blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True
+ )
+ if cfg is None:
+ return {}
+ return cfg
@azure_ds_telemetry_reporter
-def get_metadata_from_imds(fallback_nic,
- retries,
- md_type=metadata_type.compute):
+def get_metadata_from_imds(
+ retries,
+ md_type=MetadataType.ALL,
+ api_version=IMDS_VER_MIN,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+):
"""Query Azure's instance metadata service, returning a dictionary.
- If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
- IMDS. For more info on IMDS:
+ For more info on IMDS:
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
- @param fallback_nic: String. The name of the nic which requires active
- network in order to query IMDS.
@param retries: The number of retries of the IMDS_URL.
+ @param md_type: Metadata type for IMDS request.
+ @param api_version: IMDS api-version to use in the request.
@return: A dict of instance metadata containing compute and network
info.
"""
- kwargs = {'logfunc': LOG.debug,
- 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
- 'func': _get_metadata_from_imds, 'args': (retries, md_type,)}
- if net.is_up(fallback_nic):
+ kwargs = {
+ "logfunc": LOG.debug,
+ "msg": "Crawl of Azure Instance Metadata Service (IMDS)",
+ "func": _get_metadata_from_imds,
+ "args": (retries, exc_cb, md_type, api_version, infinite),
+ }
+ try:
return util.log_time(**kwargs)
- else:
- try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter, fallback_nic):
- return util.log_time(**kwargs)
- except Exception as e:
- report_diagnostic_event(
- "exception while getting metadata: %s" % e,
- logger_func=LOG.warning)
- raise
+ except Exception as e:
+ report_diagnostic_event(
+ "exception while getting metadata: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise
@azure_ds_telemetry_reporter
-def _get_metadata_from_imds(retries, md_type=metadata_type.compute):
-
- url = md_type.value
+def _get_metadata_from_imds(
+ retries,
+ exc_cb,
+ md_type=MetadataType.ALL,
+ api_version=IMDS_VER_MIN,
+ infinite=False,
+):
+ url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
+
+ # support for extended metadata begins with 2021-03-01
+ if api_version >= IMDS_EXTENDED_VER_MIN and md_type == MetadataType.ALL:
+ url = url + "&extended=true"
+
try:
response = readurl(
- url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
- retries=retries, exception_cb=retry_on_url_exc)
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ retries=retries,
+ exception_cb=exc_cb,
+ infinite=infinite,
+ )
except Exception as e:
- report_diagnostic_event(
- 'Ignoring IMDS instance metadata. '
- 'Get metadata from IMDS failed: %s' % e,
- logger_func=LOG.warning)
- return {}
+ # pylint:disable=no-member
+ if isinstance(e, UrlError) and e.code == 400:
+ raise
+ else:
+ report_diagnostic_event(
+ "Ignoring IMDS instance metadata. "
+ "Get metadata from IMDS failed: %s" % e,
+ logger_func=LOG.warning,
+ )
+ return {}
try:
from json.decoder import JSONDecodeError
+
json_decode_error = JSONDecodeError
except ImportError:
json_decode_error = ValueError
try:
- return util.load_json(str(response))
+ return util.load_json(response.contents)
except json_decode_error as e:
report_diagnostic_event(
- 'Ignoring non-json IMDS instance metadata response: %s. '
- 'Loading non-json IMDS response failed: %s' % (str(response), e),
- logger_func=LOG.warning)
+ "Ignoring non-json IMDS instance metadata response: %s. "
+ "Loading non-json IMDS response failed: %s"
+ % (response.contents, e),
+ logger_func=LOG.warning,
+ )
return {}
@@ -2115,10 +2421,11 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
if os.path.exists(path):
if not logged:
LOG.info(
- 'Removing Ubuntu extended network scripts because'
- ' cloud-init updates Azure network configuration on the'
- ' following event: %s.',
- EventType.BOOT)
+ "Removing Ubuntu extended network scripts because"
+ " cloud-init updates Azure network configuration on the"
+ " following events: %s.",
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value],
+ )
logged = True
if os.path.isdir(path):
util.del_dir(path)
@@ -2131,15 +2438,15 @@ def _is_platform_viable(seed_dir):
with events.ReportEventStack(
name="check-platform-viability",
description="found azure asset tag",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
evt.description = msg
report_diagnostic_event(msg, logger_func=LOG.debug)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ if os.path.exists(os.path.join(seed_dir, "ovf-env.xml")):
return True
return False
@@ -2157,7 +2464,7 @@ DataSourceAzureNet = DataSourceAzure
# Used to match classes to dependencies
datasources = [
- (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM,)),
]
@@ -2165,4 +2472,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
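
Note (illustrative, not part of the patch): the Azure hunks above rework IMDS access: get_metadata_from_imds() no longer brings up ephemeral DHCP on a fallback NIC, list_possible_azure_ds_devs() becomes the generator list_possible_azure_ds(seed, cache_dir), and _get_metadata_from_imds() now composes its URL from an api-version and asks for extended metadata where supported. A minimal sketch of that URL composition follows; the constant values and the two-member MetadataType enum are assumptions made only so the example runs on its own.

from enum import Enum

IMDS_URL = "http://169.254.169.254/metadata"  # assumed value
IMDS_VER_MIN = "2019-06-01"                   # assumed value
IMDS_EXTENDED_VER_MIN = "2021-03-01"          # assumed value


class MetadataType(Enum):
    ALL = "{}/instance".format(IMDS_URL)
    NETWORK = "{}/instance/network".format(IMDS_URL)


def build_imds_url(md_type=MetadataType.ALL, api_version=IMDS_VER_MIN):
    url = "{}?api-version={}".format(md_type.value, api_version)
    # Extended metadata is only requested for the full instance document and
    # only for API versions that support it; lexicographic comparison works
    # because the versions are ISO dates.
    if api_version >= IMDS_EXTENDED_VER_MIN and md_type == MetadataType.ALL:
        url = url + "&extended=true"
    return url


print(build_imds_url())
print(build_imds_url(api_version=IMDS_EXTENDED_VER_MIN))
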
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 63435279..426a762e 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -7,14 +7,12 @@
import errno
import json
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
class DataSourceBigstep(sources.DataSource):
- dsname = 'Bigstep'
+ dsname = "Bigstep"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -35,7 +33,7 @@ class DataSourceBigstep(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'metadata (%s)' % get_url_from_file()
+ return "metadata (%s)" % get_url_from_file()
def get_url_from_file():
@@ -61,4 +59,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index f63baf74..de71c3e9 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -4,14 +4,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from base64 import b64decode
import re
-
-from cloudinit.cs_utils import Cepko, SERIAL_PORT
+from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit.cs_utils import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
@@ -24,11 +23,11 @@ class DataSourceCloudSigma(sources.DataSource):
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
- dsname = 'CloudSigma'
+ dsname = "CloudSigma"
def __init__(self, sys_cfg, distro, paths):
self.cepko = Cepko()
- self.ssh_public_key = ''
+ self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
def is_running_in_cloudsigma(self):
@@ -43,7 +42,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("system-product-name not available in dmi data")
return False
LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
+ return "cloudsigma" in sys_product_name.lower()
def _get_data(self):
"""
@@ -56,7 +55,7 @@ class DataSourceCloudSigma(sources.DataSource):
try:
server_context = self.cepko.all().result
- server_meta = server_context['meta']
+ server_meta = server_context["meta"]
except Exception:
# TODO: check for explicit "config on", and then warn
# but since no explicit config is available now, just debug.
@@ -64,41 +63,42 @@ class DataSourceCloudSigma(sources.DataSource):
return False
self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
+ [server_meta.get("cloudinit-dsmode")]
+ )
if dsmode == sources.DSMODE_DISABLED:
return False
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
+ base64_fields = server_meta.get("base64_fields", "").split(",")
+ self.userdata_raw = server_meta.get("cloudinit-user-data", "")
+ if "cloudinit-user-data" in base64_fields:
self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
+ if "cloudinit" in server_context.get("vendor_data", {}):
self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
+ self.ssh_public_key = server_meta["ssh_public_key"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'cepko (%s)' % SERIAL_PORT
+ return "cepko (%s)" % SERIAL_PORT
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""
Cleans up and uses the server's name if the latter is set. Otherwise
the first part from uuid is being used.
"""
- if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
- return self.metadata['name'][:61]
+ if re.match(r"^[A-Za-z0-9 -_\.]+$", self.metadata["name"]):
+ return self.metadata["name"][:61]
else:
- return self.metadata['uuid'].split('-')[0]
+ return self.metadata["uuid"].split("-")[0]
def get_public_ssh_keys(self):
return [self.ssh_public_key]
def get_instance_id(self):
- return self.metadata['uuid']
+ return self.metadata["uuid"]
# Legacy: Must be present in case we load an old pkl object
@@ -107,7 +107,7 @@ DataSourceCloudSigmaNet = DataSourceCloudSigma
# Used to match classes to dependencies. Since this datasource uses the serial
# port network is not really required, so it's okay to load without it, too.
datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, )),
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
]
@@ -117,4 +117,5 @@ def get_datasource_list(depends):
"""
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
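
Note (illustrative, not part of the patch): per the CloudSigma hunk above, any field named in the server context's base64_fields entry arrives base64-encoded over the serial port and is decoded before use. A small self-contained sketch with a made-up server_meta document:

from base64 import b64decode, b64encode

server_meta = {
    "base64_fields": "cloudinit-user-data",
    "cloudinit-user-data": b64encode(b"#cloud-config\nhostname: demo\n").decode(),
}

base64_fields = server_meta.get("base64_fields", "").split(",")
userdata_raw = server_meta.get("cloudinit-user-data", "")
if "cloudinit-user-data" in base64_fields:
    # fields listed in base64_fields are transported base64-encoded
    userdata_raw = b64decode(userdata_raw)

print(userdata_raw)  # b'#cloud-config\nhostname: demo\n'
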
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 54810439..a742a5e6 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,17 +13,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa, getaddrinfo, gaierror
-from struct import pack
import time
+from socket import gaierror, getaddrinfo, inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit.net import dhcp
-from cloudinit import sources
+from cloudinit import sources, subp
from cloudinit import url_helper as uhelp
-from cloudinit import subp
from cloudinit import util
+from cloudinit.net import dhcp
LOG = logging.getLogger(__name__)
@@ -47,27 +46,36 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = subp.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
+ output, _ = subp.subp(
+ [
+ "wget",
+ "--quiet",
+ "--tries",
+ "3",
+ "--timeout",
+ "20",
+ "--output-document",
+ "-",
+ "--header",
+ "DomU_Request: {0}".format(domu_request),
+ "{0}:8080".format(self.virtual_router_address),
+ ]
+ )
return output.strip()
def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
+ password = self._do_request("send_my_password")
+ if password in ["", "saved_password"]:
return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
+ if password == "bad_request":
+ raise RuntimeError("Error when attempting to fetch root password.")
+ self._do_request("saved_password")
return password
class DataSourceCloudStack(sources.DataSource):
- dsname = 'CloudStack'
+ dsname = "CloudStack"
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,10 +83,10 @@ class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
+ self.seed_dir = os.path.join(paths.seed_dir, "cs")
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
+ self.api_ver = "latest"
self.vr_addr = get_vr_address()
if not self.vr_addr:
raise RuntimeError("No virtual router found!")
@@ -91,19 +99,28 @@ class DataSourceCloudStack(sources.DataSource):
if url_params.max_wait_seconds <= 0:
return False
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
+ urls = [
+ uhelp.combine_url(
+ self.metadata_address, "latest/meta-data/instance-id"
+ )
+ ]
start_time = time.time()
url, _response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on waiting for the metadata from %s"
+ " after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(url)
@@ -113,8 +130,8 @@ class DataSourceCloudStack(sources.DataSource):
def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
+ self.userdata_raw = seed_ret["user-data"]
+ self.metadata = seed_ret["meta-data"]
LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
return True
try:
@@ -122,45 +139,54 @@ class DataSourceCloudStack(sources.DataSource):
return False
start_time = time.time()
self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ self.api_ver, self.metadata_address
+ )
+ self.metadata = ec2.get_instance_metadata(
+ self.api_ver, self.metadata_address
+ )
+ LOG.debug(
+ "Crawl of metadata service took %s seconds",
+ int(time.time() - start_time),
+ )
password_client = CloudStackPasswordServerClient(self.vr_addr)
try:
set_password = password_client.get_password()
except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
+ util.logexc(
+ LOG,
+ "Failed to fetch password from virtual router %s",
+ self.vr_addr,
+ )
else:
if set_password:
self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": set_password,
+ "chpasswd": {
+ "expire": False,
},
}
return True
except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
+ util.logexc(
+ LOG,
+ "Failed fetching from metadata service %s",
+ self.metadata_address,
+ )
return False
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
def get_data_server():
# Returns the metadataserver from dns
try:
- addrinfo = getaddrinfo("data-server.", 80)
+ addrinfo = getaddrinfo("data-server", 80)
except gaierror:
LOG.debug("DNS Entry data-server not found")
return None
@@ -183,8 +209,11 @@ def get_default_gateway():
def get_dhclient_d():
# find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp",
- "/var/lib/NetworkManager"]
+ supported_dirs = [
+ "/var/lib/dhclient",
+ "/var/lib/dhcp",
+ "/var/lib/NetworkManager",
+ ]
for d in supported_dirs:
if os.path.exists(d) and len(os.listdir(d)) > 0:
LOG.debug("Using %s lease directory", d)
@@ -233,15 +262,18 @@ def get_vr_address():
# Try data-server DNS entry first
latest_address = get_data_server()
if latest_address:
- LOG.debug("Found metadata server '%s' via data-server DNS entry",
- latest_address)
+ LOG.debug(
+ "Found metadata server '%s' via data-server DNS entry",
+ latest_address,
+ )
return latest_address
# Try networkd second...
- latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ latest_address = dhcp.networkd_get_option_from_leases("SERVER_ADDRESS")
if latest_address:
- LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
- latest_address)
+ LOG.debug(
+ "Found SERVER_ADDRESS '%s' via networkd_leases", latest_address
+ )
return latest_address
# Try dhcp lease files next...
@@ -275,4 +307,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
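
Note (illustrative, not part of the patch): the CloudStack hunk above only reflows the wget invocation used by the password-server client into one argument per list element; get_password() still treats '' and 'saved_password' as "no password set" and 'bad_request' as an error. A sketch of the command line that list produces, with a made-up router address:

virtual_router_address = "10.1.1.1"  # made-up example
domu_request = "send_my_password"

cmd = [
    "wget", "--quiet",
    "--tries", "3",
    "--timeout", "20",
    "--output-document", "-",
    "--header", "DomU_Request: {0}".format(domu_request),
    "{0}:8080".format(virtual_router_address),
]
print(" ".join(cmd))
# wget --quiet --tries 3 --timeout 20 --output-document - --header DomU_Request: send_my_password 10.1.1.1:8080
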
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 62756cf7..f7c58b12 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -9,12 +9,9 @@
import os
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
-
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
from cloudinit.sources.helpers import openstack
@@ -22,25 +19,35 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
+DEFAULT_MODE = "pass"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2', 'CONFIG-2')
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+FS_TYPES = ("vfat", "iso9660")
+LABEL_TYPES = ("config-2", "CONFIG-2")
+POSSIBLE_MOUNTS = ("sr", "cd")
+OPTICAL_DEVICES = tuple(
+ ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))
+)
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- dsname = 'ConfigDrive'
+ dsname = "ConfigDrive"
+
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.seed_dir = os.path.join(paths.seed_dir, "config_drive")
self.version = None
self.ec2_metadata = None
self._network_config = None
@@ -70,15 +77,16 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- dslist = self.sys_cfg.get('datasource_list')
+ dslist = self.sys_cfg.get("datasource_list")
for dev in find_candidate_devs(dslist=dslist):
mtype = None
if util.is_BSD():
if dev.startswith("/dev/cd"):
mtype = "cd9660"
try:
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype)
+ results = util.mount_cb(
+ dev, read_config_drive, mtype=mtype
+ )
found = dev
except openstack.NonReadable:
pass
@@ -91,41 +99,49 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
+ [
+ results.get("dsmode"),
+ self.ds_cfg.get("dsmode"),
+ sources.DSMODE_PASS if results["version"] == 1 else None,
+ ]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
+ cur_iid = md["instance-id"]
if prev_iid != cur_iid:
# better would be to handle this centrally, allowing
# the datasource to do something on new instance id
# note, networking is only rendered here if dsmode is DSMODE_PASS
# which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
+ on_first_boot(
+ results,
+ distro=self.distro,
+ network=self.dsmode == sources.DSMODE_PASS,
+ )
# This is legacy and sneaky. If dsmode is 'pass' then do not claim
# the datasource was used, even though we did run on_first_boot above.
if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.source = found
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -137,7 +153,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
# obsolete compared to networkdata (from network_data.json) but both
# might be present.
self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
+ self.network_json = results.get("networkdata")
return True
def check_instance_id(self, sys_cfg):
@@ -150,7 +166,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
+ self.network_json, known_macs=self.known_macs
+ )
elif self.network_eni is not None:
self._network_config = eni.convert_eni_data(self.network_eni)
LOG.debug("network config provided via converted eni data")
@@ -160,15 +177,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def platform(self):
- return 'openstack'
+ return "openstack"
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.source.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.source.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.source)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.source)
def read_config_drive(source_dir):
@@ -190,7 +207,7 @@ def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
# hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ fname = os.path.join(paths.get_cpath("data"), "instance-id")
try:
return util.load_file(fname).rstrip("\n")
except IOError:
@@ -200,14 +217,15 @@ def get_previous_iid(paths):
def on_first_boot(data, distro=None, network=True):
"""Performs any first-boot actions using data read from a config-drive."""
if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
+ raise TypeError(
+ "Config-drive data expected to be a dict; not %s" % (type(data))
+ )
if network:
- net_conf = data.get("network_config", '')
+ net_conf = data.get("network_config", "")
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
distro.apply_network_config(eni.convert_eni_data(net_conf))
- write_injected_files(data.get('files'))
+ write_injected_files(data.get("files"))
def write_injected_files(files):
@@ -264,12 +282,13 @@ def find_candidate_devs(probe_optical=True, dslist=None):
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
+ candidates = by_label + [d for d in by_fstype if d not in by_label]
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device (ex sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
+ devices = [
+ d for d in candidates if d in by_label or not util.is_partition(d)
+ ]
LOG.debug("devices=%s dslist=%s", devices, dslist)
if devices and "IBMCloud" in dslist:
@@ -277,8 +296,11 @@ def find_candidate_devs(probe_optical=True, dslist=None):
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
devices.remove(ibm_path)
- LOG.debug("IBMCloud device '%s' (%s) removed from candidate list",
- ibm_path, ibm_platform)
+ LOG.debug(
+ "IBMCloud device '%s' (%s) removed from candidate list",
+ ibm_path,
+ ibm_platform,
+ )
return devices
@@ -296,4 +318,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
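
Note (illustrative, not part of the patch): besides declaring supported_update_events for network hotplug, the ConfigDrive hunk above rewraps the OPTICAL_DEVICES tuple that find_candidate_devs() probes. Its expansion, shown as a runnable snippet:

POSSIBLE_MOUNTS = ("sr", "cd")
OPTICAL_DEVICES = tuple(
    "/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2)
)
print(OPTICAL_DEVICES)  # ('/dev/sr0', '/dev/sr1', '/dev/cd0', '/dev/cd1')
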
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5040ce5b..52d3ad26 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -6,16 +6,14 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
import cloudinit.sources.helpers.digitalocean as do_helper
+from cloudinit import log as logging
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
+ "metadata_url": "http://169.254.169.254/metadata/v1.json",
}
# Wait for a up to a minute, retrying the meta-data server
@@ -28,20 +26,25 @@ MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
- dsname = 'DigitalOcean'
+ dsname = "DigitalOcean"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(
+ sys_cfg, ["datasource", "DigitalOcean"], {}
+ ),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
def _get_sysinfo(self):
@@ -54,22 +57,25 @@ class DataSourceDigitalOcean(sources.DataSource):
if not is_do:
return False
- LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)
+ LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
self.metadata_full = md
- self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = md.get('hostname', droplet_id)
- self.metadata['interfaces'] = md.get('interfaces')
- self.metadata['public-keys'] = md.get('public_keys')
- self.metadata['availability_zone'] = md.get('region', 'default')
+ self.metadata["instance-id"] = md.get("droplet_id", droplet_id)
+ self.metadata["local-hostname"] = md.get("hostname", droplet_id)
+ self.metadata["interfaces"] = md.get("interfaces")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
self.vendordata_raw = md.get("vendor_data", None)
self.userdata_raw = md.get("user_data", None)
@@ -80,32 +86,34 @@ class DataSourceDigitalOcean(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- interfaces = self.metadata.get('interfaces')
+ interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
raise Exception("Unable to get meta-data from server....")
- nameservers = self.metadata_full['dns']['nameservers']
+ nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
- interfaces, nameservers)
+ interfaces, nameservers
+ )
return self._network_config
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM,)),
]
@@ -113,4 +121,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
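
Note (illustrative, not part of the patch): the DigitalOcean hunk above maps the droplet metadata document (metadata_url http://169.254.169.254/metadata/v1.json) onto cloud-init's metadata keys. A runnable sketch with a made-up document:

md = {
    "droplet_id": 1111111,
    "hostname": "demo-droplet",
    "region": "nyc3",
    "public_keys": ["ssh-ed25519 AAAA... user@host"],
    "interfaces": {"public": [], "private": []},
}
droplet_id = md["droplet_id"]  # normally obtained from _get_sysinfo()

metadata = {
    "instance-id": md.get("droplet_id", droplet_id),
    "local-hostname": md.get("hostname", droplet_id),
    "interfaces": md.get("interfaces"),
    "public-keys": md.get("public_keys"),
    "availability_zone": md.get("region", "default"),
}
print(metadata["instance-id"], metadata["availability_zone"])  # 1111111 nyc3
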
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1930a509..03b3870c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -8,19 +8,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
import time
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
+from cloudinit import net, sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit import warnings
-from cloudinit.event import EventType
+from cloudinit import util, warnings
+from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
@@ -29,10 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-API_TOKEN_ROUTE = 'latest/api/token'
-AWS_TOKEN_TTL_SECONDS = '21600'
-AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+API_TOKEN_ROUTE = "latest/api/token"
+AWS_TOKEN_TTL_SECONDS = "21600"
+AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
@@ -52,18 +51,18 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
- dsname = 'Ec2'
+ dsname = "Ec2"
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2009-04-04'
+ min_metadata_version = "2009-04-04"
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2018-09-24', '2016-09-02']
+ extended_metadata_versions = ["2018-09-24", "2016-09-02"]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,6 +74,15 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -85,11 +93,18 @@ class DataSourceEc2(sources.DataSource):
def _get_data(self):
strict_mode, _sleep = read_strict_mode(
- util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
- STRICT_ID_DEFAULT), ("warn", None))
-
- LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
- strict_mode, self.cloud_name, self.platform)
+ util.get_cfg_by_path(
+ self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT
+ ),
+ ("warn", None),
+ )
+
+ LOG.debug(
+ "strict_mode: %s, cloud_name=%s cloud_platform=%s",
+ strict_mode,
+ self.cloud_name,
+ self.platform,
+ )
if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
return False
elif self.cloud_name == CloudNames.NO_EC2_METADATA:
@@ -102,20 +117,27 @@ class DataSourceEc2(sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
except NoDHCPLeaseError:
return False
else:
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not self._crawled_metadata:
return False
- self.metadata = self._crawled_metadata.get('meta-data', None)
- self.userdata_raw = self._crawled_metadata.get('user-data', None)
- self.identity = self._crawled_metadata.get(
- 'dynamic', {}).get('instance-identity', {}).get('document', {})
+ self.metadata = self._crawled_metadata.get("meta-data", None)
+ self.userdata_raw = self._crawled_metadata.get("user-data", None)
+ self.identity = (
+ self._crawled_metadata.get("dynamic", {})
+ .get("instance-identity", {})
+ .get("document", {})
+ )
return True
def is_classic_instance(self):
@@ -125,9 +147,9 @@ class DataSourceEc2(sources.DataSource):
# network_config where metadata will be present.
# Secondary call site is in packaging postinst script.
return False
- ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
+ ifaces_md = self.metadata.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
return False
return True
@@ -135,12 +157,12 @@ class DataSourceEc2(sources.DataSource):
def launch_index(self):
if not self.metadata:
return None
- return self.metadata.get('ami-launch-index')
+ return self.metadata.get("ami-launch-index")
@property
def platform(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = DataSourceEc2.dsname.lower()
if not self._platform_type:
self._platform_type = DataSourceEc2.dsname.lower()
@@ -156,44 +178,47 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
- url_tmpl = '{0}/{1}/meta-data/instance-id'
+ url_tmpl = "{0}/{1}/meta-data/instance-id"
headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url, headers=headers,
- headers_redact=AWS_TOKEN_REDACT)
+ resp = uhelp.readurl(
+ url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+ )
except uhelp.UrlError as e:
- LOG.debug('url %s raised exception %s', url, e)
+ LOG.debug("url %s raised exception %s", url, e)
else:
if resp.code == 200:
- LOG.debug('Found preferred metadata version %s', api_ver)
+ LOG.debug("Found preferred metadata version %s", api_ver)
return api_ver
elif resp.code == 404:
- msg = 'Metadata api version %s not present. Headers: %s'
+ msg = "Metadata api version %s not present. Headers: %s"
LOG.debug(msg, api_ver, resp.headers)
return self.min_metadata_version
def get_instance_id(self):
if self.cloud_name == CloudNames.AWS:
# Prefer the ID from the instance identity document, but fall back
- if not getattr(self, 'identity', None):
+ if not getattr(self, "identity", None):
# If re-using cached datasource, it's get_data run didn't
# setup self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
+ api_version,
+ self.metadata_address,
headers_cb=self._get_headers,
headers_redact=AWS_TOKEN_REDACT,
- exception_cb=self._refresh_stale_aws_token_cb).get(
- 'document', {})
+ exception_cb=self._refresh_stale_aws_token_cb,
+ ).get("document", {})
return self.identity.get(
- 'instanceId', self.metadata['instance-id'])
+ "instanceId", self.metadata["instance-id"]
+ )
else:
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
- """ Get an API token for EC2 Instance Metadata Service.
+ """Get an API token for EC2 Instance Metadata Service.
On EC2. IMDS will always answer an API token, unless
the instance owner has disabled the IMDS HTTP endpoint or
@@ -205,26 +230,29 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
url_path = API_TOKEN_ROUTE
- request_method = 'PUT'
+ request_method = "PUT"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
# use the self._imds_exception_cb to check for Read errors
- LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ LOG.debug("Fetching Ec2 IMDSv2 API Token")
response = None
url = None
url_params = self.get_url_params()
try:
url, response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
headers_cb=self._get_headers,
exception_cb=self._imds_exception_cb,
request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+ headers_redact=AWS_TOKEN_REDACT,
+ )
except uhelp.UrlError:
# We use the raised exception to interupt the retry loop.
# Nothing else to do here.
@@ -250,8 +278,10 @@ class DataSourceEc2(sources.DataSource):
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(mdurls) - set(filtered))),
+ )
if len(filtered):
mdurls = filtered
@@ -269,20 +299,25 @@ class DataSourceEc2(sources.DataSource):
# if we can't get a token, use instance-id path
urls = []
url2base = {}
- url_path = '{ver}/meta-data/instance-id'.format(
- ver=self.min_metadata_version)
- request_method = 'GET'
+ url_path = "{ver}/meta-data/instance-id".format(
+ ver=self.min_metadata_version
+ )
+ request_method = "GET"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
url, _ = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
- headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
- request_method=request_method)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT,
+ headers_cb=self._get_headers,
+ request_method=request_method,
+ )
if url:
metadata_address = url2base[url]
@@ -293,8 +328,11 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_name == CloudNames.AWS:
LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on md from %s after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(metadata_address)
@@ -302,7 +340,7 @@ class DataSourceEc2(sources.DataSource):
# Consult metadata service, that has
# ephemeral0: sdb
# and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
+ if "block-device-mapping" not in self.metadata:
return None
# Example:
@@ -311,7 +349,7 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm = self.metadata['block-device-mapping']
+ bdm = self.metadata["block-device-mapping"]
if not isinstance(bdm, dict):
LOG.debug("block-device-mapping not a dictionary: '%s'", bdm)
return None
@@ -354,17 +392,18 @@ class DataSourceEc2(sources.DataSource):
try:
if self.cloud_name == CloudNames.AWS:
return self.identity.get(
- 'availabilityZone',
- self.metadata['placement']['availability-zone'])
+ "availabilityZone",
+ self.metadata["placement"]["availability-zone"],
+ )
else:
- return self.metadata['placement']['availability-zone']
+ return self.metadata["placement"]["availability-zone"]
except KeyError:
return None
@property
def region(self):
if self.cloud_name == CloudNames.AWS:
- region = self.identity.get('region')
+ region = self.identity.get("region")
# Fallback to trimming the availability zone if region is missing
if self.availability_zone and not region:
region = self.availability_zone[:-1]
@@ -381,7 +420,8 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name == CloudNames.UNKNOWN:
warn_if_necessary(
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
- cfg)
+ cfg,
+ )
@property
def network_config(self):
@@ -392,30 +432,39 @@ class DataSourceEc2(sources.DataSource):
if self.metadata is None:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- "Unexpected call to network_config when metadata is None.")
+ "Unexpected call to network_config when metadata is None."
+ )
return None
result = None
no_network_metadata_on_aws = bool(
- 'network' not in self.metadata and
- self.cloud_name == CloudNames.AWS)
+ "network" not in self.metadata
+ and self.cloud_name == CloudNames.AWS
+ )
if no_network_metadata_on_aws:
- LOG.debug("Metadata 'network' not present:"
- " Refreshing stale metadata from prior to upgrade.")
+ LOG.debug(
+ "Metadata 'network' not present:"
+ " Refreshing stale metadata from prior to upgrade."
+ )
util.log_time(
- logfunc=LOG.debug, msg='Re-crawl of metadata service',
- func=self.get_data)
+ logfunc=LOG.debug,
+ msg="Re-crawl of metadata service",
+ func=self.get_data,
+ )
iface = self.fallback_interface
- net_md = self.metadata.get('network')
+ net_md = self.metadata.get("network")
if isinstance(net_md, dict):
# SRU_BLOCKER: xenial, bionic and eoan should default
# apply_full_imds_network_config to False to retain original
# behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, fallback_nic=iface,
+ net_md,
+ fallback_nic=iface,
full_network_config=util.get_cfg_option_bool(
- self.ds_cfg, 'apply_full_imds_network_config', True))
+ self.ds_cfg, "apply_full_imds_network_config", True
+ ),
+ )
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -426,7 +475,15 @@ class DataSourceEc2(sources.DataSource):
# Non-VPC (aka Classic) Ec2 instances need to rewrite the
# network config file every boot due to MAC address change.
if self.is_classic_instance():
- self.update_events['network'].add(EventType.BOOT)
+ self.default_update_events = copy.deepcopy(
+ self.default_update_events
+ )
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT
+ )
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT_LEGACY
+ )
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -438,7 +495,7 @@ class DataSourceEc2(sources.DataSource):
if self._fallback_interface is None:
# fallback_nic was used at one point, so restored objects may
# have an attribute there. respect that if found.
- _legacy_fbnic = getattr(self, 'fallback_nic', None)
+ _legacy_fbnic = getattr(self, "fallback_nic", None)
if _legacy_fbnic:
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
@@ -463,26 +520,37 @@ class DataSourceEc2(sources.DataSource):
else:
exc_cb = exc_cb_ud = None
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb_ud)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb_ud,
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
- crawled_metadata['dynamic'] = {'instance-identity': identity}
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
+ crawled_metadata["dynamic"] = {"instance-identity": identity}
except Exception:
util.logexc(
- LOG, "Failed reading from metadata address %s",
- self.metadata_address)
+ LOG,
+ "Failed reading from metadata address %s",
+ self.metadata_address,
+ )
return {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
return crawled_metadata
def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
@@ -495,23 +563,27 @@ class DataSourceEc2(sources.DataSource):
return None
LOG.debug("Refreshing Ec2 metadata API token")
request_header = {AWS_TOKEN_REQ_HEADER: seconds}
- token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
try:
- response = uhelp.readurl(token_url, headers=request_header,
- headers_redact=AWS_TOKEN_REDACT,
- request_method="PUT")
+ response = uhelp.readurl(
+ token_url,
+ headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT",
+ )
except uhelp.UrlError as e:
LOG.warning(
- 'Unable to get API token: %s raised exception %s',
- token_url, e)
+ "Unable to get API token: %s raised exception %s", token_url, e
+ )
return None
return response.contents
def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
"""Callback will not retry on SKIP_USERDATA_CODES or if no token
- is available."""
+ is available."""
retry = ec2.skip_retry_on_codes(
- ec2.SKIP_USERDATA_CODES, msg, exception)
+ ec2.SKIP_USERDATA_CODES, msg, exception
+ )
if not retry:
return False # False raises exception
return self._refresh_stale_aws_token_cb(msg, exception)
@@ -541,14 +613,17 @@ class DataSourceEc2(sources.DataSource):
# requests.ConnectionError will have exception.code == None
if exception.code and exception.code >= 400:
if exception.code == 403:
- LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
- 'HTTP endpoint is disabled. Aborting.')
+ LOG.warning(
+ "Ec2 IMDS endpoint returned a 403 error. "
+ "HTTP endpoint is disabled. Aborting."
+ )
else:
- LOG.warning('Fatal error while requesting '
- 'Ec2 IMDSv2 API tokens')
+ LOG.warning(
+ "Fatal error while requesting Ec2 IMDSv2 API tokens"
+ )
raise exception
- def _get_headers(self, url=''):
+ def _get_headers(self, url=""):
"""Return a dict of headers for accessing a url.
If _api_token is unset on AWS, attempt to refresh the token via a PUT
@@ -578,13 +653,17 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
+
perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (CloudNames.AWS,)
if self.cloud_name not in supported_platforms:
- LOG.debug("Local Ec2 mode only supported on %s, not %s",
- supported_platforms, self.cloud_name)
+ LOG.debug(
+ "Local Ec2 mode only supported on %s, not %s",
+ supported_platforms,
+ self.cloud_name,
+ )
return False
return super(DataSourceEc2Local, self).get_data()
@@ -602,18 +681,19 @@ def parse_strict_mode(cfgval):
# true, false, warn,[sleep]
# return tuple with string mode (true|false|warn) and sleep.
if cfgval is True:
- return 'true', None
+ return "true", None
if cfgval is False:
- return 'false', None
+ return "false", None
if not cfgval:
- return 'warn', 0
+ return "warn", 0
mode, _, sleep = cfgval.partition(",")
- if mode not in ('true', 'false', 'warn'):
+ if mode not in ("true", "false", "warn"):
raise ValueError(
"Invalid mode '%s' in strict_id setting '%s': "
- "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)
+ )
if sleep:
try:
@@ -639,47 +719,53 @@ def warn_if_necessary(cfgval, cfg):
if mode == "false":
return
- warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+ warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
def identify_aws(data):
# data is a dictionary returned by _collect_platform_data.
- if (data['uuid'].startswith('ec2') and
- (data['uuid_source'] == 'hypervisor' or
- data['uuid'] == data['serial'])):
+ if data["uuid"].startswith("ec2") and (
+ data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"]
+ ):
return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('.brightbox.com'):
+ if data["serial"].endswith(".brightbox.com"):
return CloudNames.BRIGHTBOX
def identify_zstack(data):
- if data['asset_tag'].endswith('.zstack.io'):
+ if data["asset_tag"].endswith(".zstack.io"):
return CloudNames.ZSTACK
def identify_e24cloud(data):
- if data['vendor'] == 'e24cloud':
+ if data["vendor"] == "e24cloud":
return CloudNames.E24CLOUD
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, identify_zstack,
- identify_e24cloud, lambda x: CloudNames.UNKNOWN)
+ checks = (
+ identify_aws,
+ identify_brightbox,
+ identify_zstack,
+ identify_e24cloud,
+ lambda x: CloudNames.UNKNOWN,
+ )
for checker in checks:
try:
result = checker(data)
if result:
return result
except Exception as e:
- LOG.warning("calling %s with %s raised exception: %s",
- checker, data, e)
+ LOG.warning(
+ "calling %s with %s raised exception: %s", checker, data, e
+ )
def _collect_platform_data():
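identify_aws() above keys off the DMI/hypervisor UUID. A hedged, self-contained sketch of the same check against hand-written platform data (all values are made up):

# Sample of the dict _collect_platform_data() builds from /sys and DMI.
sample = {
    "uuid": "ec2e1916-9099-7caf-fd21-0123456789ab",
    "uuid_source": "dmi",
    "serial": "ec2e1916-9099-7caf-fd21-0123456789ab",
    "asset_tag": "",
    "vendor": "amazon ec2",
}
# Same condition as identify_aws(): ec2-prefixed uuid from the hypervisor,
# or a uuid that matches the system serial.
is_aws = sample["uuid"].startswith("ec2") and (
    sample["uuid_source"] == "hypervisor"
    or sample["uuid"] == sample["serial"]
)
print(is_aws)  # True: uuid has the ec2 prefix and matches the serial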
@@ -698,36 +784,36 @@ def _collect_platform_data():
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
- data['uuid_source'] = 'hypervisor'
+ data["uuid_source"] = "hypervisor"
except Exception:
- uuid = dmi.read_dmi_data('system-uuid')
- data['uuid_source'] = 'dmi'
+ uuid = dmi.read_dmi_data("system-uuid")
+ data["uuid_source"] = "dmi"
if uuid is None:
- uuid = ''
- data['uuid'] = uuid.lower()
+ uuid = ""
+ data["uuid"] = uuid.lower()
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial is None:
- serial = ''
+ serial = ""
- data['serial'] = serial.lower()
+ data["serial"] = serial.lower()
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag is None:
- asset_tag = ''
+ asset_tag = ""
- data['asset_tag'] = asset_tag.lower()
+ data["asset_tag"] = asset_tag.lower()
- vendor = dmi.read_dmi_data('system-manufacturer')
- data['vendor'] = (vendor if vendor else '').lower()
+ vendor = dmi.read_dmi_data("system-manufacturer")
+ data["vendor"] = (vendor if vendor else "").lower()
return data
def convert_ec2_metadata_network_config(
- network_md, macs_to_nics=None, fallback_nic=None,
- full_network_config=True):
+ network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True
+):
"""Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
@@ -746,49 +832,55 @@ def convert_ec2_metadata_network_config(
@return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 2, 'ethernets': {}}
+ netcfg = {"version": 2, "ethernets": {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
- macs_metadata = network_md['interfaces']['macs']
+ macs_metadata = network_md["interfaces"]["macs"]
if not full_network_config:
for mac, nic_name in macs_to_nics.items():
if nic_name == fallback_nic:
break
- dev_config = {'dhcp4': True,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
nic_metadata = macs_metadata.get(mac)
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- netcfg['ethernets'][nic_name] = dev_config
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ netcfg["ethernets"][nic_name] = dev_config
return netcfg
# Apply network config for all nics and any secondary IPv4/v6 addresses
+ nic_idx = 0
for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
# device-number is zero-indexed, we want it 1-indexed for the
# multiplication on the following line
- nic_idx = int(nic_metadata['device-number']) + 1
- dhcp_override = {'route-metric': nic_idx * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- dev_config['dhcp6-overrides'] = dhcp_override
- dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
- if not dev_config['addresses']:
- dev_config.pop('addresses') # Since we found none configured
- netcfg['ethernets'][nic_name] = dev_config
+ nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1
+ dhcp_override = {"route-metric": nic_idx * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ dev_config["dhcp6-overrides"] = dhcp_override
+ dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config["addresses"]:
+ dev_config.pop("addresses") # Since we found none configured
+ netcfg["ethernets"][nic_name] = dev_config
# Remove route-metric dhcp overrides if only one nic configured
- if len(netcfg['ethernets']) == 1:
- for nic_name in netcfg['ethernets'].keys():
- netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
- netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
+ if len(netcfg["ethernets"]) == 1:
+ for nic_name in netcfg["ethernets"].keys():
+ netcfg["ethernets"][nic_name].pop("dhcp4-overrides")
+ netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None)
return netcfg
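The loop above derives per-NIC route metrics from EC2's zero-indexed device-number so that the primary NIC wins by default. A small, hedged illustration with made-up MACs:

macs_metadata = {
    "0a:00:00:00:00:01": {"device-number": "0"},
    "0a:00:00:00:00:02": {"device-number": "1"},
}
for mac, meta in sorted(macs_metadata.items()):
    nic_idx = int(meta["device-number"]) + 1
    # route-metric 100 for the primary NIC, 200 for the secondary one
    print(mac, {"route-metric": nic_idx * 100})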
@@ -798,18 +890,22 @@ def get_secondary_addresses(nic_metadata, mac):
:return: List of secondary IPv4 or IPv6 addresses to configure on the
interface
"""
- ipv4s = nic_metadata.get('local-ipv4s')
- ipv6s = nic_metadata.get('ipv6s')
+ ipv4s = nic_metadata.get("local-ipv4s")
+ ipv6s = nic_metadata.get("ipv6s")
addresses = []
# In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24"
+ )
+ )
if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128"
+ )
+ )
return sorted(addresses)
@@ -822,18 +918,22 @@ def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
addresses = []
cidr = nic_metadata.get(cidr_key)
prefix = default_prefix
- if not cidr or len(cidr.split('/')) != 2:
- ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ if not cidr or len(cidr.split("/")) != 2:
+ ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6"
LOG.warning(
- 'Could not parse %s %s for mac %s. %s network'
- ' config prefix defaults to /%s',
- cidr_key, cidr, mac, ip_type, prefix)
+ "Could not parse %s %s for mac %s. %s network"
+ " config prefix defaults to /%s",
+ cidr_key,
+ cidr,
+ mac,
+ ip_type,
+ prefix,
+ )
else:
- prefix = cidr.split('/')[1]
+ prefix = cidr.split("/")[1]
# We know we have > 1 ips for in metadata for this IP type
for ip in ips[1:]:
- addresses.append(
- '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix))
return addresses
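get_secondary_addresses() above keeps every address after the first one and attaches the prefix length parsed from the subnet CIDR block. A hedged, standalone illustration:

nic_metadata = {
    "local-ipv4s": ["172.31.2.10", "172.31.2.11", "172.31.2.12"],
    "subnet-ipv4-cidr-block": "172.31.2.0/24",
}
# The first IP is the primary (handled by DHCP); the rest become /24 entries.
prefix = nic_metadata["subnet-ipv4-cidr-block"].split("/")[1]
secondaries = [
    "{}/{}".format(ip, prefix) for ip in nic_metadata["local-ipv4s"][1:]
]
print(secondaries)  # ['172.31.2.11/24', '172.31.2.12/24']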
@@ -848,4 +948,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index adee6d79..cc5136d7 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -5,11 +5,9 @@
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ EXOSCALE_DMI_NAME = "Exoscale"
class DataSourceExoscale(sources.DataSource):
- dsname = 'Exoscale'
+ dsname = "Exoscale"
url_max_wait = 120
@@ -33,12 +31,13 @@ class DataSourceExoscale(sources.DataSource):
super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
LOG.debug("Initializing the Exoscale datasource")
- self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
- self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.metadata_url = self.ds_cfg.get("metadata_url", METADATA_URL)
+ self.api_version = self.ds_cfg.get("api_version", API_VERSION)
self.password_server_port = int(
- self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
- self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
- self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.ds_cfg.get("password_server_port", PASSWORD_SERVER_PORT)
+ )
+ self.url_timeout = self.ds_cfg.get("timeout", URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get("retries", URL_RETRIES)
self.extra_config = {}
def activate(self, cfg, is_new_instance):
@@ -50,23 +49,25 @@ class DataSourceExoscale(sources.DataSource):
# a user has triggered a password reset. So calling that password
# service generally results in no additional cloud-config.
# TODO(Create util functions for overriding merged sys_cfg module freq)
- mod = 'set_passwords'
- sem_path = self.paths.get_ipath_cur('sem')
+ mod = "set_passwords"
+ sem_path = self.paths.get_ipath_cur("sem")
sem_helper = helpers.FileSemaphores(sem_path)
- if sem_helper.clear('config_' + mod, None):
- LOG.debug('Overriding module set-passwords with frequency always')
+ if sem_helper.clear("config_" + mod, None):
+ LOG.debug("Overriding module set-passwords with frequency always")
def wait_for_metadata_service(self):
"""Wait for the metadata service to be reachable."""
metadata_url = "{}/{}/meta-data/instance-id".format(
- self.metadata_url, self.api_version)
+ self.metadata_url, self.api_version
+ )
url, _response = url_helper.wait_for_url(
urls=[metadata_url],
max_wait=self.url_max_wait,
timeout=self.url_timeout,
- status_cb=LOG.critical)
+ status_cb=LOG.critical,
+ )
return bool(url)
@@ -78,15 +79,20 @@ class DataSourceExoscale(sources.DataSource):
"""
metadata_ready = util.log_time(
logfunc=LOG.info,
- msg='waiting for the metadata service',
- func=self.wait_for_metadata_service)
+ msg="waiting for the metadata service",
+ func=self.wait_for_metadata_service,
+ )
if not metadata_ready:
return {}
- return read_metadata(self.metadata_url, self.api_version,
- self.password_server_port, self.url_timeout,
- self.url_retries)
+ return read_metadata(
+ self.metadata_url,
+ self.api_version,
+ self.password_server_port,
+ self.url_timeout,
+ self.url_retries,
+ )
def _get_data(self):
"""Fetch the user data, the metadata and the VM password
@@ -100,15 +106,16 @@ class DataSourceExoscale(sources.DataSource):
data = util.log_time(
logfunc=LOG.debug,
- msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not data:
return False
- self.userdata_raw = data['user-data']
- self.metadata = data['meta-data']
- password = data.get('password')
+ self.userdata_raw = data["user-data"]
+ self.metadata = data["meta-data"]
+ password = data.get("password")
password_config = {}
if password:
@@ -119,16 +126,17 @@ class DataSourceExoscale(sources.DataSource):
# leave the password always disabled if no password is ever set, or
# leave the password login enabled if we set it once.
password_config = {
- 'ssh_pwauth': True,
- 'password': password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": password,
+ "chpasswd": {
+ "expire": False,
},
}
# builtin extra_config overrides password_config
self.extra_config = util.mergemanydict(
- [self.extra_config, password_config])
+ [self.extra_config, password_config]
+ )
return True
@@ -136,8 +144,9 @@ class DataSourceExoscale(sources.DataSource):
return self.extra_config
def _is_platform_viable(self):
- return dmi.read_dmi_data('system-product-name').startswith(
- EXOSCALE_DMI_NAME)
+ return dmi.read_dmi_data("system-product-name").startswith(
+ EXOSCALE_DMI_NAME
+ )
# Used to match classes to dependencies
@@ -151,28 +160,32 @@ def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
-def get_password(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def get_password(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Obtain the VM's password if set.
Once fetched the password is marked saved. Future calls to this method may
return empty string or 'saved_password'."""
- password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
- api_version)
+ password_url = "{}:{}/{}/".format(
+ metadata_url, password_server_port, api_version
+ )
response = url_helper.read_file_or_url(
password_url,
ssl_details=None,
headers={"DomU_Request": "send_my_password"},
timeout=url_timeout,
- retries=url_retries)
- password = response.contents.decode('utf-8')
+ retries=url_retries,
+ )
+ password = response.contents.decode("utf-8")
# the password is empty or already saved
# Note: the original metadata server would answer an additional
# 'bad_request' status, but the Exoscale implementation does not.
- if password in ['', 'saved_password']:
+ if password in ["", "saved_password"]:
return None
# save the password
url_helper.read_file_or_url(
@@ -180,44 +193,50 @@ def get_password(metadata_url=METADATA_URL,
ssl_details=None,
headers={"DomU_Request": "saved_password"},
timeout=url_timeout,
- retries=url_retries)
+ retries=url_retries,
+ )
return password
-def read_metadata(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def read_metadata(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Query the metadata server and return the retrieved data."""
crawled_metadata = {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
except Exception as e:
- util.logexc(LOG, "failed reading from metadata url %s (%s)",
- metadata_url, e)
+ util.logexc(
+ LOG, "failed reading from metadata url %s (%s)", metadata_url, e
+ )
return {}
try:
- crawled_metadata['password'] = get_password(
+ crawled_metadata["password"] = get_password(
api_version=api_version,
metadata_url=metadata_url,
password_server_port=password_server_port,
url_retries=url_retries,
- url_timeout=url_timeout)
+ url_timeout=url_timeout,
+ )
except Exception as e:
- util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
- metadata_url, password_server_port, e)
+ util.logexc(
+ LOG,
+ "failed to read from password server url %s:%s (%s)",
+ metadata_url,
+ password_server_port,
+ e,
+ )
return crawled_metadata
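For orientation, a hedged sketch of the dict shape Exoscale's read_metadata() returns; values are illustrative, and the API_VERSION constant is defined earlier in the file, outside these hunks.

crawled = {
    "_metadata_api_version": "<API_VERSION>",  # placeholder, see module constant
    "user-data": b"#cloud-config\n{}",
    "meta-data": {"instance-id": "some-instance-id"},
    "password": None,  # only a real value when the password endpoint returns one
}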
@@ -225,35 +244,40 @@ def read_metadata(metadata_url=METADATA_URL,
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser = argparse.ArgumentParser(description="Query Exoscale Metadata")
parser.add_argument(
"--endpoint",
metavar="URL",
help="The url of the metadata service.",
- default=METADATA_URL)
+ default=METADATA_URL,
+ )
parser.add_argument(
"--version",
metavar="VERSION",
help="The version of the metadata endpoint to query.",
- default=API_VERSION)
+ default=API_VERSION,
+ )
parser.add_argument(
"--retries",
metavar="NUM",
type=int,
help="The number of retries querying the endpoint.",
- default=URL_RETRIES)
+ default=URL_RETRIES,
+ )
parser.add_argument(
"--timeout",
metavar="NUM",
type=int,
help="The time in seconds to wait before timing out.",
- default=URL_TIMEOUT)
+ default=URL_TIMEOUT,
+ )
parser.add_argument(
"--password-port",
metavar="PORT",
type=int,
help="The port on which the password endpoint listens",
- default=PASSWORD_SERVER_PORT)
+ default=PASSWORD_SERVER_PORT,
+ )
args = parser.parse_args()
@@ -262,7 +286,8 @@ if __name__ == "__main__":
api_version=args.version,
password_server_port=args.password_port,
url_timeout=args.timeout,
- url_retries=args.retries)
+ url_retries=args.retries,
+ )
print(util.json_dumps(data))
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 746caddb..c470bea8 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -4,39 +4,46 @@
import datetime
import json
-
from base64 import b64decode
+from contextlib import suppress as noop
from cloudinit import dmi
-from cloudinit.distros import ug_util
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
+from cloudinit.distros import ug_util
+from cloudinit.net.dhcp import EphemeralDHCPv4
LOG = logging.getLogger(__name__)
-MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes')
-HOSTKEY_NAMESPACE = 'hostkeys'
-HEADERS = {'Metadata-Flavor': 'Google'}
+MD_V1_URL = "http://metadata.google.internal/computeMetadata/v1/"
+BUILTIN_DS_CONFIG = {"metadata_url": MD_V1_URL}
+REQUIRED_FIELDS = ("instance-id", "availability-zone", "local-hostname")
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes"
+)
+HOSTKEY_NAMESPACE = "hostkeys"
+HEADERS = {"Metadata-Flavor": "Google"}
class GoogleMetadataFetcher(object):
-
- def __init__(self, metadata_address):
+ def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
+ self.num_retries = num_retries
+ self.sec_between_retries = sec_between_retries
def get_value(self, path, is_text, is_recursive=False):
value = None
try:
url = self.metadata_address + path
if is_recursive:
- url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=HEADERS)
+ url += "/?recursive=True"
+ resp = url_helper.readurl(
+ url=url,
+ headers=HEADERS,
+ retries=self.num_retries,
+ sec_between=self.sec_between_retries,
+ )
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -45,7 +52,7 @@ class GoogleMetadataFetcher(object):
if is_text:
value = util.decode_binary(resp.contents)
else:
- value = resp.contents.decode('utf-8')
+ value = resp.contents.decode("utf-8")
else:
LOG.debug("url %s returned code %s", path, resp.code)
return value
@@ -53,7 +60,8 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
- dsname = 'GCE'
+ dsname = "GCE"
+ perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -62,24 +70,38 @@ class DataSourceGCE(sources.DataSource):
(users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
(self.default_user, _user_config) = ug_util.extract_default(users)
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
def _get_data(self):
- ret = util.log_time(
- LOG.debug, 'Crawl of GCE metadata service',
- read_md, kwargs={'address': self.metadata_address})
-
- if not ret['success']:
- if ret['platform_reports_gce']:
- LOG.warning(ret['reason'])
+ url_params = self.get_url_params()
+ network_context = noop()
+ if self.perform_dhcp_setup:
+ network_context = EphemeralDHCPv4(self.fallback_interface)
+ with network_context:
+ ret = util.log_time(
+ LOG.debug,
+ "Crawl of GCE metadata service",
+ read_md,
+ kwargs={
+ "address": self.metadata_address,
+ "url_params": url_params,
+ },
+ )
+
+ if not ret["success"]:
+ if ret["platform_reports_gce"]:
+ LOG.warning(ret["reason"])
else:
- LOG.debug(ret['reason'])
+ LOG.debug(ret["reason"])
return False
- self.metadata = ret['meta-data']
- self.userdata_raw = ret['user-data']
+ self.metadata = ret["meta-data"]
+ self.userdata_raw = ret["user-data"]
return True
@property
@@ -88,10 +110,10 @@ class DataSourceGCE(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def get_public_ssh_keys(self):
- public_keys_data = self.metadata['public-keys-data']
+ public_keys_data = self.metadata["public-keys-data"]
return _parse_public_keys(public_keys_data, self.default_user)
def publish_host_keys(self, hostkeys):
@@ -100,26 +122,35 @@ class DataSourceGCE(sources.DataSource):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
        # GCE has long FQDNs and has asked for short hostnames.

- return self.metadata['local-hostname'].split('.')[0]
+ return self.metadata["local-hostname"].split(".")[0]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
@property
def region(self):
- return self.availability_zone.rsplit('-', 1)[0]
+ return self.availability_zone.rsplit("-", 1)[0]
+
+
+class DataSourceGCELocal(DataSourceGCE):
+ perform_dhcp_setup = True
def _write_host_key_to_guest_attributes(key_type, key_value):
- url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
- key_value = key_value.encode('utf-8')
- resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
- request_method='PUT', check_status=False)
+ url = "%s/%s/%s" % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode("utf-8")
+ resp = url_helper.readurl(
+ url=url,
+ data=key_value,
+ headers=HEADERS,
+ request_method="PUT",
+ check_status=False,
+ )
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug("Wrote %s host key to guest attributes.", key_type)
else:
- LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+ LOG.debug("Unable to write %s host key to guest attributes.", key_type)
def _has_expired(public_key):
@@ -133,7 +164,7 @@ def _has_expired(public_key):
return False
# Do not expire keys if they do not have the expected schema identifier.
- if schema != 'google-ssh':
+ if schema != "google-ssh":
return False
try:
@@ -142,11 +173,11 @@ def _has_expired(public_key):
return False
    # Do not expire keys if there is no expiration timestamp.
- if 'expireOn' not in json_obj:
+ if "expireOn" not in json_obj:
return False
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ expire_str = json_obj["expireOn"]
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
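The expiry check above parses the JSON comment attached to google-ssh keys. A hedged, standalone illustration using the same format string as the hunk:

import datetime
import json

comment = '{"userName": "me@example.com", "expireOn": "2022-01-01T00:00:00+0000"}'
expire_time = datetime.datetime.strptime(
    json.loads(comment)["expireOn"], "%Y-%m-%dT%H:%M:%S+0000"
)
# True once the timestamp is in the past, i.e. the key should be dropped.
print(expire_time < datetime.datetime.utcnow())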
@@ -167,44 +198,49 @@ def _parse_public_keys(public_keys_data, default_user=None):
for public_key in public_keys_data:
if not public_key or not all(ord(c) < 128 for c in public_key):
continue
- split_public_key = public_key.split(':', 1)
+ split_public_key = public_key.split(":", 1)
if len(split_public_key) != 2:
continue
user, key = split_public_key
- if user in ('cloudinit', default_user) and not _has_expired(key):
+ if user in ("cloudinit", default_user) and not _has_expired(key):
public_keys.append(key)
return public_keys
-def read_md(address=None, platform_check=True):
+def read_md(address=None, url_params=None, platform_check=True):
if address is None:
address = MD_V1_URL
- ret = {'meta-data': None, 'user-data': None,
- 'success': False, 'reason': None}
- ret['platform_reports_gce'] = platform_reports_gce()
+ ret = {
+ "meta-data": None,
+ "user-data": None,
+ "success": False,
+ "reason": None,
+ }
+ ret["platform_reports_gce"] = platform_reports_gce()
- if platform_check and not ret['platform_reports_gce']:
- ret['reason'] = "Not running on GCE."
+ if platform_check and not ret["platform_reports_gce"]:
+ ret["reason"] = "Not running on GCE."
return ret
# If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address):
LOG.debug("%s is not resolvable", address)
- ret['reason'] = 'address "%s" is not resolvable' % address
+ ret["reason"] = 'address "%s" is not resolvable' % address
return ret
# url_map: (our-key, path, required, is_text, is_recursive)
url_map = [
- ('instance-id', ('instance/id',), True, True, False),
- ('availability-zone', ('instance/zone',), True, True, False),
- ('local-hostname', ('instance/hostname',), True, True, False),
- ('instance-data', ('instance/attributes',), False, False, True),
- ('project-data', ('project/attributes',), False, False, True),
+ ("instance-id", ("instance/id",), True, True, False),
+ ("availability-zone", ("instance/zone",), True, True, False),
+ ("local-hostname", ("instance/hostname",), True, True, False),
+ ("instance-data", ("instance/attributes",), False, False, True),
+ ("project-data", ("project/attributes",), False, False, True),
]
-
- metadata_fetcher = GoogleMetadataFetcher(address)
+ metadata_fetcher = GoogleMetadataFetcher(
+ address, url_params.num_retries, url_params.sec_between_retries
+ )
md = {}
# Iterate over url_map keys to get metadata items.
for (mkey, paths, required, is_text, is_recursive) in url_map:
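Each url_map entry above becomes a simple authenticated GET against the GCE metadata server. A hedged, standalone sketch of one such request (it only resolves when run on a GCE instance):

import requests

resp = requests.get(
    "http://metadata.google.internal/computeMetadata/v1/instance/id",
    headers={"Metadata-Flavor": "Google"},  # required by the GCE metadata server
    timeout=2,
)
print(resp.text)  # numeric instance id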
@@ -215,56 +251,58 @@ def read_md(address=None, platform_check=True):
value = new_value
if required and value is None:
msg = "required key %s returned nothing. not GCE"
- ret['reason'] = msg % mkey
+ ret["reason"] = msg % mkey
return ret
md[mkey] = value
- instance_data = json.loads(md['instance-data'] or '{}')
- project_data = json.loads(md['project-data'] or '{}')
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- public_keys_data = '\n'.join([key for key in valid_keys if key])
- md['public-keys-data'] = public_keys_data.splitlines()
+ instance_data = json.loads(md["instance-data"] or "{}")
+ project_data = json.loads(md["project-data"] or "{}")
+ valid_keys = [instance_data.get("sshKeys"), instance_data.get("ssh-keys")]
+ block_project = instance_data.get("block-project-ssh-keys", "").lower()
+ if block_project != "true" and not instance_data.get("sshKeys"):
+ valid_keys.append(project_data.get("ssh-keys"))
+ valid_keys.append(project_data.get("sshKeys"))
+ public_keys_data = "\n".join([key for key in valid_keys if key])
+ md["public-keys-data"] = public_keys_data.splitlines()
- if md['availability-zone']:
- md['availability-zone'] = md['availability-zone'].split('/')[-1]
+ if md["availability-zone"]:
+ md["availability-zone"] = md["availability-zone"].split("/")[-1]
- if 'user-data' in instance_data:
+ if "user-data" in instance_data:
# instance_data was json, so values are all utf-8 strings.
- ud = instance_data['user-data'].encode("utf-8")
- encoding = instance_data.get('user-data-encoding')
- if encoding == 'base64':
+ ud = instance_data["user-data"].encode("utf-8")
+ encoding = instance_data.get("user-data-encoding")
+ if encoding == "base64":
ud = b64decode(ud)
elif encoding:
- LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
- ret['user-data'] = ud
+ LOG.warning("unknown user-data-encoding: %s, ignoring", encoding)
+ ret["user-data"] = ud
- ret['meta-data'] = md
- ret['success'] = True
+ ret["meta-data"] = md
+ ret["success"] = True
return ret
def platform_reports_gce():
- pname = dmi.read_dmi_data('system-product-name') or "N/A"
- if pname == "Google Compute Engine":
+ pname = dmi.read_dmi_data("system-product-name") or "N/A"
+ if pname == "Google Compute Engine" or pname == "Google":
return True
# system-product-name is not always guaranteed (LP: #1674861)
- serial = dmi.read_dmi_data('system-serial-number') or "N/A"
+ serial = dmi.read_dmi_data("system-serial-number") or "N/A"
if serial.startswith("GoogleCloud-"):
return True
- LOG.debug("Not running on google cloud. product-name=%s serial=%s",
- pname, serial)
+ LOG.debug(
+ "Not running on google cloud. product-name=%s serial=%s", pname, serial
+ )
return False
# Used to match classes to dependencies.
datasources = [
+ (DataSourceGCELocal, (sources.DEP_FILESYSTEM,)),
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
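The key assembly in the hunk above prefers instance-level keys and only falls back to project keys when the instance sets none and block-project-ssh-keys is not "true". A hedged re-statement with made-up values:

instance_data = {"ssh-keys": "cloudinit:ssh-ed25519 AAAA... instance-key"}
project_data = {"ssh-keys": "cloudinit:ssh-ed25519 AAAA... project-key"}

valid_keys = [instance_data.get("sshKeys"), instance_data.get("ssh-keys")]
block_project = instance_data.get("block-project-ssh-keys", "").lower()
if block_project != "true" and not instance_data.get("sshKeys"):
    valid_keys.append(project_data.get("ssh-keys"))
    valid_keys.append(project_data.get("sshKeys"))
public_keys_data = "\n".join(k for k in valid_keys if k)
print(public_keys_data.splitlines())  # instance key first, then project key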
@@ -277,31 +315,38 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
import sys
-
from base64 import b64encode
- parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=MD_V1_URL)
- parser.add_argument("--no-platform-check", dest="platform_check",
- help="Ignore smbios platform check",
- action='store_false', default=True)
+ parser = argparse.ArgumentParser(description="Query GCE Metadata Service")
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL,
+ )
+ parser.add_argument(
+ "--no-platform-check",
+ dest="platform_check",
+ help="Ignore smbios platform check",
+ action="store_false",
+ default=True,
+ )
args = parser.parse_args()
data = read_md(address=args.endpoint, platform_check=args.platform_check)
- if 'user-data' in data:
+ if "user-data" in data:
# user-data is bytes not string like other things. Handle it specially.
# If it can be represented as utf-8 then do so. Otherwise print base64
# encoded value in the key user-data-b64.
try:
- data['user-data'] = data['user-data'].decode()
+ data["user-data"] = data["user-data"].decode()
except UnicodeDecodeError:
- sys.stderr.write("User-data cannot be decoded. "
- "Writing as base64\n")
- del data['user-data']
+ sys.stderr.write(
+ "User-data cannot be decoded. Writing as base64\n"
+ )
+ del data["user-data"]
# b64encode returns a bytes value. Decode to get the string.
- data['user-data-b64'] = b64encode(data['user-data']).decode()
+ data["user-data-b64"] = b64encode(data["user-data"]).decode()
- print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index c7c88dd7..91a6f9c9 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -6,21 +6,19 @@
"""Hetzner Cloud API Documentation
https://docs.hetzner.cloud/"""
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net as cloudnet
-from cloudinit import sources
-from cloudinit import util
-
-import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import net, sources, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
-BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1'
+BASE_URL_V1 = "http://169.254.169.254/hetzner/v1"
BUILTIN_DS_CONFIG = {
- 'metadata_url': BASE_URL_V1 + '/metadata',
- 'userdata_url': BASE_URL_V1 + '/userdata',
+ "metadata_url": BASE_URL_V1 + "/metadata",
+ "userdata_url": BASE_URL_V1 + "/userdata",
}
MD_RETRIES = 60
@@ -30,21 +28,24 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
- dsname = 'Hetzner'
+ dsname = "Hetzner"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
- self._network_config = None
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = sources.UNSET
self.dsmode = sources.DSMODE_NETWORK
def _get_data(self):
@@ -53,15 +54,28 @@ class DataSourceHetzner(sources.DataSource):
if not on_hetzner:
return False
- nic = cloudnet.find_fallback_nic()
- with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16,
- "169.254.255.255"):
- md = hc_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
- ud = hc_helper.read_userdata(
- self.userdata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ try:
+ with EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": BASE_URL_V1 + "/metadata/instance-id",
+ },
+ ):
+ md = hc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+ ud = hc_helper.read_userdata(
+ self.userdata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+ except (NoDHCPLeaseError) as e:
+ LOG.error("Bailing, DHCP Exception: %s", e)
+ raise
# Hetzner cloud does not support binary user-data. So here, do a
# base64 decode of the data if we can. The end result being that a
@@ -76,10 +90,10 @@ class DataSourceHetzner(sources.DataSource):
# hostname is name provided by user at launch. The API enforces it is
# a valid hostname, but it is not guaranteed to be resolvable in dns or
# fully qualified.
- self.metadata['instance-id'] = md['instance-id']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['network-config'] = md.get('network-config', None)
- self.metadata['public-keys'] = md.get('public-keys', None)
+ self.metadata["instance-id"] = md["instance-id"]
+ self.metadata["local-hostname"] = md["hostname"]
+ self.metadata["network-config"] = md.get("network-config", None)
+ self.metadata["public-keys"] = md.get("public-keys", None)
self.vendordata_raw = md.get("vendor_data", None)
# instance-id and serial from SMBIOS should be identical
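The rewritten _get_data() above brings up an ephemeral DHCP lease and verifies connectivity against the instance-id route before crawling metadata. A hedged sketch of that pattern on its own, reusing the same cloud-init helpers imported by this module:

from cloudinit import net
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

try:
    with EphemeralDHCPv4(
        iface=net.find_fallback_nic(),
        connectivity_url_data={
            "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id",
        },
    ):
        pass  # metadata and user-data reads happen while the lease is held
except NoDHCPLeaseError as e:
    print("no DHCP lease: %s" % e)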
@@ -92,19 +106,27 @@ class DataSourceHetzner(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
- if self._network_config:
+ if self._network_config is None:
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
return self._network_config
- _net_config = self.metadata['network-config']
+ _net_config = self.metadata["network-config"]
if not _net_config:
raise Exception("Unable to get meta-data from server....")
@@ -114,7 +136,7 @@ class DataSourceHetzner(sources.DataSource):
def get_hcloud_data():
- vendor_name = dmi.read_dmi_data('system-manufacturer')
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
if vendor_name != "Hetzner":
return (False, None)
@@ -129,7 +151,7 @@ def get_hcloud_data():
# Used to match classes to dependencies
datasources = [
- (DataSourceHetzner, (sources.DEP_FILESYSTEM, )),
+ (DataSourceHetzner, (sources.DEP_FILESYSTEM,)),
]
@@ -137,4 +159,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 8d196185..18c3848f 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -97,10 +97,8 @@ import json
import os
from cloudinit import log as logging
-from cloudinit import sources
+from cloudinit import sources, subp, util
from cloudinit.sources.helpers import openstack
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -117,12 +115,13 @@ class Platforms(object):
PROVISIONING = (
Platforms.TEMPLATE_PROVISIONING_METADATA,
- Platforms.TEMPLATE_PROVISIONING_NODATA)
+ Platforms.TEMPLATE_PROVISIONING_NODATA,
+)
class DataSourceIBMCloud(sources.DataSource):
- dsname = 'IBMCloud'
+ dsname = "IBMCloud"
system_uuid = None
def __init__(self, sys_cfg, distro, paths):
@@ -142,14 +141,14 @@ class DataSourceIBMCloud(sources.DataSource):
if results is None:
return False
- self.source = results['source']
- self.platform = results['platform']
- self.metadata = results['metadata']
- self.userdata_raw = results.get('userdata')
- self.network_json = results.get('networkdata')
- vd = results.get('vendordata')
+ self.source = results["source"]
+ self.platform = results["platform"]
+ self.metadata = results["metadata"]
+ self.userdata_raw = results.get("userdata")
+ self.network_json = results.get("networkdata")
+ vd = results.get("vendordata")
self.vendordata_pure = vd
- self.system_uuid = results['system-uuid']
+ self.system_uuid = results["system-uuid"]
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
@@ -160,7 +159,7 @@ class DataSourceIBMCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return '%s (%s)' % (self.platform, self.source)
+ return "%s (%s)" % (self.platform, self.source)
def check_instance_id(self, sys_cfg):
"""quickly (local check only) if self.instance_id is still valid
@@ -177,12 +176,13 @@ class DataSourceIBMCloud(sources.DataSource):
if self.platform != Platforms.OS_CODE:
# If deployed from template, an agent in the provisioning
# environment handles networking configuration. Not cloud-init.
- return {'config': 'disabled', 'version': 1}
+ return {"config": "disabled", "version": 1}
if self._network_config is None:
if self.network_json is not None:
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
else:
LOG.debug("no network configuration available.")
return self._network_config
@@ -200,22 +200,28 @@ def _is_xen():
def _is_ibm_provisioning(
- prov_cfg="/root/provisioningConfiguration.cfg",
- inst_log="/root/swinstall.log",
- boot_ref="/proc/1/environ"):
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ",
+):
"""Return boolean indicating if this boot is ibm provisioning boot."""
if os.path.exists(prov_cfg):
msg = "config '%s' exists." % prov_cfg
result = True
if os.path.exists(inst_log):
if os.path.exists(boot_ref):
- result = (os.stat(inst_log).st_mtime >
- os.stat(boot_ref).st_mtime)
- msg += (" log '%s' from %s boot." %
- (inst_log, "current" if result else "previous"))
+ result = (
+ os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime
+ )
+ msg += " log '%s' from %s boot." % (
+ inst_log,
+ "current" if result else "previous",
+ )
else:
- msg += (" log '%s' existed, but no reference file '%s'." %
- (inst_log, boot_ref))
+ msg += " log '%s' existed, but no reference file '%s'." % (
+ inst_log,
+ boot_ref,
+ )
result = False
else:
msg += " log '%s' did not exist." % inst_log
@@ -252,17 +258,26 @@ def get_ibm_platform():
if label not in (label_mdata, label_cfg2):
continue
if label in fslabels:
- LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s",
- label, fslabels[label], data)
+ LOG.warning(
+ "Duplicate fslabel '%s'. existing=%s current=%s",
+ label,
+ fslabels[label],
+ data,
+ )
continue
if label == label_cfg2 and uuid != IBM_CONFIG_UUID:
- LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s",
- dev, label, uuid, data)
+ LOG.debug(
+ "Skipping %s with LABEL=%s due to uuid != %s: %s",
+ dev,
+ label,
+ uuid,
+ data,
+ )
continue
fslabels[label] = data
- metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME')
- cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME')
+ metadata_path = fslabels.get(label_mdata, {}).get("DEVNAME")
+ cfg2_path = fslabels.get(label_cfg2, {}).get("DEVNAME")
if cfg2_path:
return (Platforms.OS_CODE, cfg2_path)
@@ -288,12 +303,14 @@ def read_md():
LOG.debug("This is not an IBMCloud platform.")
return None
elif platform in PROVISIONING:
- LOG.debug("Cloud-init is disabled during provisioning: %s.",
- platform)
+ LOG.debug("Cloud-init is disabled during provisioning: %s.", platform)
return None
- ret = {'platform': platform, 'source': path,
- 'system-uuid': _read_system_uuid()}
+ ret = {
+ "platform": platform,
+ "source": path,
+ "system-uuid": _read_system_uuid(),
+ }
try:
if os.path.isdir(path):
@@ -302,8 +319,8 @@ def read_md():
results = util.mount_cb(path, metadata_from_dir)
except sources.BrokenMetadata as e:
raise RuntimeError(
- "Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e)
+ "Failed reading IBM config disk (platform=%s path=%s): %s"
+ % (platform, path, e)
) from e
ret.update(results)
@@ -329,14 +346,14 @@ def metadata_from_dir(source_dir):
return os.path.join("openstack", "latest", fname)
def load_json_bytes(blob):
- return json.loads(blob.decode('utf-8'))
+ return json.loads(blob.decode("utf-8"))
files = [
# tuples of (results_name, path, translator)
- ('metadata_raw', opath('meta_data.json'), load_json_bytes),
- ('userdata', opath('user_data'), None),
- ('vendordata', opath('vendor_data.json'), load_json_bytes),
- ('networkdata', opath('network_data.json'), load_json_bytes),
+ ("metadata_raw", opath("meta_data.json"), load_json_bytes),
+ ("userdata", opath("user_data"), None),
+ ("vendordata", opath("vendor_data.json"), load_json_bytes),
+ ("networkdata", opath("network_data.json"), load_json_bytes),
]
results = {}
@@ -355,28 +372,33 @@ def metadata_from_dir(source_dir):
data = transl(raw)
except Exception as e:
raise sources.BrokenMetadata(
- "Failed decoding %s: %s" % (path, e))
+ "Failed decoding %s: %s" % (path, e)
+ )
results[name] = data
- if results.get('metadata_raw') is None:
+ if results.get("metadata_raw") is None:
raise sources.BrokenMetadata(
- "%s missing required file 'meta_data.json'" % source_dir)
+ "%s missing required file 'meta_data.json'" % source_dir
+ )
- results['metadata'] = {}
+ results["metadata"] = {}
- md_raw = results['metadata_raw']
- md = results['metadata']
- if 'random_seed' in md_raw:
+ md_raw = results["metadata_raw"]
+ md = results["metadata"]
+ if "random_seed" in md_raw:
try:
- md['random_seed'] = base64.b64decode(md_raw['random_seed'])
+ md["random_seed"] = base64.b64decode(md_raw["random_seed"])
except (ValueError, TypeError) as e:
raise sources.BrokenMetadata(
- "Badly formatted metadata random_seed entry: %s" % e)
+ "Badly formatted metadata random_seed entry: %s" % e
+ )
renames = (
- ('public_keys', 'public-keys'), ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'))
+ ("public_keys", "public-keys"),
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ )
for mdname, newname in renames:
if mdname in md_raw:
md[newname] = md_raw[mdname]
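metadata_from_dir() above renames a few OpenStack-style keys into cloud-init's preferred names. A hedged, standalone illustration (random_seed handling omitted; values are made up):

md_raw = {
    "uuid": "abc-123",
    "hostname": "ibm-host",
    "public_keys": {"0": "ssh-ed25519 AAAA..."},
}
renames = (
    ("public_keys", "public-keys"),
    ("hostname", "local-hostname"),
    ("uuid", "instance-id"),
)
md = {new: md_raw[old] for old, new in renames if old in md_raw}
print(md)  # {'public-keys': ..., 'local-hostname': 'ibm-host', 'instance-id': 'abc-123'}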
@@ -398,7 +420,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata')
+ parser = argparse.ArgumentParser(description="Query IBM Cloud Metadata")
args = parser.parse_args()
data = read_md()
print(util.json_dumps(data))
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
new file mode 100644
index 00000000..071ea87c
--- /dev/null
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -0,0 +1,392 @@
+"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
+
+Notes:
+ * This datasource replaces previous NoCloud datasource for LXD.
+ * Older LXD images may not have updates for cloud-init so NoCloud may
+ still be detected on those images.
+ * Detect LXD datasource when /dev/lxd/sock is an active socket file.
+ * Info on dev-lxd API: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+ * TODO( Hotplug support using websockets API 1.0/events )
+"""
+
+import os
+import socket
+import stat
+from json.decoder import JSONDecodeError
+
+import requests
+from requests.adapters import HTTPAdapter
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+# contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+# https://github.com/kennethreitz/requests/pull/2375
+# https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.connectionpool import HTTPConnectionPool
+
+from cloudinit import log as logging
+from cloudinit import sources, subp, util
+
+LOG = logging.getLogger(__name__)
+
+LXD_SOCKET_PATH = "/dev/lxd/sock"
+LXD_SOCKET_API_VERSION = "1.0"
+
+# Config key mappings to alias as top-level instance data keys
+CONFIG_KEY_ALIASES = {
+ "cloud-init.user-data": "user-data",
+ "cloud-init.network-config": "network-config",
+ "cloud-init.vendor-data": "vendor-data",
+ "user.user-data": "user-data",
+ "user.network-config": "network-config",
+ "user.vendor-data": "vendor-data",
+}
+
+
+def generate_fallback_network_config() -> dict:
+ """Return network config V1 dict representing instance network config."""
+ network_v1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+ }
+ if subp.which("systemd-detect-virt"):
+ try:
+ virt_type, _ = subp.subp(["systemd-detect-virt"])
+ except subp.ProcessExecutionError as err:
+ LOG.warning(
+ "Unable to run systemd-detect-virt: %s."
+ " Rendering default network config.",
+ err,
+ )
+ return network_v1
+ if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
+ arch = util.system_info()["uname"][4]
+ if arch == "ppc64le":
+ network_v1["config"][0]["name"] = "enp0s5"
+ elif arch == "s390x":
+ network_v1["config"][0]["name"] = "enc9"
+ else:
+ network_v1["config"][0]["name"] = "enp5s0"
+ return network_v1
+
+
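For reference, a hedged example of what generate_fallback_network_config() above produces on an x86 LXD virtual machine (containers keep the default eth0 name):

fallback = {
    "version": 1,
    "config": [
        {
            "type": "physical",
            "name": "enp5s0",
            "subnets": [{"type": "dhcp", "control": "auto"}],
        }
    ],
}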
+class SocketHTTPConnection(HTTPConnection):
+ def __init__(self, socket_path):
+ super().__init__("localhost")
+ self.socket_path = socket_path
+
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(self.socket_path)
+
+
+class SocketConnectionPool(HTTPConnectionPool):
+ def __init__(self, socket_path):
+ self.socket_path = socket_path
+ super().__init__("localhost")
+
+ def _new_conn(self):
+ return SocketHTTPConnection(self.socket_path)
+
+
+class LXDSocketAdapter(HTTPAdapter):
+ def get_connection(self, url, proxies=None):
+ return SocketConnectionPool(LXD_SOCKET_PATH)
+
+
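A hedged usage sketch of the adapter classes above: mounting the adapter lets requests speak HTTP over the LXD unix socket, and the "lxd" hostname is only a routing label. It assumes the classes are importable (e.g. from cloudinit.sources.DataSourceLXD).

import requests

session = requests.Session()
session.mount("http://lxd/1.0/", LXDSocketAdapter())
resp = session.get("http://lxd/1.0/meta-data")  # served via /dev/lxd/sock
print(resp.status_code, resp.text)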
+def _maybe_remove_top_network(cfg):
+ """If network-config contains top level 'network' key, then remove it.
+
+ Some providers of network configuration may provide a top level
+ 'network' key (LP: #1798117) even though it is not necessary.
+
+ Be friendly and remove it if it really seems so.
+
+ Return the original value if no change or the updated value if changed."""
+ if "network" not in cfg:
+ return cfg
+ network_val = cfg["network"]
+ bmsg = "Top level network key in network-config %s: %s"
+ if not isinstance(network_val, dict):
+ LOG.debug(bmsg, "was not a dict", cfg)
+ return cfg
+ if len(list(cfg.keys())) != 1:
+ LOG.debug(bmsg, "had multiple top level keys", cfg)
+ return cfg
+ if network_val.get("config") == "disabled":
+ LOG.debug(bmsg, "was config/disabled", cfg)
+ elif not all(("config" in network_val, "version" in network_val)):
+ LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
+ return cfg
+ LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
+ return network_val
+
+
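A hedged example of the wrapped network-config that _maybe_remove_top_network() unwraps; for input like this the helper returns the inner mapping.

wrapped = {
    "network": {
        "version": 1,
        "config": [{"type": "physical", "name": "eth0"}],
    }
}
unwrapped = wrapped["network"]  # the value the helper would hand back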
+def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
+ """Convert raw instance data from str, bytes, YAML to dict
+
+ :param metadata_type: string, one of as: meta-data, vendor-data, user-data
+ network-config
+
+    :param metadata_value: str, bytes or dict representing instance-data.
+
+ :raises: InvalidMetaDataError on invalid instance-data content.
+ """
+ if isinstance(metadata_value, dict):
+ return metadata_value
+ if metadata_value is None:
+ return {}
+ try:
+ parsed_metadata = util.load_yaml(metadata_value)
+ except AttributeError as exc: # not str or bytes
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type}. Expected str, bytes or dict but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ ) from exc
+ if parsed_metadata is None:
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type} format. Expected YAML but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ )
+ return parsed_metadata
+
+
+class DataSourceLXD(sources.DataSource):
+
+ dsname = "LXD"
+
+ _network_config = sources.UNSET
+ _crawled_metadata = sources.UNSET
+
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "user.meta-data",
+ "user.vendor-data",
+ "user.user-data",
+ )
+
+ def _is_platform_viable(self) -> bool:
+ """Check platform environment to report if this datasource may run."""
+ return is_platform_viable()
+
+ def _get_data(self) -> bool:
+ """Crawl LXD socket API instance data and return True on success"""
+ if not self._is_platform_viable():
+ LOG.debug("Not an LXD datasource: No LXD socket found.")
+ return False
+
+ self._crawled_metadata = util.log_time(
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=read_metadata,
+ )
+ self.metadata = _raw_instance_data_to_dict(
+ "meta-data", self._crawled_metadata.get("meta-data")
+ )
+ config = self._crawled_metadata.get("config", {})
+ user_metadata = config.get("user.meta-data", {})
+ if user_metadata:
+ user_metadata = _raw_instance_data_to_dict(
+ "user.meta-data", user_metadata
+ )
+ if not isinstance(self.metadata, dict):
+ self.metadata = util.mergemanydict(
+ [util.load_yaml(self.metadata), user_metadata]
+ )
+ if "user-data" in self._crawled_metadata:
+ self.userdata_raw = self._crawled_metadata["user-data"]
+ if "network-config" in self._crawled_metadata:
+ self._network_config = _maybe_remove_top_network(
+ _raw_instance_data_to_dict(
+ "network-config", self._crawled_metadata["network-config"]
+ )
+ )
+ if "vendor-data" in self._crawled_metadata:
+ self.vendordata_raw = self._crawled_metadata["vendor-data"]
+ return True
+
+ def _get_subplatform(self) -> str:
+ """Return subplatform details for this datasource"""
+ return "LXD socket API v. {ver} ({socket})".format(
+ ver=LXD_SOCKET_API_VERSION, socket=LXD_SOCKET_PATH
+ )
+
+ def check_instance_id(self, sys_cfg) -> str:
+ """Return True if instance_id unchanged."""
+ response = read_metadata(metadata_only=True)
+ md = response.get("meta-data", {})
+ if not isinstance(md, dict):
+ md = util.load_yaml(md)
+ return md.get("instance-id") == self.metadata.get("instance-id")
+
+ @property
+ def network_config(self) -> dict:
+ """Network config read from LXD socket config/user.network-config.
+
+ If none is present, then we generate fallback configuration.
+ """
+ if self._network_config == sources.UNSET:
+ if self._crawled_metadata.get("network-config"):
+ self._network_config = self._crawled_metadata.get(
+ "network-config"
+ )
+ else:
+ self._network_config = generate_fallback_network_config()
+ return self._network_config
+
+
+def is_platform_viable() -> bool:
+ """Return True when this platform appears to have an LXD socket."""
+ if os.path.exists(LXD_SOCKET_PATH):
+ return stat.S_ISSOCK(os.lstat(LXD_SOCKET_PATH).st_mode)
+ return False
+
+
+def read_metadata(
+ api_version: str = LXD_SOCKET_API_VERSION, metadata_only: bool = False
+) -> dict:
+ """Fetch metadata from the /dev/lxd/socket routes.
+
+ Perform a number of HTTP GETs on known routes on the devlxd socket API.
+ Minimally all containers must respond to http://lxd/1.0/meta-data when
+ the LXD configuration setting `security.devlxd` is true.
+
+ When `security.devlxd` is false, no /dev/lxd/socket file exists. This
+ datasource will return False from `is_platform_viable` in that case.
+
+ Perform a GET of <LXD_SOCKET_API_VERSION>/config` and walk all `user.*`
+ configuration keys, storing all keys and values under a dict key
+ LXD_SOCKET_API_VERSION: config {...}.
+
+ In the presence of the following optional user config keys,
+ create top level aliases:
+ - user.user-data -> user-data
+ - user.vendor-data -> vendor-data
+ - user.network-config -> network-config
+
+ :return:
+ A dict with the following mandatory key: meta-data.
+ Optional keys: user-data, vendor-data, network-config, network_mode
+
+ Below <LXD_SOCKET_API_VERSION> is a dict representation of all raw
+ configuration keys and values provided to the container surfaced by
+ the socket under the /1.0/config/ route.
+ """
+ md = {}
+ lxd_url = "http://lxd"
+ version_url = lxd_url + "/" + api_version + "/"
+ with requests.Session() as session:
+ session.mount(version_url, LXDSocketAdapter())
+ # Raw meta-data as text
+ md_route = "{route}meta-data".format(route=version_url)
+ response = session.get(md_route)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, md_route)
+ if not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=md_route,
+ resp=response.text,
+ )
+ )
+
+ md["meta-data"] = response.text
+ if metadata_only:
+ return md # Skip network-data, vendor-data, user-data
+
+ md = {
+ "_metadata_api_version": api_version, # Document API version read
+ "config": {},
+ "meta-data": md["meta-data"],
+ }
+
+ config_url = version_url + "config"
+    # Represent all advertised/available config routes under
+    # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}}.
+ response = session.get(config_url)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, config_url)
+ if not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=config_url,
+ resp=response.text,
+ )
+ )
+ try:
+ config_routes = response.json()
+ except JSONDecodeError as exc:
+ raise sources.InvalidMetaDataException(
+ "Unable to determine cloud-init config from {route}."
+ " Expected JSON but found: {resp}".format(
+ route=config_url, resp=response.text
+ )
+ ) from exc
+
+ # Sorting keys to ensure we always process in alphabetical order.
+ # cloud-init.* keys will sort before user.* keys which is preferred
+ # precedence.
+ for config_route in sorted(config_routes):
+ url = "http://lxd{route}".format(route=config_route)
+ response = session.get(url)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, url)
+ if response.ok:
+ cfg_key = config_route.rpartition("/")[-1]
+ # Leave raw data values/format unchanged to represent it in
+ # instance-data.json for cloud-init query or jinja template
+ # use.
+ md["config"][cfg_key] = response.text
+ # Promote common CONFIG_KEY_ALIASES to top-level keys.
+ if cfg_key in CONFIG_KEY_ALIASES:
+ # Due to sort of config_routes, promote cloud-init.*
+ # aliases before user.*. This allows user.* keys to act as
+ # fallback config on old LXD, with new cloud-init images.
+ if CONFIG_KEY_ALIASES[cfg_key] not in md:
+ md[CONFIG_KEY_ALIASES[cfg_key]] = response.text
+ else:
+ LOG.warning(
+ "Ignoring LXD config %s in favor of %s value.",
+ cfg_key,
+ cfg_key.replace("user", "cloud-init", 1),
+ )
+ else:
+ LOG.debug(
+ "Skipping %s on [HTTP:%d]:%s",
+ url,
+ response.status_code,
+ response.text,
+ )
+ return md
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceLXD, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+
+ description = """Query LXD metadata and emit a JSON object."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(util.json_dumps(read_metadata()))
+# vi: ts=4 expandtab
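The sorted walk over config routes in read_metadata() above is what gives cloud-init.* keys precedence over their user.* equivalents: both map to the same top-level alias, and the cloud-init.* entry is seen first because of the alphabetical sort. A minimal, self-contained sketch of that rule follows; the two-entry CONFIG_KEY_ALIASES mapping here is an assumption standing in for the fuller constant defined earlier in DataSourceLXD.py.

    # Sketch only -- not the cloud-init implementation.
    CONFIG_KEY_ALIASES = {
        "cloud-init.user-data": "user-data",
        "user.user-data": "user-data",
    }

    def promote_aliases(config_values: dict) -> dict:
        md = {"config": {}}
        # Sorted iteration makes cloud-init.* win over user.* ("c" < "u").
        for key in sorted(config_values):
            md["config"][key] = config_values[key]
            alias = CONFIG_KEY_ALIASES.get(key)
            if alias and alias not in md:
                md[alias] = config_values[key]
        return md

    print(
        promote_aliases(
            {
                "user.user-data": "#cloud-config\nruncmd: [ls]",
                "cloud-init.user-data": "#cloud-config\npackages: [htop]",
            }
        )["user-data"]
    )
    # -> "#cloud-config\npackages: [htop]"  (cloud-init.* takes precedence)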
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9156925f..d554db0d 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -11,20 +11,18 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
DS_FIELDS = [
# remote path, location in dictionary, binary data?, optional?
- ("meta-data/instance-id", 'meta-data/instance-id', False, False),
- ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
- ("meta-data/public-keys", 'meta-data/public-keys', False, True),
- ('meta-data/vendor-data', 'vendor-data', True, True),
- ('user-data', 'user-data', True, True),
+ ("meta-data/instance-id", "meta-data/instance-id", False, False),
+ ("meta-data/local-hostname", "meta-data/local-hostname", False, False),
+ ("meta-data/public-keys", "meta-data/public-keys", False, True),
+ ("meta-data/vendor-data", "vendor-data", True, True),
+ ("user-data", "user-data", True, True),
]
@@ -46,7 +44,7 @@ class DataSourceMAAS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+ self.seed_dir = os.path.join(paths.seed_dir, "maas")
self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
@property
@@ -72,7 +70,7 @@ class DataSourceMAAS(sources.DataSource):
raise
# If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
+ url = mcfg.get("metadata_url", None)
if not url:
return False
@@ -85,9 +83,14 @@ class DataSourceMAAS(sources.DataSource):
return False
self._set_data(
- url, read_maas_seed_url(
- url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1))
+ url,
+ read_maas_seed_url(
+ url,
+ read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths,
+ retries=1,
+ ),
+ )
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
@@ -109,7 +112,7 @@ class DataSourceMAAS(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'seed-dir (%s)' % self.base_url
+ return "seed-dir (%s)" % self.base_url
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -135,13 +138,17 @@ class DataSourceMAAS(sources.DataSource):
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url, _response = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
+ urls=urls, max_wait=max_wait, timeout=timeout
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
+ LOG.critical(
+ "Giving up on md from %s after %i seconds",
+ urls,
+ int(time.time() - starttime),
+ )
return bool(url)
@@ -154,26 +161,26 @@ class DataSourceMAAS(sources.DataSource):
if self.id_hash is None:
return False
ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
- return (self.id_hash == get_id_from_ds_cfg(ncfg))
+ return self.id_hash == get_id_from_ds_cfg(ncfg)
def get_oauth_helper(cfg):
"""Return an oauth helper instance for values in cfg.
- @raises ValueError from OauthUrlHelper if some required fields have
- true-ish values but others do not."""
- keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+ @raises ValueError from OauthUrlHelper if some required fields have
+ true-ish values but others do not."""
+ keys = ("consumer_key", "consumer_secret", "token_key", "token_secret")
kwargs = dict([(r, cfg.get(r)) for r in keys])
return url_helper.OauthUrlHelper(**kwargs)
def get_id_from_ds_cfg(ds_cfg):
"""Given a config, generate a unique identifier for this node."""
- fields = ('consumer_key', 'token_key', 'token_secret')
- idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+ fields = ("consumer_key", "token_key", "token_secret")
+ idstr = "\0".join([ds_cfg.get(f, "") for f in fields])
# store the encoding version as part of the hash in the event
# that it ever changed we can compute older versions.
- return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+ return "v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest()
def read_maas_seed_dir(seed_d):
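As shown in the hunk above, get_id_from_ds_cfg() reduces the MAAS OAuth credentials to a stable node identifier, which check_instance_id() later compares to decide whether the cached datasource is still valid. A standalone illustration of that derivation, with made-up credential values:

    import hashlib

    ds_cfg = {  # example values only
        "consumer_key": "ck",
        "token_key": "tk",
        "token_secret": "ts",
    }
    fields = ("consumer_key", "token_key", "token_secret")
    # NUL-join the credentials and hash them; a changed credential set
    # therefore reads as a different node identity.
    idstr = "\0".join(ds_cfg.get(f, "") for f in fields)
    print("v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest())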
@@ -186,8 +193,14 @@ def read_maas_seed_dir(seed_d):
return read_maas_seed_url("file://%s" % seed_d, version=None)
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
+def read_maas_seed_url(
+ seed_url,
+ read_file_or_url=None,
+ timeout=None,
+ version=MD_VERSION,
+ paths=None,
+ retries=None,
+):
"""
Read the maas datasource at seed_url.
read_file_or_url is a method that should provide an interface
@@ -213,16 +226,20 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=retries, timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(
+ url, retries=retries, timeout=timeout, ssl_details=ssl_details
+ )
if resp.ok():
if binary:
md[path] = resp.contents
else:
md[path] = util.decode_binary(resp.contents)
else:
- LOG.warning(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
+ LOG.warning(
+ "Fetching from %s resulted in an invalid http code %s",
+ url,
+ resp.code,
+ )
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
@@ -236,8 +253,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
def check_seed_contents(content, seed):
"""Validate if dictionary content valid as a return for a datasource.
- Either return a (userdata, metadata, vendordata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
+ Either return a (userdata, metadata, vendordata) tuple or
+ Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
ret = {}
missing = []
@@ -262,14 +279,15 @@ def check_seed_contents(content, seed):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
vd_data = None
- if ret.get('vendor-data'):
+ if ret.get("vendor-data"):
err = object()
- vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
- allowed=(object))
+ vd_data = util.load_yaml(
+ ret.get("vendor-data"), default=err, allowed=(object)
+ )
if vd_data is err:
raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return ret.get('user-data'), ret.get('meta-data'), vd_data
+ return ret.get("user-data"), ret.get("meta-data"), vd_data
class MAASSeedDirNone(Exception):
@@ -292,6 +310,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
+
def main():
"""
Call with single argument of directory or http or https url.
@@ -302,36 +321,66 @@ if __name__ == "__main__":
import pprint
import sys
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
+ parser = argparse.ArgumentParser(description="Interact with MAAS DS")
+ parser.add_argument(
+ "--config",
+ metavar="file",
+ help="specify DS config file",
+ default=None,
+ )
+ parser.add_argument(
+ "--ckey",
+ metavar="key",
+ help="the consumer key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--tkey",
+ metavar="key",
+ help="the token key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--csec",
+ metavar="secret",
+ help="the consumer secret (likely '')",
+ default="",
+ )
+ parser.add_argument(
+ "--tsec",
+ metavar="secret",
+ help="the token secret to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--apiver",
+ metavar="version",
+            help="the apiver to use ('' can be used)",
+ default=MD_VERSION,
+ )
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- for (name, help) in (('crawl', 'crawl the datasource'),
- ('get', 'do a single GET of provided url'),
- ('check-seed', 'read and verify seed at url')):
+ for (name, help) in (
+ ("crawl", "crawl the datasource"),
+ ("get", "do a single GET of provided url"),
+ ("check-seed", "read and verify seed at url"),
+ ):
p = subcmds.add_parser(name, help=help)
- p.add_argument("url", help="the datasource url", nargs='?',
- default=None)
+ p.add_argument(
+ "url", help="the datasource url", nargs="?", default=None
+ )
args = parser.parse_args()
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ creds = {
+ "consumer_key": args.ckey,
+ "token_key": args.tkey,
+ "token_secret": args.tsec,
+ "consumer_secret": args.csec,
+ }
if args.config is None:
- for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ for fname in ("91_kernel_cmdline_url", "90_dpkg_maas"):
fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
if os.path.exists(fpath) and os.access(fpath, os.R_OK):
sys.stderr.write("Used config in %s.\n" % fpath)
@@ -339,13 +388,13 @@ if __name__ == "__main__":
if args.config:
cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
+ if "datasource" in cfg:
+ cfg = cfg["datasource"]["MAAS"]
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- if args.url is None and 'metadata_url' in cfg:
- args.url = cfg['metadata_url']
+ if args.url is None and "metadata_url" in cfg:
+ args.url = cfg["metadata_url"]
if args.url is None:
sys.stderr.write("Must provide a url or a config with url.\n")
@@ -380,8 +429,11 @@ if __name__ == "__main__":
(userdata, metadata, vd) = read_maas_seed_dir(args.url)
else:
(userdata, metadata, vd) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
+ args.url,
+ version=args.apiver,
+ read_file_or_url=readurl,
+ retries=2,
+ )
print("=== user-data ===")
print("N/A" if userdata is None else userdata.decode())
print("=== meta-data ===")
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a126aad3..56559630 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -13,9 +13,8 @@ import os
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, util
from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -27,8 +26,10 @@ class DataSourceNoCloud(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dirs = [
+ os.path.join(paths.seed_dir, "nocloud"),
+ os.path.join(paths.seed_dir, "nocloud-net"),
+ ]
self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
@@ -55,17 +56,21 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': None}
+ mydata = {
+ "meta-data": {},
+ "user-data": "",
+ "vendor-data": "",
+ "network-config": None,
+ }
try:
# Parse the system serial label from dmi. If not empty, try parsing
# like the commandline
md = {}
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial and load_cmdline_data(md, serial):
found.append("dmi")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse dmi data")
return False
@@ -75,14 +80,16 @@ class DataSourceNoCloud(sources.DataSource):
md = {}
if load_cmdline_data(md):
found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
+ pp2d_kwargs = {
+ "required": ["user-data", "meta-data"],
+ "optional": ["vendor-data", "network-config"],
+ }
for path in self.seed_dirs:
try:
@@ -97,31 +104,35 @@ class DataSourceNoCloud(sources.DataSource):
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
- if self.ds_cfg.get('seedfrom'):
+ if self.ds_cfg.get("seedfrom"):
found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
+ mydata["meta-data"]["seedfrom"] = self.ds_cfg["seedfrom"]
# fields appropriately named can also just come from the datasource
# config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
+ if "user-data" in self.ds_cfg and "meta-data" in self.ds_cfg:
mydata = _merge_new_seed(mydata, self.ds_cfg)
found.append("ds_config")
def _pp2d_callback(mp, data):
return util.pathprefix2dict(mp, **data)
- label = self.ds_cfg.get('fs_label', "cidata")
+ label = self.ds_cfg.get("fs_label", "cidata")
if label is not None:
for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
+ seeded = util.mount_cb(
+ dev, _pp2d_callback, pp2d_kwargs
+ )
except ValueError:
- LOG.warning("device %s with label=%s not a "
- "valid seed.", dev, label)
+ LOG.warning(
+ "device %s with label=%s not a valid seed.",
+ dev,
+ label,
+ )
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -133,8 +144,9 @@ class DataSourceNoCloud(sources.DataSource):
if e.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for data", dev
+ )
# There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
@@ -145,8 +157,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
+ if "seedfrom" in mydata["meta-data"]:
+ seedfrom = mydata["meta-data"]["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@@ -162,39 +174,43 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- mydata['vendor-data'] = vd
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], md_seed]
+ )
+ mydata["user-data"] = ud
+ mydata["vendor-data"] = vd
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], defaults]
+ )
self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
+ [mydata["meta-data"].get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
+ self.metadata = mydata["meta-data"]
+ self.userdata_raw = mydata["user-data"]
+ self.vendordata_raw = mydata["vendor-data"]
+ self._network_config = mydata["network-config"]
+ self._network_eni = mydata["meta-data"].get("network-interfaces")
return True
@property
def platform_type(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = None
if not self._platform_type:
- self._platform_type = 'lxd' if util.is_lxd() else 'nocloud'
+ self._platform_type = "lxd" if util.is_lxd() else "nocloud"
return self._platform_type
def _get_cloud_name(self):
@@ -203,11 +219,11 @@ class DataSourceNoCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
@@ -218,7 +234,7 @@ class DataSourceNoCloud(sources.DataSource):
# LP: #1568150 need getattr in the case that an old class object
# has been loaded from a pickled file and now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
+ dirs = getattr(self, "seed_dirs", [self.seed_dir])
quick_id = _quick_read_instance_id(dirs=dirs)
if not quick_id:
return None
@@ -236,7 +252,7 @@ def _quick_read_instance_id(dirs=None):
if dirs is None:
dirs = []
- iid_key = 'instance-id'
+ iid_key = "instance-id"
fill = {}
if load_cmdline_data(fill) and iid_key in fill:
return fill[iid_key]
@@ -245,9 +261,9 @@ def _quick_read_instance_id(dirs=None):
if d is None:
continue
try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
- if iid_key in md:
+ data = util.pathprefix2dict(d, required=["meta-data"])
+ md = util.load_yaml(data["meta-data"])
+ if md and iid_key in md:
return md[iid_key]
except ValueError:
pass
@@ -256,14 +272,16 @@ def _quick_read_instance_id(dirs=None):
def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+ pairs = [
+ ("ds=nocloud", sources.DSMODE_LOCAL),
+ ("ds=nocloud-net", sources.DSMODE_NETWORK),
+ ]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
# if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
+ if "dsmode" not in fill:
+ fill["dsmode"] = dsmode
return True
return False
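The comment earlier in _get_data() mentions seeding from a kernel command line such as "ds=nocloud;s=http://bit.ly/abcdefg". The following is a hypothetical illustration of that convention, not cloud-init's parse_cmdline_data(): semicolon-separated key=value pairs after the ds= token, with "s" assumed to mean "seedfrom" and "h" "local-hostname".

    def parse_nocloud_arg(cmdline: str) -> dict:
        fill = {}
        for tok in cmdline.split():
            if not tok.startswith("ds=nocloud"):
                continue
            for pair in tok.split(";")[1:]:
                if "=" not in pair:
                    continue
                key, _, val = pair.partition("=")
                # Expand the short aliases assumed above.
                fill[{"s": "seedfrom", "h": "local-hostname"}.get(key, key)] = val
        return fill

    print(parse_nocloud_arg("root=/dev/vda1 ds=nocloud;s=http://10.0.0.1/seed/;h=node1"))
    # -> {'seedfrom': 'http://10.0.0.1/seed/', 'local-hostname': 'node1'}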
@@ -323,19 +341,19 @@ def _maybe_remove_top_network(cfg):
Return the original value if no change or the updated value if changed."""
nullval = object()
- network_val = cfg.get('network', nullval)
+ network_val = cfg.get("network", nullval)
if network_val is nullval:
return cfg
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -345,19 +363,20 @@ def _maybe_remove_top_network(cfg):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- newmd = seeded.get('meta-data', {})
- if not isinstance(seeded['meta-data'], dict):
- newmd = util.load_yaml(seeded['meta-data'])
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+ newmd = seeded.get("meta-data", {})
+ if not isinstance(seeded["meta-data"], dict):
+ newmd = util.load_yaml(seeded["meta-data"])
+ ret["meta-data"] = util.mergemanydict([cur["meta-data"], newmd])
- if seeded.get('network-config'):
- ret['network-config'] = _maybe_remove_top_network(
- util.load_yaml(seeded.get('network-config')))
+ if seeded.get("network-config"):
+ ret["network-config"] = _maybe_remove_top_network(
+ util.load_yaml(seeded.get("network-config"))
+ )
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
+ if "user-data" in seeded:
+ ret["user-data"] = seeded["user-data"]
+ if "vendor-data" in seeded:
+ ret["vendor-data"] = seeded["vendor-data"]
return ret
@@ -369,7 +388,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM,)),
(DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -378,4 +397,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
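The _maybe_remove_top_network() helper shown above accepts network-config written either with or without a wrapping top-level "network:" key and unwraps it when safe to do so. A condensed, standalone sketch of that behaviour, assuming the same acceptance rules (cloud-init's version additionally logs which branch was taken):

    def unwrap_top_network(cfg: dict) -> dict:
        network_val = cfg.get("network")
        if (
            isinstance(network_val, dict)
            and len(cfg) == 1
            and (
                network_val.get("config") == "disabled"
                or {"config", "version"} <= network_val.keys()
            )
        ):
            return network_val  # drop the redundant top-level "network" key
        return cfg

    print(unwrap_top_network(
        {"network": {"version": 1, "config": [{"type": "physical", "name": "eth0"}]}}
    ))
    # -> {'version': 1, 'config': [{'type': 'physical', 'name': 'eth0'}]}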
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index b7656ac5..036d00b2 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -14,23 +14,23 @@ class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
- self.userdata_raw = ''
+ self.userdata_raw = ""
def _get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
+ if "userdata_raw" in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg["userdata_raw"]
+ if "metadata" in self.ds_cfg:
+ self.metadata = self.ds_cfg["metadata"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'config'
+ return "config"
def get_instance_id(self):
- return 'iid-datasource-none'
+ return "iid-datasource-none"
@property
def is_disconnected(self):
@@ -48,4 +48,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
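For reference, the "fallback" data that DataSourceNone._get_data() reads comes from its datasource config. The dict below mimics only the two keys the code above looks for; placing it under datasource: { None: {...} } in cloud.cfg is an assumption about the usual layout, not something shown in this diff.

    ds_cfg = {
        "metadata": {"instance-id": "iid-datasource-none", "local-hostname": "fallback"},
        "userdata_raw": "#cloud-config\nruncmd: [echo hello]",
    }

    # Mirrors the fallback lookups in _get_data() above.
    metadata = ds_cfg.get("metadata", {})
    userdata_raw = ds_cfg.get("userdata_raw", "")
    print(metadata["local-hostname"], len(userdata_raw))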
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 741c140a..0df39824 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,37 +16,39 @@ from xml.dom import minidom
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_custom_script \
- import PreCustomScript, PostCustomScript
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.config_passwd \
- import PasswordConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum as GuestCustEvent
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
+from cloudinit import safeyaml, sources, subp, util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ PostCustomScript,
+ PreCustomScript,
+)
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd import (
+ PasswordConfigurator,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
+ GuestCustErrorEnum,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
+ GuestCustEventEnum as GuestCustEvent,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
+ GuestCustStateEnum,
+)
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status,
get_tools_config,
- set_gc_status
+ set_customization_status,
+ set_gc_status,
)
LOG = logging.getLogger(__name__)
CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+VMWARE_IMC_DIR = "/var/run/vmware-imc"
class DataSourceOVF(sources.DataSource):
@@ -56,7 +58,7 @@ class DataSourceOVF(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf")
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
@@ -94,64 +96,152 @@ class DataSourceOVF(sources.DataSource):
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
+ elif system_type and "vmware" in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
+ allow_vmware_cust = False
+ allow_raw_data = False
if not self.vmware_customization_supported:
- LOG.debug("Skipping the check for "
- "VMware Customization support")
- elif not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
-
+ LOG.debug(
+ "Skipping the check for VMware Customization support"
+ )
+ else:
+ allow_vmware_cust = not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True
+ )
+ allow_raw_data = util.get_cfg_option_bool(
+ self.ds_cfg, "allow_raw_data", True
+ )
+
+ if not (allow_vmware_cust or allow_raw_data):
+ LOG.debug("Customization for VMware platform is disabled.")
+ else:
search_paths = (
- "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
+ "/usr/lib/vmware-tools",
+ "/usr/lib64/vmware-tools",
+ "/usr/lib/open-vm-tools",
+ "/usr/lib64/open-vm-tools",
+ "/usr/lib/x86_64-linux-gnu/open-vm-tools",
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools",
+ )
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
- LOG.debug("Found the customization plugin at %s",
- deployPkgPluginPath)
+ LOG.debug(
+ "Found the customization plugin at %s",
+ deployPkgPluginPath,
+ )
break
if deployPkgPluginPath:
# When the VM is powered on, the "VMware Tools" daemon
# copies the customization specification file to
# /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
+ # to search for the file in that directory which indicates
+ # that required metadata and userdata files are now
+ # present.
max_wait = get_max_wait_from_cfg(self.ds_cfg)
vmwareImcConfigFilePath = util.log_time(
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("cust.cfg", max_wait))
+ args=("cust.cfg", max_wait),
+ )
else:
LOG.debug("Did not find the customization plugin.")
+ md_path = None
if vmwareImcConfigFilePath:
- LOG.debug("Found VMware Customization Config File at %s",
- vmwareImcConfigFilePath)
- nicspath = wait_for_imc_cfg_file(
- filename="nics.txt", maxwait=10, naplen=5)
+ imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
+ cf = ConfigFile(vmwareImcConfigFilePath)
+ self._vmware_cust_conf = Config(cf)
+ LOG.debug(
+ "Found VMware Customization Config File at %s",
+ vmwareImcConfigFilePath,
+ )
+ try:
+ (md_path, ud_path, nicspath) = collect_imc_file_paths(
+ self._vmware_cust_conf
+ )
+ except FileNotFoundError as e:
+ _raise_error_status(
+ "File(s) missing in directory",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+                # Don't handle customization in the two cases below:
+ # 1. meta data is found, allow_raw_data is False.
+ # 2. no meta data is found, allow_vmware_cust is False.
+ if md_path and not allow_raw_data:
+ LOG.debug("Customization using raw data is disabled.")
+ # reset vmwareImcConfigFilePath to None to avoid
+ # customization for VMware platform
+ vmwareImcConfigFilePath = None
+ if md_path is None and not allow_vmware_cust:
+ LOG.debug(
+ "Customization using VMware config is disabled."
+ )
+ vmwareImcConfigFilePath = None
else:
LOG.debug("Did not find VMware Customization Config File")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
- if vmwareImcConfigFilePath:
+ use_raw_data = bool(vmwareImcConfigFilePath and md_path)
+ if use_raw_data:
+ set_gc_status(self._vmware_cust_conf, "Started")
+ LOG.debug("Start to load cloud-init meta data and user data")
+ try:
+ (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path)
+
+ if network:
+ self._network_config = network
+ else:
+ self._network_config = (
+ self.distro.generate_fallback_config()
+ )
+
+ except safeyaml.YAMLError as e:
+ _raise_error_status(
+ "Error parsing the cloud-init meta data",
+ e,
+ GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+ except Exception as e:
+ _raise_error_status(
+ "Error loading cloud-init configuration",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+
+ self._vmware_cust_found = True
+ found.append("vmware-tools")
+
+ util.del_dir(imcdirpath)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
+ set_gc_status(self._vmware_cust_conf, "Successful")
+
+ elif vmwareImcConfigFilePath:
+ # Load configuration from vmware_imc
self._vmware_nics_to_enable = ""
try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- self._vmware_cust_conf = Config(cf)
set_gc_status(self._vmware_cust_conf, "Started")
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
- imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
product_marker = self._vmware_cust_conf.marker_id
hasmarkerfile = check_marker_exists(
- product_marker, os.path.join(self.paths.cloud_dir, 'data'))
+ product_marker, os.path.join(self.paths.cloud_dir, "data")
+ )
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
@@ -169,7 +259,8 @@ class DataSourceOVF(sources.DataSource):
custScriptConfig = get_tools_config(
CONFGROUPNAME_GUESTCUSTOMIZATION,
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- defVal)
+ defVal,
+ )
if custScriptConfig.lower() != "true":
# Update the customization status if custom script
# is disabled
@@ -177,19 +268,21 @@ class DataSourceOVF(sources.DataSource):
LOG.debug(msg)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ )
raise RuntimeError(msg)
ccScriptsDir = os.path.join(
- self.paths.get_cpath("scripts"),
- "per-instance")
+ self.paths.get_cpath("scripts"), "per-instance"
+ )
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
if customscript:
@@ -202,22 +295,22 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
try:
LOG.debug("Preparing the Network configuration")
self._network_config = get_network_config_from_conf(
- self._vmware_cust_conf,
- True,
- True,
- self.distro.osfamily)
+ self._vmware_cust_conf, True, True, self.distro.osfamily
+ )
except Exception as e:
_raise_error_status(
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
LOG.debug("Applying password customization")
@@ -226,8 +319,9 @@ class DataSourceOVF(sources.DataSource):
try:
resetpwd = self._vmware_cust_conf.reset_password
if adminpwd or resetpwd:
- pwdConfigurator.configure(adminpwd, resetpwd,
- self.distro)
+ pwdConfigurator.configure(
+ adminpwd, resetpwd, self.distro
+ )
else:
LOG.debug("Changing password is not needed")
except Exception as e:
@@ -236,13 +330,14 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if customscript:
try:
- postcust = PostCustomScript(customscript,
- imcdirpath,
- ccScriptsDir)
+ postcust = PostCustomScript(
+ customscript, imcdirpath, ccScriptsDir
+ )
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -250,23 +345,26 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if product_marker:
try:
setup_marker_files(
product_marker,
- os.path.join(self.paths.cloud_dir, 'data'))
+ os.path.join(self.paths.cloud_dir, "data"),
+ )
except Exception as e:
_raise_error_status(
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
# TODO: Need to set the status to DONE only when the
# customization is done successfully.
@@ -274,36 +372,40 @@ class DataSourceOVF(sources.DataSource):
enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
else:
- np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
- ('iso', transport_iso9660)]
+ np = [
+ ("com.vmware.guestInfo", transport_vmware_guestinfo),
+ ("iso", transport_iso9660),
+ ]
name = None
for name, transfunc in np:
contents = transfunc()
if contents:
break
if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
+ (md, ud, cfg) = read_ovf_environment(contents, True)
self.environment = contents
+ if "network-config" in md and md["network-config"]:
+ self._network_config = md["network-config"]
found.append(name)
# There was no OVF transports found
if len(found) == 0:
return False
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
+ if "seedfrom" in md and md["seedfrom"]:
+ seedfrom = md["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
seedfound = proto
break
if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
(md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
@@ -324,14 +426,14 @@ class DataSourceOVF(sources.DataSource):
def _get_subplatform(self):
system_type = dmi.read_dmi_data("system-product-name").lower()
- if system_type == 'vmware':
- return 'vmware (%s)' % self.seed
- return 'ovf (%s)' % self.seed
+ if system_type == "vmware":
+ return "vmware (%s)" % self.seed
+ return "ovf (%s)" % self.seed
def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
+ if "public-keys" not in self.metadata:
return []
- pks = self.metadata['public-keys']
+ pks = self.metadata["public-keys"]
if isinstance(pks, (list)):
return pks
else:
@@ -351,14 +453,14 @@ class DataSourceOVF(sources.DataSource):
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf-net")
self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
def get_max_wait_from_cfg(cfg):
- default_max_wait = 90
- max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ default_max_wait = 15
+ max_wait_cfg_option = "vmware_cust_file_max_wait"
max_wait = default_max_wait
if not cfg:
@@ -367,20 +469,30 @@ def get_max_wait_from_cfg(cfg):
try:
max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
except ValueError:
- LOG.warning("Failed to get '%s', using %s",
- max_wait_cfg_option, default_max_wait)
-
- if max_wait <= 0:
- LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
- max_wait, max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Failed to get '%s', using %s",
+ max_wait_cfg_option,
+ default_max_wait,
+ )
+
+ if max_wait < 0:
+ LOG.warning(
+ "Invalid value '%s' for '%s', using '%s' instead",
+ max_wait,
+ max_wait_cfg_option,
+ default_max_wait,
+ )
max_wait = default_max_wait
return max_wait
-def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
- dirpath="/var/run/vmware-imc"):
+def wait_for_imc_cfg_file(
+ filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
+):
waited = 0
+ if maxwait <= naplen:
+ naplen = 1
while waited < maxwait:
fileFullPath = os.path.join(dirpath, filename)
@@ -392,24 +504,26 @@ def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
return None
-def get_network_config_from_conf(config, use_system_devices=True,
- configure=False, osfamily=None):
+def get_network_config_from_conf(
+ config, use_system_devices=True, configure=False, osfamily=None
+):
nicConfigurator = NicConfigurator(config.nics, use_system_devices)
nics_cfg_list = nicConfigurator.generate(configure, osfamily)
- return get_network_config(nics_cfg_list,
- config.name_servers,
- config.dns_suffixes)
+ return get_network_config(
+ nics_cfg_list, config.name_servers, config.dns_suffixes
+ )
def get_network_config(nics=None, nameservers=None, search=None):
config_list = nics
if nameservers or search:
- config_list.append({'type': 'nameserver', 'address': nameservers,
- 'search': search})
+ config_list.append(
+ {"type": "nameserver", "address": nameservers, "search": search}
+ )
- return {'version': 1, 'config': config_list}
+ return {"version": 1, "config": config_list}
# This will return a dict with some content
@@ -420,33 +534,40 @@ def read_vmware_imc(config):
ud = None
if config.host_name:
if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
+ md["local-hostname"] = config.host_name + "." + config.domain_name
else:
- md['local-hostname'] = config.host_name
+ md["local-hostname"] = config.host_name
if config.timezone:
- cfg['timezone'] = config.timezone
+ cfg["timezone"] = config.timezone
- md['instance-id'] = "iid-vmware-imc"
+ md["instance-id"] = "iid-vmware-imc"
return (md, ud, cfg)
# This will return a dict with some content
# meta-data, user-data, some config
-def read_ovf_environment(contents):
+def read_ovf_environment(contents, read_network=False):
props = get_properties(contents)
md = {}
cfg = {}
ud = None
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
+ cfg_props = ["password"]
+ md_props = ["seedfrom", "local-hostname", "public-keys", "instance-id"]
+ network_props = ["network-config"]
for (prop, val) in props.items():
- if prop == 'hostname':
+ if prop == "hostname":
prop = "local-hostname"
if prop in md_props:
md[prop] = val
elif prop in cfg_props:
cfg[prop] = val
+ elif prop in network_props and read_network:
+ try:
+ network_config = base64.b64decode(val.encode())
+ md[prop] = safeload_yaml_or_dict(network_config).get("network")
+ except Exception:
+ LOG.debug("Ignore network-config in wrong format")
elif prop == "user-data":
try:
ud = base64.b64decode(val.encode())
@@ -516,12 +637,12 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
- fstype = info['fstype']
+ fstype = info["fstype"]
if fstype != "iso9660" and require_iso:
continue
if not maybe_cdrom_device(dev):
continue
- mp = info['mountpoint']
+ mp = info["mountpoint"]
(_fname, contents) = get_ovf_env(mp)
if contents is not False:
return contents
@@ -532,9 +653,11 @@ def transport_iso9660(require_iso=True):
mtype = None
# generate a list of devices with mtype filesystem, filter by regex
- devs = [dev for dev in
- util.find_devs_with("TYPE=%s" % mtype if mtype else None)
- if maybe_cdrom_device(dev)]
+ devs = [
+ dev
+ for dev in util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)
+ ]
for dev in devs:
try:
(_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
@@ -589,15 +712,17 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ propSections = find_child(
+ dom.documentElement, lambda n: n.localName == "PropertySection"
+ )
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ propElems = find_child(
+ propSections[0], (lambda n: n.localName == "Property")
+ )
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -624,7 +749,7 @@ class XmlError(Exception):
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM,)),
(DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
@@ -668,7 +793,7 @@ def setup_marker_files(markerid, marker_dir):
for fname in os.listdir(marker_dir):
if fname.startswith(".markerfile"):
util.del_file(os.path.join(marker_dir, fname))
- open(markerfile, 'w').close()
+ open(markerfile, "w").close()
def _raise_error_status(prefix, error, event, config_file, conf):
@@ -676,12 +801,90 @@ def _raise_error_status(prefix, error, event, config_file, conf):
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
"""
- LOG.debug('%s: %s', prefix, error)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- event)
+ LOG.debug("%s: %s", prefix, error)
+ set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
+
+def load_cloudinit_data(md_path, ud_path):
+ """
+ Load the cloud-init meta data, user data, cfg and network from the
+ given files
+
+ @return: 4-tuple of configuration
+ metadata, userdata, cfg={}, network
+
+ @raises: FileNotFoundError if md_path or ud_path are absent
+ """
+ LOG.debug("load meta data from: %s: user data from: %s", md_path, ud_path)
+ md = {}
+ ud = None
+ network = None
+
+ md = safeload_yaml_or_dict(util.load_file(md_path))
+
+ if "network" in md:
+ network = md["network"]
+
+ if ud_path:
+ ud = util.load_file(ud_path).replace("\r", "")
+ return md, ud, {}, network
+
+
+def safeload_yaml_or_dict(data):
+ """
+ The meta data could be JSON or YAML. Since YAML is a strict superset of
+ JSON, we will unmarshal the data as YAML. If data is None then a new
+ dictionary is returned.
+ """
+ if not data:
+ return {}
+ return safeyaml.load(data)
+
+
+def collect_imc_file_paths(cust_conf):
+ """
+ collect all the other imc files.
+
+ metadata is preferred to nics.txt configuration data.
+
+ If metadata file exists because it is specified in customization
+ configuration, then metadata is required and userdata is optional.
+
+ @return a 3-tuple containing desired configuration file paths if present
+ Expected returns:
+ 1. user provided metadata and userdata (md_path, ud_path, None)
+ 2. user provided metadata (md_path, None, None)
+ 3. user-provided network config (None, None, nics_path)
+ 4. No config found (None, None, None)
+ """
+ md_path = None
+ ud_path = None
+ nics_path = None
+ md_file = cust_conf.meta_data_name
+ if md_file:
+ md_path = os.path.join(VMWARE_IMC_DIR, md_file)
+ if not os.path.exists(md_path):
+ raise FileNotFoundError(
+ "meta data file is not found: %s" % md_path
+ )
+
+ ud_file = cust_conf.user_data_name
+ if ud_file:
+ ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
+ if not os.path.exists(ud_path):
+ raise FileNotFoundError(
+ "user data file is not found: %s" % ud_path
+ )
+ else:
+ nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
+ if not os.path.exists(nics_path):
+ LOG.debug("%s does not exist.", nics_path)
+ nics_path = None
+
+ return md_path, ud_path, nics_path
+
+
# vi: ts=4 expandtab
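read_ovf_environment() above treats a "network-config" OVF property as base64-encoded YAML and keeps only its top-level "network" key. A hedged sketch of that decoding step, with PyYAML's safe_load standing in for cloudinit.safeyaml.load and a made-up property value:

    import base64
    import yaml  # PyYAML

    # Example property value: base64 of a small netplan-style document.
    prop_val = base64.b64encode(
        b"network:\n  version: 2\n  ethernets:\n    eth0:\n      dhcp4: true\n"
    ).decode()

    decoded = base64.b64decode(prop_val.encode())
    network = (yaml.safe_load(decoded) or {}).get("network")
    print(network)
    # -> {'version': 2, 'ethernets': {'eth0': {'dhcp4': True}}}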
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 730ec586..e46f920d 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -20,16 +20,12 @@ import re
import string
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import net, sources, subp, util
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
+DEFAULT_PARSEUSER = "nobody"
CONTEXT_DISK_FILES = ["context.sh"]
@@ -40,7 +36,7 @@ class DataSourceOpenNebula(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+ self.seed_dir = os.path.join(paths.seed_dir, "opennebula")
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -53,8 +49,8 @@ class DataSourceOpenNebula(sources.DataSource):
# decide parseuser for context.sh shell reader
parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
+ if "parseuser" in self.ds_cfg:
+ parseuser = self.ds_cfg.get("parseuser")
candidates = [self.seed_dir]
candidates.extend(find_candidate_devs())
@@ -90,29 +86,30 @@ class DataSourceOpenNebula(sources.DataSource):
return False
# merge fetched metadata with datasource defaults
- md = results['metadata']
+ md = results["metadata"]
md = util.mergemanydict([md, defaults])
# check for valid user specified dsmode
self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
+ [results.get("DSMODE"), self.ds_cfg.get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
self.seed = seed
- self.network = results.get('network-interfaces')
+ self.network = results.get("network-interfaces")
self.metadata = md
- self.userdata_raw = results.get('userdata')
+ self.userdata_raw = results.get("userdata")
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
if self.seed_dir in self.seed:
- subplatform_type = 'seed-dir'
+ subplatform_type = "seed-dir"
else:
- subplatform_type = 'config-disk'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "config-disk"
+ return "%s (%s)" % (subplatform_type, self.seed)
@property
def network_config(self):
@@ -144,19 +141,25 @@ class OpenNebulaNetwork(object):
if system_nics_by_mac is None:
system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
- [k for k in sorted(system_nics_by_mac.items(),
- key=lambda k: net.natural_sort_key(k[1]))])
+ [
+ k
+ for k in sorted(
+ system_nics_by_mac.items(),
+ key=lambda k: net.natural_sort_key(k[1]),
+ )
+ ]
+ )
# OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC.
# context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
self.context_devname = {}
for k, v in context.items():
- m = re.match(r'^(.+)_MAC$', k)
+ m = re.match(r"^(.+)_MAC$", k)
if m:
self.context_devname[v.lower()] = m.group(1)
def mac2ip(self, mac):
- return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
+ return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
def mac2network(self, mac):
return self.mac2ip(mac).rpartition(".")[0] + ".0"
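The mac2ip()/mac2network() pair shown in the hunk above relies on OpenNebula encoding the guest's IPv4 address in the last four octets of the NIC's MAC address. A standalone illustration of the same conversion:

    def mac2ip(mac: str) -> str:
        # Last four MAC octets, hex -> decimal, joined as a dotted quad.
        return ".".join(str(int(octet, 16)) for octet in mac.split(":")[2:])

    def mac2network(mac: str) -> str:
        # Replace the final octet with 0 to get the /24 network address.
        return mac2ip(mac).rpartition(".")[0] + ".0"

    print(mac2ip("02:00:0a:00:00:05"))       # -> 10.0.0.5
    print(mac2network("02:00:0a:00:00:05"))  # -> 10.0.0.0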
@@ -164,12 +167,12 @@ class OpenNebulaNetwork(object):
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
- dns.extend(self.context.get('DNS', "").split())
+ dns.extend(self.context.get("DNS", "").split())
if dns:
- nameservers['addresses'] = dns
+ nameservers["addresses"] = dns
search_domain = self.get_field(dev, "search_domain", "").split()
if search_domain:
- nameservers['search'] = search_domain
+ nameservers["search"] = search_domain
return nameservers
def get_mtu(self, dev):
@@ -195,7 +198,12 @@ class OpenNebulaNetwork(object):
return self.get_field(dev, "gateway")
def get_gateway6(self, dev):
- return self.get_field(dev, "gateway6")
+ # OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY
+ # to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in
+ # OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536).
+ return self.get_field(
+ dev, "ip6_gateway", self.get_field(dev, "gateway6")
+ )
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
@@ -208,14 +216,21 @@ class OpenNebulaNetwork(object):
context stores <dev>_<NAME> (example: eth0_DOMAIN).
an empty string for value will return default."""
- val = self.context.get('_'.join((dev, name,)).upper())
+ val = self.context.get(
+ "_".join(
+ (
+ dev,
+ name,
+ )
+ ).upper()
+ )
# allow empty string to return the default.
return default if val in (None, "") else val
def gen_conf(self):
netconf = {}
- netconf['version'] = 2
- netconf['ethernets'] = {}
+ netconf["version"] = 2
+ netconf["ethernets"] = {}
ethernets = {}
for mac, dev in self.ifaces.items():
@@ -228,46 +243,46 @@ class OpenNebulaNetwork(object):
devconf = {}
# Set MAC address
- devconf['match'] = {'macaddress': mac}
+ devconf["match"] = {"macaddress": mac}
# Set IPv4 address
- devconf['addresses'] = []
+ devconf["addresses"] = []
mask = self.get_mask(c_dev)
- prefix = str(net.mask_to_net_prefix(mask))
- devconf['addresses'].append(
- self.get_ip(c_dev, mac) + '/' + prefix)
+ prefix = str(net.ipv4_mask_to_net_prefix(mask))
+ devconf["addresses"].append(self.get_ip(c_dev, mac) + "/" + prefix)
# Set IPv6 Global and ULA address
addresses6 = self.get_ip6(c_dev)
if addresses6:
prefix6 = self.get_ip6_prefix(c_dev)
- devconf['addresses'].extend(
- [i + '/' + prefix6 for i in addresses6])
+ devconf["addresses"].extend(
+ [i + "/" + prefix6 for i in addresses6]
+ )
# Set IPv4 default gateway
gateway = self.get_gateway(c_dev)
if gateway:
- devconf['gateway4'] = gateway
+ devconf["gateway4"] = gateway
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
if gateway6:
- devconf['gateway6'] = gateway6
+ devconf["gateway6"] = gateway6
# Set DNS servers and search domains
nameservers = self.get_nameservers(c_dev)
if nameservers:
- devconf['nameservers'] = nameservers
+ devconf["nameservers"] = nameservers
# Set MTU size
mtu = self.get_mtu(c_dev)
if mtu:
- devconf['mtu'] = mtu
+ devconf["mtu"] = mtu
ethernets[dev] = devconf
- netconf['ethernets'] = ethernets
- return(netconf)
+ netconf["ethernets"] = ethernets
+ return netconf
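gen_conf() above assembles a version-2 network configuration keyed by device name, with each NIC matched by MAC address. A sketch of the shape it produces for a single NIC; ipaddress.ip_network is used here only as a stand-in for cloudinit.net.ipv4_mask_to_net_prefix when turning the context netmask into a prefix length, and the addresses are example values.

    import ipaddress

    mac, dev = "02:00:0a:00:00:05", "eth0"
    prefix = ipaddress.ip_network("0.0.0.0/255.255.255.0").prefixlen  # 24

    netconf = {
        "version": 2,
        "ethernets": {
            dev: {
                "match": {"macaddress": mac},
                "addresses": ["10.0.0.5/%d" % prefix],
                "gateway4": "10.0.0.1",
                "nameservers": {"addresses": ["10.0.0.1"], "search": ["example.org"]},
            }
        },
    }
    print(netconf["ethernets"][dev]["addresses"])  # -> ['10.0.0.5/24']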
def find_candidate_devs():
@@ -275,7 +290,7 @@ def find_candidate_devs():
Return a list of devices that may contain the context disk.
"""
combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
+ for f in ("LABEL=CONTEXT", "LABEL=CDROM", "TYPE=iso9660"):
devs = util.find_devs_with(f)
devs.sort()
for d in devs:
@@ -286,16 +301,17 @@ def find_candidate_devs():
def switch_user_cmd(user):
- return ['sudo', '-u', user]
+ return ["sudo", "-u", user]
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
+def parse_shell_config(
+ content, keylist=None, bash=None, asuser=None, switch_user_cb=None
+):
if isinstance(bash, str):
bash = [bash]
elif bash is None:
- bash = ['bash', '-e']
+ bash = ["bash", "-e"]
if switch_user_cb is None:
switch_user_cb = switch_user_cmd
@@ -309,17 +325,24 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
keylist = allvars
keylist_in = []
- setup = '\n'.join(('__v="";', '',))
+ setup = "\n".join(
+ (
+ '__v="";',
+ "",
+ )
+ )
def varprinter(vlist):
# output '\0'.join(['_start_', key=value NULL for vars in vlist]
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
+ return "\n".join(
+ (
+ 'printf "%s\\0" _start_',
+ "for __v in %s; do" % " ".join(vlist),
+ ' printf "%s=%s\\0" "$__v" "${!__v}";',
+ "done",
+ "",
+ )
+ )
# the rendered 'bcmd' is bash syntax that does
# setup: declare variables we use (so they show up in 'all')
@@ -332,12 +355,15 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# key=value (for each preset variable)
# literal '_start_'
# key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
+ bcmd = (
+ "unset IFS\n"
+ + setup
+ + varprinter(allvars)
+ + "{\n%s\n\n:\n} > /dev/null\n" % content
+ + "unset IFS\n"
+ + varprinter(keylist)
+ + "\n"
+ )
cmd = []
if asuser is not None:
@@ -349,8 +375,14 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# exclude vars in bash that change on their own or that we used
excluded = (
- "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
- "SRANDOM", "__v",
+ "EPOCHREALTIME",
+ "EPOCHSECONDS",
+ "RANDOM",
+ "LINENO",
+ "SECONDS",
+ "_",
+ "SRANDOM",
+ "__v",
)
preset = {}
ret = {}
@@ -364,8 +396,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(key, val) = line.split("=", 1)
if target is preset:
preset[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
+ elif key not in excluded and (
+ key in keylist_in or preset.get(key) != val
+ ):
ret[key] = val
except ValueError:
if line != "_start_":
@@ -394,7 +427,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
context = {}
- results = {'userdata': None, 'metadata': {}}
+ results = {"userdata": None, "metadata": {}}
if "context.sh" in found:
if asuser is not None:
@@ -403,10 +436,11 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser)
+ user=asuser
+ )
) from e
try:
- path = os.path.join(source_dir, 'context.sh')
+ path = os.path.join(source_dir, "context.sh")
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except subp.ProcessExecutionError as e:
@@ -423,7 +457,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if not context:
return results
- results['metadata'] = context
+ results["metadata"] = context
# process single or multiple SSH keys
ssh_key_var = None
@@ -434,40 +468,41 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [
+ results["metadata"]["public-keys"] = [
line for line in lines if len(line) and not line.startswith("#")
]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ("SET_HOSTNAME", "HOSTNAME", "PUBLIC_IP", "IP_PUBLIC", "ETH0_IP"):
if k in context:
- results['metadata']['local-hostname'] = context[k]
+ results["metadata"]["local-hostname"] = context[k]
break
# raw user data
if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
+ results["userdata"] = context["USER_DATA"]
elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
+ results["userdata"] = context["USERDATA"]
# b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
+ if "userdata" in results:
+ encoding = context.get(
+ "USERDATA_ENCODING", context.get("USER_DATA_ENCODING")
+ )
if encoding == "base64":
try:
- results['userdata'] = util.b64d(results['userdata'])
+ results["userdata"] = util.b64d(results["userdata"])
except TypeError:
LOG.warning("Failed base64 decoding of userdata")
# generate Network Configuration v2
# only if there are any required context variables
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
- ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
+ ipaddr_keys = [k for k in context if re.match(r"^ETH\d+_IP.*$", k)]
if ipaddr_keys:
onet = OpenNebulaNetwork(context, distro)
- results['network-interfaces'] = onet.gen_conf()
+ results["network-interfaces"] = onet.gen_conf()
return results
@@ -484,7 +519,7 @@ DataSourceOpenNebulaNet = DataSourceOpenNebula
# Used to match classes to dependencies
datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM,)),
]
@@ -492,4 +527,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b3406c67..6878528d 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,13 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -26,13 +24,13 @@ DEFAULT_METADATA = {
}
# OpenStack DMI constants
-DMI_PRODUCT_NOVA = 'OpenStack Nova'
-DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+DMI_PRODUCT_NOVA = "OpenStack Nova"
+DMI_PRODUCT_COMPUTE = "OpenStack Compute"
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
-DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+DMI_ASSET_TAG_OPENTELEKOM = "OpenTelekomCloud"
# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
# -> compute.defaults.vmware.smbios_asset_tag for this value
-DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+DMI_ASSET_TAG_SAPCCLOUD = "SAP CCloud VM"
VALID_DMI_ASSET_TAGS = VALID_DMI_PRODUCT_NAMES
VALID_DMI_ASSET_TAGS += [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
@@ -46,6 +44,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -64,8 +71,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(urls) - set(filtered))),
+ )
if len(filtered):
urls = filtered
else:
@@ -75,20 +84,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls = []
url2base = {}
for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
+ md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
- urls=md_urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds)
+ urls=md_urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ )
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
+ LOG.debug(
+ "Giving up on OpenStack md from %s after %s seconds",
+ md_urls,
+ int(time.time() - start_time),
+ )
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
@@ -106,18 +120,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
# network_config by default unless configured in /etc/cloud/cloud.cfg*.
# Patch Xenial and Artful before release to default to False.
- if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ if util.is_false(self.ds_cfg.get("apply_network_config", True)):
self._network_config = None
return self._network_config
if self.network_json == sources.UNSET:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- 'Unexpected call to network_config when network_json is None.')
+ "Unexpected call to network_config when network_json is None."
+ )
return None
- LOG.debug('network config provided via network_json')
+ LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
return self._network_config
def _get_data(self):
@@ -127,7 +143,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
if not detect_openstack(accept_oracle=not oracle_considered):
return False
@@ -135,8 +151,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
results = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
util.logexc(LOG, str(e))
return False
@@ -147,19 +165,19 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
+ self.dsmode = self._determine_dsmode([results.get("dsmode")])
if self.dsmode == sources.DSMODE_DISABLED:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.network_json = results.get('networkdata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.network_json = results.get("networkdata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -167,6 +185,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ vd2 = results.get("vendordata2")
+ self.vendordata2_pure = vd2
+ try:
+ self.vendordata2_raw = sources.convert_vendordata(vd2)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data2: %s", e)
+ self.vendordata2_raw = None
+
return True
def _crawl_metadata(self):
@@ -179,26 +205,35 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
if not self.wait_for_metadata_service():
raise sources.InvalidMetaDataException(
- 'No active metadata service found')
+ "No active metadata service found"
+ )
except IOError as e:
raise sources.InvalidMetaDataException(
- 'IOError contacting metadata service: {error}'.format(
- error=str(e)))
+ "IOError contacting metadata service: {error}".format(
+ error=str(e)
+ )
+ )
url_params = self.get_url_params()
try:
result = util.log_time(
- LOG.debug, 'Crawl of openstack metadata service',
- read_metadata_service, args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': url_params.num_retries,
- 'timeout': url_params.timeout_seconds})
+ LOG.debug,
+ "Crawl of openstack metadata service",
+ read_metadata_service,
+ args=[self.metadata_address],
+ kwargs={
+ "ssl_details": self.ssl_details,
+ "retries": url_params.num_retries,
+ "timeout": url_params.timeout_seconds,
+ },
+ )
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
except (openstack.BrokenMetadata, IOError) as e:
- msg = 'Broken metadata address {addr}'.format(
- addr=self.metadata_address)
+ msg = "Broken metadata address {addr}".format(
+ addr=self.metadata_address
+ )
raise sources.InvalidMetaDataException(msg) from e
return result
@@ -215,10 +250,10 @@ class DataSourceOpenStackLocal(DataSourceOpenStack):
perform_dhcp_setup = True # Get metadata network config if present
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
+def read_metadata_service(base_url, ssl_details=None, timeout=5, retries=5):
+ reader = openstack.MetadataReader(
+ base_url, ssl_details=ssl_details, timeout=timeout, retries=retries
+ )
return reader.read_v2()
@@ -226,14 +261,14 @@ def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
- product_name = dmi.read_dmi_data('system-product-name')
+ product_name = dmi.read_dmi_data("system-product-name")
if product_name in VALID_DMI_PRODUCT_NAMES:
return True
- elif dmi.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
return True
elif accept_oracle and oracle._is_platform_viable():
return True
- elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
return True
return False
@@ -249,4 +284,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index bf81b10b..6d81be1e 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
# Don't use IMDS to configure secondary NICs by default
- 'configure_secondary_nics': False,
+ "configure_secondary_nics": False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
@@ -40,6 +40,7 @@ METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
+V2_HEADERS = {"Authorization": "Bearer Oracle"}
OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
@@ -60,50 +61,52 @@ def _ensure_netfailover_safe(network_config):
"""
# ignore anything that's not an actual network-config
- if 'version' not in network_config:
+ if "version" not in network_config:
return
- if network_config['version'] not in [1, 2]:
- LOG.debug('Ignoring unknown network config version: %s',
- network_config['version'])
+ if network_config["version"] not in [1, 2]:
+ LOG.debug(
+ "Ignoring unknown network config version: %s",
+ network_config["version"],
+ )
return
mac_to_name = get_interfaces_by_mac()
- if network_config['version'] == 1:
- for cfg in [c for c in network_config['config'] if 'type' in c]:
- if cfg['type'] == 'physical':
- if 'mac_address' in cfg:
- mac = cfg['mac_address']
+ if network_config["version"] == 1:
+ for cfg in [c for c in network_config["config"] if "type" in c]:
+ if cfg["type"] == "physical":
+ if "mac_address" in cfg:
+ mac = cfg["mac_address"]
cur_name = mac_to_name.get(mac)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['mac_address']
+ del cfg["mac_address"]
- elif network_config['version'] == 2:
- for _, cfg in network_config.get('ethernets', {}).items():
- if 'match' in cfg:
- macaddr = cfg.get('match', {}).get('macaddress')
+ elif network_config["version"] == 2:
+ for _, cfg in network_config.get("ethernets", {}).items():
+ if "match" in cfg:
+ macaddr = cfg.get("match", {}).get("macaddress")
if macaddr:
cur_name = mac_to_name.get(macaddr)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['match']['macaddress']
- del cfg['set-name']
- cfg['match']['name'] = cur_name
+ del cfg["match"]["macaddress"]
+ del cfg["set-name"]
+ cfg["match"]["name"] = cur_name
class DataSourceOracle(sources.DataSource):
- dsname = 'Oracle'
+ dsname = "Oracle"
system_uuid = None
vendordata_pure = None
network_config_sources = (
sources.NetworkConfigSource.cmdline,
+ sources.NetworkConfigSource.system_cfg,
sources.NetworkConfigSource.ds,
sources.NetworkConfigSource.initramfs,
- sources.NetworkConfigSource.system_cfg,
)
_network_config = sources.UNSET
@@ -112,9 +115,12 @@ class DataSourceOracle(sources.DataSource):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
self._vnics_data = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", self.dsname], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
@@ -129,12 +135,18 @@ class DataSourceOracle(sources.DataSource):
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
fetch_vnics_data = self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
)
network_context = noop()
if not _is_iscsi_root():
- network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ network_context = dhcp.EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": METADATA_PATTERN.format(version=2, path="instance"),
+ "headers": V2_HEADERS,
+ },
+ )
with network_context:
fetched_metadata = read_opc_metadata(
fetch_vnics_data=fetch_vnics_data
@@ -172,7 +184,7 @@ class DataSourceOracle(sources.DataSource):
return sources.instance_id_matches_system_uuid(self.system_uuid)
def get_public_ssh_keys(self):
- return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+ return sources.normalize_pubkey_data(self.metadata.get("public_keys"))
@property
def network_config(self):
@@ -189,8 +201,8 @@ class DataSourceOracle(sources.DataSource):
self._network_config = self.distro.generate_fallback_config()
if self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
):
try:
# Mutate self._network_config to include secondary
@@ -198,8 +210,8 @@ class DataSourceOracle(sources.DataSource):
self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
- LOG,
- "Failed to parse secondary network configuration!")
+ LOG, "Failed to parse secondary network configuration!"
+ )
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -223,11 +235,10 @@ class DataSourceOracle(sources.DataSource):
(if the IMDS returns valid JSON with unexpected contents).
"""
if self._vnics_data is None:
- LOG.warning(
- "Secondary NIC data is UNSET but should not be")
+ LOG.warning("Secondary NIC data is UNSET but should not be")
return
- if 'nicIndex' in self._vnics_data[0]:
+ if "nicIndex" in self._vnics_data[0]:
# TODO: Once configure_secondary_nics defaults to True, lower the
# level of this log message. (Currently, if we're running this
# code at all, someone has explicitly opted-in to secondary
@@ -236,8 +247,8 @@ class DataSourceOracle(sources.DataSource):
# Metal Machine launch, which means INFO or DEBUG would be more
# appropriate.)
LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; '
- 'skipping secondary VNIC configuration.'
+ "VNIC metadata indicates this is a bare metal machine; "
+ "skipping secondary VNIC configuration."
)
return
@@ -247,39 +258,45 @@ class DataSourceOracle(sources.DataSource):
# We skip the first entry in the response because the primary
# interface is already configured by iSCSI boot; applying
# configuration from the IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
+ mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping',
- mac_address)
+ LOG.debug(
+ "Interface with MAC %s not found; skipping", mac_address
+ )
continue
name = interfaces_by_mac[mac_address]
- if self._network_config['version'] == 1:
+ if self._network_config["version"] == 1:
subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
+ "type": "static",
+ "address": vnic_dict["privateIp"],
+ }
+ self._network_config["config"].append(
+ {
+ "name": name,
+ "type": "physical",
+ "mac_address": mac_address,
+ "mtu": MTU,
+ "subnets": [subnet],
+ }
+ )
+ elif self._network_config["version"] == 2:
+ self._network_config["ethernets"][name] = {
+ "addresses": [vnic_dict["privateIp"]],
+ "mtu": MTU,
+ "dhcp4": False,
+ "dhcp6": False,
+ "match": {"macaddress": mac_address},
}
- self._network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif self._network_config['version'] == 2:
- self._network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
def _read_system_uuid():
- sys_uuid = dmi.read_dmi_data('system-uuid')
+ sys_uuid = dmi.read_dmi_data("system-uuid")
return None if sys_uuid is None else sys_uuid.lower()
def _is_platform_viable():
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
return asset_tag == CHASSIS_ASSET_TAG
@@ -304,11 +321,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
retries = 2
def _fetch(metadata_version: int, path: str) -> dict:
- headers = {
- "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
return readurl(
url=METADATA_PATTERN.format(version=metadata_version, path=path),
- headers=headers,
+ headers=V2_HEADERS if metadata_version > 1 else None,
retries=retries,
)._response.json()
@@ -324,8 +339,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
try:
vnics_data = _fetch(metadata_version, path="vnics")
except UrlError:
- util.logexc(LOG,
- "Failed to fetch secondary network configuration!")
+ util.logexc(
+ LOG, "Failed to fetch secondary network configuration!"
+ )
return OpcMetadata(metadata_version, instance_data, vnics_data)
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 0b8994bf..14ac77e4 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -14,32 +14,34 @@ import os
import os.path
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit import sources, subp, util
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
-ETC_HOSTS = '/etc/hosts'
+ETC_HOSTS = "/etc/hosts"
def get_manage_etc_hosts():
hosts = util.load_file(ETC_HOSTS, quiet=True)
if hosts:
- LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ LOG.debug("/etc/hosts exists - setting manage_etc_hosts to False")
return False
- LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True')
+ LOG.debug("/etc/hosts does not exists - setting manage_etc_hosts to True")
return True
def ip2int(addr):
- parts = addr.split('.')
- return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
- (int(parts[2]) << 8) + int(parts[3])
+ parts = addr.split(".")
+ return (
+ (int(parts[0]) << 24)
+ + (int(parts[1]) << 16)
+ + (int(parts[2]) << 8)
+ + int(parts[3])
+ )
def int2ip(addr):
- return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+ return ".".join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
def _sub_arp(cmd):
@@ -48,33 +50,35 @@ def _sub_arp(cmd):
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return subp.subp(['arping'] + cmd)
+ return subp.subp(["arping"] + cmd)
def gratuitous_arp(items, distro):
- source_param = '-S'
- if distro.name in ['fedora', 'centos', 'rhel']:
- source_param = '-s'
+ source_param = "-S"
+ if distro.name in ["fedora", "centos", "rhel"]:
+ source_param = "-s"
for item in items:
try:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ _sub_arp(
+ ["-c", "2", source_param, item["source"], item["destination"]]
+ )
except subp.ProcessExecutionError as error:
# warning, because the system is able to function properly
# despite no success - some ARP table may be waiting for
# expiration, but the system may continue
- LOG.warning('Failed to arping from "%s" to "%s": %s',
- item['source'], item['destination'], error)
+ LOG.warning(
+ 'Failed to arping from "%s" to "%s": %s',
+ item["source"],
+ item["destination"],
+ error,
+ )
def get_md():
"""Returns False (not found or error) or a dictionary with metadata."""
devices = set(
- util.find_devs_with('LABEL=CLOUDMD') +
- util.find_devs_with('LABEL=cloudmd')
+ util.find_devs_with("LABEL=CLOUDMD")
+ + util.find_devs_with("LABEL=cloudmd")
)
if not devices:
return False
@@ -83,7 +87,7 @@ def get_md():
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat', 'msdosfs']
+ mtype=["vfat", "fat", "msdosfs"],
)
if rbx_data:
return rbx_data
@@ -91,11 +95,13 @@ def get_md():
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", device)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for user data", device
+ )
- LOG.debug("Did not find RbxCloud data, searched devices: %s",
- ",".join(devices))
+ LOG.debug(
+ "Did not find RbxCloud data, searched devices: %s", ",".join(devices)
+ )
return False
@@ -107,25 +113,28 @@ def generate_network_config(netadps):
@returns: A dict containing network config
"""
return {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'eth{}'.format(str(i)),
- 'mac_address': netadp['macaddress'].lower(),
- 'subnets': [
+ "type": "physical",
+ "name": "eth{}".format(str(i)),
+ "mac_address": netadp["macaddress"].lower(),
+ "subnets": [
{
- 'type': 'static',
- 'address': ip['address'],
- 'netmask': netadp['network']['netmask'],
- 'control': 'auto',
- 'gateway': netadp['network']['gateway'],
- 'dns_nameservers': netadp['network']['dns'][
- 'nameservers']
- } for ip in netadp['ip']
+ "type": "static",
+ "address": ip["address"],
+ "netmask": netadp["network"]["netmask"],
+ "control": "auto",
+ "gateway": netadp["network"]["gateway"],
+ "dns_nameservers": netadp["network"]["dns"][
+ "nameservers"
+ ],
+ }
+ for ip in netadp["ip"]
],
- } for i, netadp in enumerate(netadps)
- ]
+ }
+ for i, netadp in enumerate(netadps)
+ ],
}
@@ -140,65 +149,60 @@ def read_user_data_callback(mount_dir):
"""
meta_data = util.load_json(
text=util.load_file(
- fname=os.path.join(mount_dir, 'cloud.json'),
- decode=False
+ fname=os.path.join(mount_dir, "cloud.json"), decode=False
)
)
user_data = util.load_file(
- fname=os.path.join(mount_dir, 'user.data'),
- quiet=True
+ fname=os.path.join(mount_dir, "user.data"), quiet=True
)
- if 'vm' not in meta_data or 'netadp' not in meta_data:
+ if "vm" not in meta_data or "netadp" not in meta_data:
util.logexc(LOG, "Failed to load metadata. Invalid format.")
return None
- username = meta_data.get('additionalMetadata', {}).get('username')
- ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+ username = meta_data.get("additionalMetadata", {}).get("username")
+ ssh_keys = meta_data.get("additionalMetadata", {}).get("sshKeys", [])
hash = None
- if meta_data.get('additionalMetadata', {}).get('password'):
- hash = meta_data['additionalMetadata']['password']['sha512']
+ if meta_data.get("additionalMetadata", {}).get("password"):
+ hash = meta_data["additionalMetadata"]["password"]["sha512"]
- network = generate_network_config(meta_data['netadp'])
+ network = generate_network_config(meta_data["netadp"])
data = {
- 'userdata': user_data,
- 'metadata': {
- 'instance-id': meta_data['vm']['_id'],
- 'local-hostname': meta_data['vm']['name'],
- 'public-keys': []
+ "userdata": user_data,
+ "metadata": {
+ "instance-id": meta_data["vm"]["_id"],
+ "local-hostname": meta_data["vm"]["name"],
+ "public-keys": [],
},
- 'gratuitous_arp': [
- {
- "source": ip["address"],
- "destination": target
- }
- for netadp in meta_data['netadp']
- for ip in netadp['ip']
+ "gratuitous_arp": [
+ {"source": ip["address"], "destination": target}
+ for netadp in meta_data["netadp"]
+ for ip in netadp["ip"]
for target in [
- netadp['network']["gateway"],
- int2ip(ip2int(netadp['network']["gateway"]) + 2),
- int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ netadp["network"]["gateway"],
+ int2ip(ip2int(netadp["network"]["gateway"]) + 2),
+ int2ip(ip2int(netadp["network"]["gateway"]) + 3),
]
],
- 'cfg': {
- 'ssh_pwauth': True,
- 'disable_root': True,
- 'system_info': {
- 'default_user': {
- 'name': username,
- 'gecos': username,
- 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
- 'passwd': hash,
- 'lock_passwd': False,
- 'ssh_authorized_keys': ssh_keys,
+ "cfg": {
+ "ssh_pwauth": True,
+ "disable_root": True,
+ "system_info": {
+ "default_user": {
+ "name": username,
+ "gecos": username,
+ "sudo": ["ALL=(ALL) NOPASSWD:ALL"],
+ "passwd": hash,
+ "lock_passwd": False,
+ "ssh_authorized_keys": ssh_keys,
}
},
- 'network_config': network,
- 'manage_etc_hosts': get_manage_etc_hosts(),
+ "network_config": network,
+ "manage_etc_hosts": get_manage_etc_hosts(),
},
}
- LOG.debug('returning DATA object:')
+ LOG.debug("returning DATA object:")
LOG.debug(data)
return data
@@ -206,10 +210,13 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- update_events = {'network': [
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT
- ]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -227,18 +234,18 @@ class DataSourceRbxCloud(sources.DataSource):
rbx_data = get_md()
if rbx_data is False:
return False
- self.userdata_raw = rbx_data['userdata']
- self.metadata = rbx_data['metadata']
- self.gratuitous_arp = rbx_data['gratuitous_arp']
- self.cfg = rbx_data['cfg']
+ self.userdata_raw = rbx_data["userdata"]
+ self.metadata = rbx_data["metadata"]
+ self.gratuitous_arp = rbx_data["gratuitous_arp"]
+ self.cfg = rbx_data["cfg"]
return True
@property
def network_config(self):
- return self.cfg['network_config']
+ return self.cfg["network_config"]
def get_public_ssh_keys(self):
- return self.metadata['public-keys']
+ return self.metadata["public-keys"]
def get_userdata_raw(self):
return self.userdata_raw
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 41be7665..8e5dd82c 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -27,21 +27,18 @@ from requests.packages.urllib3.poolmanager import PoolManager
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import net
+from cloudinit import net, sources, url_helper, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
-DS_BASE_URL = 'http://169.254.42.42'
+DS_BASE_URL = "http://169.254.42.42"
BUILTIN_DS_CONFIG = {
- 'metadata_url': DS_BASE_URL + '/conf?format=json',
- 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
- 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+ "metadata_url": DS_BASE_URL + "/conf?format=json",
+ "userdata_url": DS_BASE_URL + "/user_data/cloud-init",
+ "vendordata_url": DS_BASE_URL + "/vendor_data/cloud-init",
}
DEF_MD_RETRIES = 5
@@ -57,15 +54,15 @@ def on_scaleway():
* the initrd created the file /var/run/scaleway.
* "scaleway" is in the kernel cmdline.
"""
- vendor_name = dmi.read_dmi_data('system-manufacturer')
- if vendor_name == 'Scaleway':
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
return True
- if os.path.exists('/var/run/scaleway'):
+ if os.path.exists("/var/run/scaleway"):
return True
cmdline = util.get_cmdline()
- if 'scaleway' in cmdline:
+ if "scaleway" in cmdline:
return True
return False
@@ -75,6 +72,7 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
"""
+
def __init__(self, source_address, **kwargs):
self.source_address = source_address
super(SourceAddressAdapter, self).__init__(**kwargs)
@@ -83,11 +81,13 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
socket_options = HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
]
- self.poolmanager = PoolManager(num_pools=connections,
- maxsize=maxsize,
- block=block,
- source_address=self.source_address,
- socket_options=socket_options)
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options,
+ )
def query_data_api_once(api_address, timeout, requests_session):
@@ -117,9 +117,10 @@ def query_data_api_once(api_address, timeout, requests_session):
session=requests_session,
# If the error is a HTTP/404 or a ConnectionError, go into raise
# block below and don't bother retrying.
- exception_cb=lambda _, exc: exc.code != 404 and (
+ exception_cb=lambda _, exc: exc.code != 404
+ and (
not isinstance(exc.cause, requests.exceptions.ConnectionError)
- )
+ ),
)
return util.decode_binary(resp.contents)
except url_helper.UrlError as exc:
@@ -143,25 +144,22 @@ def query_data_api(api_type, api_address, retries, timeout):
for port in range(1, max(retries, 2)):
try:
LOG.debug(
- 'Trying to get %s data (bind on port %d)...',
- api_type, port
+ "Trying to get %s data (bind on port %d)...", api_type, port
)
requests_session = requests.Session()
requests_session.mount(
- 'http://',
- SourceAddressAdapter(source_address=('0.0.0.0', port))
+ "http://",
+ SourceAddressAdapter(source_address=("0.0.0.0", port)),
)
data = query_data_api_once(
- api_address,
- timeout=timeout,
- requests_session=requests_session
+ api_address, timeout=timeout, requests_session=requests_session
)
- LOG.debug('%s-data downloaded', api_type)
+ LOG.debug("%s-data downloaded", api_type)
return data
except url_helper.UrlError as exc:
# Local port already in use or HTTP/429.
- LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ LOG.warning("Error while trying to get %s data: %s", api_type, exc)
time.sleep(5)
last_exc = exc
continue
@@ -172,38 +170,44 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
dsname = "Scaleway"
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
- BUILTIN_DS_CONFIG
- ])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.vendordata_address = self.ds_cfg['vendordata_url']
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.vendordata_address = self.ds_cfg["vendordata_url"]
- self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
- self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT))
self._fallback_interface = None
self._network_config = sources.UNSET
def _crawl_metadata(self):
- resp = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- retries=self.retries)
+ resp = url_helper.readurl(
+ self.metadata_address, timeout=self.timeout, retries=self.retries
+ )
self.metadata = json.loads(util.decode_binary(resp.contents))
self.userdata_raw = query_data_api(
- 'user-data', self.userdata_address,
- self.retries, self.timeout
+ "user-data", self.userdata_address, self.retries, self.timeout
)
self.vendordata_raw = query_data_api(
- 'vendor-data', self.vendordata_address,
- self.retries, self.timeout
+ "vendor-data", self.vendordata_address, self.retries, self.timeout
)
def _get_data(self):
@@ -215,8 +219,10 @@ class DataSourceScaleway(sources.DataSource):
try:
with EphemeralDHCPv4(self._fallback_interface):
util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError) as e:
util.logexc(LOG, str(e))
return False
@@ -229,8 +235,10 @@ class DataSourceScaleway(sources.DataSource):
metadata API.
"""
if self._network_config is None:
- LOG.warning('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
self._network_config = sources.UNSET
if self._network_config != sources.UNSET:
@@ -239,16 +247,19 @@ class DataSourceScaleway(sources.DataSource):
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
- netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
- subnets = [{'type': 'dhcp4'}]
- if self.metadata['ipv6']:
- subnets += [{'type': 'static',
- 'address': '%s' % self.metadata['ipv6']['address'],
- 'gateway': '%s' % self.metadata['ipv6']['gateway'],
- 'netmask': '%s' % self.metadata['ipv6']['netmask'],
- }]
- netcfg['subnets'] = subnets
- self._network_config = {'version': 1, 'config': [netcfg]}
+ netcfg = {"type": "physical", "name": "%s" % self._fallback_interface}
+ subnets = [{"type": "dhcp4"}]
+ if self.metadata["ipv6"]:
+ subnets += [
+ {
+ "type": "static",
+ "address": "%s" % self.metadata["ipv6"]["address"],
+ "gateway": "%s" % self.metadata["ipv6"]["gateway"],
+ "netmask": "%s" % self.metadata["ipv6"]["netmask"],
+ }
+ ]
+ netcfg["subnets"] = subnets
+ self._network_config = {"version": 1, "config": [netcfg]}
return self._network_config
@property
@@ -256,14 +267,14 @@ class DataSourceScaleway(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['id']
+ return self.metadata["id"]
def get_public_ssh_keys(self):
- ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key["key"] for key in self.metadata["ssh_public_keys"]]
akeypre = "AUTHORIZED_KEY="
plen = len(akeypre)
- for tag in self.metadata.get('tags', []):
+ for tag in self.metadata.get("tags", []):
if not tag.startswith(akeypre):
continue
ssh_keys.append(tag[:plen].replace("_", " "))
@@ -271,7 +282,7 @@ class DataSourceScaleway(sources.DataSource):
return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata['hostname']
+ return self.metadata["hostname"]
@property
def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index fd292baa..40f915fa 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -32,55 +32,51 @@ import socket
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit import serial, sources, subp, util
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
- 'hostname': ('sdc:hostname', True),
- 'dns_domain': ('sdc:dns_domain', True),
+ "instance-id": ("sdc:uuid", True),
+ "local-hostname": ("hostname", True),
+ "public-keys": ("root_authorized_keys", True),
+ "user-script": ("user-script", False),
+ "legacy-user-data": ("user-data", False),
+ "user-data": ("cloud-init:user-data", False),
+ "iptables_disable": ("iptables_disable", True),
+ "motd_sys_info": ("motd_sys_info", True),
+ "availability_zone": ("sdc:datacenter_name", True),
+ "vendor-data": ("sdc:vendor-data", False),
+ "operator-script": ("sdc:operator-script", False),
+ "hostname": ("sdc:hostname", True),
+ "dns_domain": ("sdc:dns_domain", True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
- 'dns_servers': 'sdc:resolvers',
- 'routes': 'sdc:routes',
+ "network-data": "sdc:nics",
+ "dns_servers": "sdc:resolvers",
+ "routes": "sdc:routes",
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
+DS_NAME = "SmartOS"
+DS_CFG_PATH = ["datasource", DS_NAME]
NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid'
- 'user-data',
- 'user-script',
+ "iptables_disable",
+ "motd_sys_info",
+ "root_authorized_keys",
+ "sdc:datacenter_name",
+ "sdc:uuiduser-data",
+ "user-script",
]
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
+METADATA_SOCKFILE = "/native/.zonecontrol/metadata.sock"
+SERIAL_DEVICE = "/dev/ttyS1"
SERIAL_TIMEOUT = 60
# BUILT-IN DATASOURCE CONFIGURATION
@@ -98,24 +94,26 @@ SERIAL_TIMEOUT = 60
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ "serial_device": SERIAL_DEVICE,
+ "serial_timeout": SERIAL_TIMEOUT,
+ "metadata_sockfile": METADATA_SOCKFILE,
+ "no_base64_decode": NO_BASE64_DECODE,
+ "base64_keys": [],
+ "base64_all": False,
+ "disk_aliases": {"ephemeral0": "/dev/vdb"},
}
BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "mbr",
+ "layout": False,
+ "overwrite": False,
+ }
},
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext4',
- 'device': 'ephemeral0'}],
+ "fs_setup": [
+ {"label": "ephemeral0", "filesystem": "ext4", "device": "ephemeral0"}
+ ],
}
# builtin vendor-data is a boothook that writes a script into
@@ -170,18 +168,27 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
self.metadata = {}
self.network_data = None
self._network_config = None
- self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
@@ -200,25 +207,28 @@ class DataSourceSmartOS(sources.DataSource):
if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
+ metadata_sockfile=self.ds_cfg["metadata_sockfile"],
+ serial_device=self.ds_cfg["serial_device"],
+ serial_timeout=self.ds_cfg["serial_timeout"],
+ )
def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
+ """Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
- '''
+ """
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
+ LOG.debug("Instance provisioning state set as successful")
+ svc_path = "/var/svc"
+ if os.path.exists("/".join([svc_path, "provisioning"])):
+ os.rename(
+ "/".join([svc_path, "provisioning"]),
+ "/".join([svc_path, "provision_success"]),
+ )
def _get_data(self):
self._init()
@@ -231,8 +241,10 @@ class DataSourceSmartOS(sources.DataSource):
return False
if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
+ LOG.debug(
+ "No metadata device '%r' found for SmartOS datasource",
+ self.md_client,
+ )
return False
# Open once for many requests, rather than once for each request
@@ -255,24 +267,33 @@ class DataSourceSmartOS(sources.DataSource):
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
+ data_d = os.path.join(
+ self.paths.get_cpath(), "instances", md["instance-id"], "data"
+ )
+ user_script = os.path.join(data_d, "user-script")
u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
+ write_boot_content(
+ md.get("user-script"),
+ content_f=user_script,
+ link=u_script_l,
+ shebang=True,
+ mode=0o700,
+ )
+
+ operator_script = os.path.join(data_d, "operator-script")
+ write_boot_content(
+ md.get("operator-script"),
+ content_f=operator_script,
+ shebang=False,
+ mode=0o700,
+ )
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
+ u_data = md.get("legacy-user-data")
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
@@ -280,38 +301,39 @@ class DataSourceSmartOS(sources.DataSource):
# The hostname may or may not be qualified with the local domain name.
# This follows section 3.14 of RFC 2132.
- if not md['local-hostname']:
- if md['hostname']:
- md['local-hostname'] = md['hostname']
+ if not md["local-hostname"]:
+ if md["hostname"]:
+ md["local-hostname"] = md["hostname"]
else:
- md['local-hostname'] = md['instance-id']
+ md["local-hostname"] = md["instance-id"]
ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
+ if md["user-data"]:
+ ud = md["user-data"]
+
+ if not md["vendor-data"]:
+ md["vendor-data"] = BUILTIN_VENDOR_DATA % {
+ "user_script": user_script,
+ "operator_script": operator_script,
+ "per_boot_d": os.path.join(
+ self.paths.get_cpath("scripts"), "per-boot"
+ ),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
- self.routes_data = md['routes']
+ self.vendordata_raw = md["vendor-data"]
+ self.network_data = md["network-data"]
+ self.routes_data = md["routes"]
self._set_provisioned()
return True
def _get_subplatform(self):
- return 'serial (%s)' % SERIAL_DEVICE
+ return "serial (%s)" % SERIAL_DEVICE
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
@@ -319,7 +341,7 @@ class DataSourceSmartOS(sources.DataSource):
return {}
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def network_config(self):
@@ -329,12 +351,12 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(
- network_data=self.network_data,
- dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain'],
- routes=self.routes_data))
+ self._network_config = convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata["dns_servers"],
+ dns_domain=self.metadata["dns_domain"],
+ routes=self.routes_data,
+ )
return self._network_config
@@ -353,10 +375,12 @@ class JoyentMetadataClient(object):
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
+
line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
+ r"V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)"
+ r" (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)"
+ r"( (?P<payload>.+))?)"
+ )
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
@@ -365,43 +389,50 @@ class JoyentMetadataClient(object):
self.fp = fp
def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+ return "{0:08x}".format(
+ binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF
+ )
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
+ if int(frame_data["length"]) != len(frame_data["body"]):
raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
+ "Incorrect frame length given ({0} != {1}).".format(
+ frame_data["length"], len(frame_data["body"])
+ )
+ )
+ expected_checksum = self._checksum(frame_data["body"])
+ if frame_data["checksum"] != expected_checksum:
raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
+ "Invalid checksum (expected: {0}; got {1}).".format(
+ expected_checksum, frame_data["checksum"]
+ )
+ )
+ if frame_data["request_id"] != expected_request_id:
raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
+ "Request ID mismatch (expected: {0}; got {1}).".format(
+ expected_request_id, frame_data["request_id"]
+ )
+ )
+ if not frame_data.get("payload", None):
+ LOG.debug("No value found.")
return None
- value = util.b64d(frame_data['payload'])
+ value = util.b64d(frame_data["payload"])
LOG.debug('Value "%s" found.', value)
return value
def _readline(self):
"""
- Reads a line a byte at a time until \n is encountered. Returns an
- ascii string with the trailing newline removed.
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ascii string with the trailing newline removed.
- If a timeout (per-byte) is set and it expires, a
- JoyentMetadataFetchException will be thrown.
+ If a timeout (per-byte) is set and it expires, a
+ JoyentMetadataFetchException will be thrown.
"""
response = []
def as_ascii():
- return b''.join(response).decode('ascii')
+ return b"".join(response).decode("ascii")
msg = "Partial response: '%s'"
while True:
@@ -409,7 +440,7 @@ class JoyentMetadataClient(object):
byte = self.fp.read(1)
if len(byte) == 0:
raise JoyentMetadataTimeoutException(msg % as_ascii())
- if byte == b'\n':
+ if byte == b"\n":
return as_ascii()
response.append(byte)
except OSError as exc:
@@ -420,26 +451,33 @@ class JoyentMetadataClient(object):
raise
def _write(self, msg):
- self.fp.write(msg.encode('ascii'))
+ self.fp.write(msg.encode("ascii"))
self.fp.flush()
def _negotiate(self):
- LOG.debug('Negotiating protocol V2')
- self._write('NEGOTIATE V2\n')
+ LOG.debug("Negotiating protocol V2")
+ self._write("NEGOTIATE V2\n")
response = self._readline()
LOG.debug('read "%s"', response)
- if response != 'V2_OK':
+ if response != "V2_OK":
raise JoyentMetadataFetchException(
- 'Invalid response "%s" to "NEGOTIATE V2"' % response)
- LOG.debug('Negotiation complete')
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response
+ )
+ LOG.debug("Negotiation complete")
def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
+ request_id = "{0:08x}".format(random.randint(0, 0xFFFFFFFF))
+ message_body = " ".join(
+ (
+ request_id,
+ rtype,
+ )
+ )
if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
+ message_body += " " + base64.b64encode(param.encode()).decode()
+ msg = "V2 {0} {1} {2}\n".format(
+ len(message_body), self._checksum(message_body), message_body
+ )
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
@@ -454,14 +492,14 @@ class JoyentMetadataClient(object):
LOG.debug('Read "%s" from metadata transport.', response)
- if 'SUCCESS' not in response:
+ if "SUCCESS" not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
+ result = self.request(rtype="GET", param=key)
if result is None:
return default
if result and strip:
@@ -475,18 +513,19 @@ class JoyentMetadataClient(object):
return json.loads(result)
def list(self):
- result = self.request(rtype='KEYS')
+ result = self.request(rtype="KEYS")
if not result:
return []
- return result.split('\n')
+ return result.split("\n")
def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
+ param = b" ".join(
+ [base64.b64encode(i.encode()) for i in (key, val)]
+ ).decode()
+ return self.request(rtype="PUT", param=param)
def delete(self, key):
- return self.request(rtype='DELETE', param=key)
+ return self.request(rtype="DELETE", param=key)
def close_transport(self):
if self.fp:
@@ -515,7 +554,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
+ self.fp = sock.makefile("rwb")
self._negotiate()
def exists(self):
@@ -526,8 +565,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
- fp=None):
+ def __init__(
+ self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM, fp=None
+ ):
super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -546,7 +586,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._negotiate()
def _flush(self):
- LOG.debug('Flushing input')
+ LOG.debug("Flushing input")
# Read any pending data
timeout = self.fp.timeout
self.fp.timeout = 0.1
@@ -555,7 +595,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._readline()
except JoyentMetadataTimeoutException:
break
- LOG.debug('Input empty')
+ LOG.debug("Input empty")
# Send a newline and expect "invalid command". Keep trying until
# successful. Retry rather frequently so that the "Is the host
@@ -567,24 +607,29 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self.fp.timeout = timeout
while True:
LOG.debug('Writing newline, expecting "invalid command"')
- self._write('\n')
+ self._write("\n")
try:
response = self._readline()
- if response == 'invalid command':
+ if response == "invalid command":
break
- if response == 'FAILURE':
+ if response == "FAILURE":
LOG.debug('Got "FAILURE". Retrying.')
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. '
- 'Is the host metadata service running?')
+ LOG.warning(
+ "Timeout while initializing metadata client. "
+ "Is the host metadata service running?"
+ )
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
+ self.__class__.__name__,
+ self.device,
+ self.timeout,
+ )
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
@@ -616,7 +661,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
keys = None
if self.base64_all is None:
keys = self.list()
- if 'base64_all' in keys:
+ if "base64_all" in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
@@ -629,7 +674,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
if keys is None:
keys = self.list()
b64_keys = set()
- if 'base64_keys' in keys:
+ if "base64_keys" in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
@@ -643,8 +688,9 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
+ return super(JoyentMetadataLegacySerialClient, self).get(
+ key, default=default, strip=strip
+ )
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
@@ -676,9 +722,12 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
+ smartos_type=None,
+ metadata_sockfile=METADATA_SOCKFILE,
+ serial_device=SERIAL_DEVICE,
+ serial_timeout=SERIAL_TIMEOUT,
+ uname_version=None,
+):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
@@ -687,11 +736,14 @@ def jmc_client_factory(
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
+ device=serial_device,
+ timeout=serial_timeout,
+ smartos_type=smartos_type,
+ )
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
- smartos_type=smartos_type)
+ return JoyentMetadataSocketClient(
+ socketpath=metadata_sockfile, smartos_type=smartos_type
+ )
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -704,12 +756,14 @@ def identify_file(content_f):
LOG.debug("script %s mime type is %s", content_f, f_type)
except subp.ProcessExecutionError as e:
util.logexc(
- LOG, ("Failed to identify script type for %s" % content_f, e))
+ LOG, ("Failed to identify script type for %s" % content_f, e)
+ )
return None if f_type is None else f_type.strip()
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
+def write_boot_content(
+ content, content_f, link=None, shebang=False, mode=0o400
+):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -743,7 +797,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
f_type = identify_file(content_f)
if f_type == "text/plain":
util.write_file(
- content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode
+ )
LOG.debug("added shebang to file %s", content_f)
if link:
@@ -764,7 +819,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version == 'BrandZ virtual linux':
+ if uname_version == "BrandZ virtual linux":
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -772,16 +827,16 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and system_type.startswith('SmartDC'):
+ if system_type and system_type.startswith("SmartDC"):
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None,
- routes=None):
+def convert_smartos_network_data(
+ network_data=None, dns_servers=None, dns_domain=None, routes=None
+):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -806,28 +861,28 @@ def convert_smartos_network_data(network_data=None,
"""
valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
+ "physical": [
+ "mac_address",
+ "mtu",
+ "name",
+ "params",
+ "subnets",
+ "type",
],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'metric',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
+ "subnet": [
+ "address",
+ "broadcast",
+ "dns_nameservers",
+ "dns_search",
+ "metric",
+ "pointopoint",
+ "routes",
+ "scope",
+ "type",
],
- 'route': [
- 'network',
- 'gateway',
+ "route": [
+ "network",
+ "gateway",
],
}
@@ -847,56 +902,64 @@ def convert_smartos_network_data(network_data=None,
routes = []
def is_valid_ipv4(addr):
- return '.' in addr
+ return "." in addr
def is_valid_ipv6(addr):
- return ':' in addr
+ return ":" in addr
pgws = {
- 'ipv4': {'match': is_valid_ipv4, 'gw': None},
- 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ "ipv4": {"match": is_valid_ipv4, "gw": None},
+ "ipv6": {"match": is_valid_ipv6, "gw": None},
}
config = []
for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
+ cfg = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["physical"]
+ )
+ cfg.update({"type": "physical", "name": nic["interface"]})
+ if "mac" in nic:
+ cfg.update({"mac_address": nic["mac"]})
subnets = []
- for ip in nic.get('ips', []):
+ for ip in nic.get("ips", []):
if ip == "dhcp":
- subnet = {'type': 'dhcp4'}
+ subnet = {"type": "dhcp4"}
else:
routeents = []
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- })
-
- proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ subnet = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["subnet"]
+ )
+ subnet.update(
+ {
+ "type": "static",
+ "address": ip,
+ }
+ )
+
+ proto = "ipv4" if is_valid_ipv4(ip) else "ipv6"
# Only use gateways for 'primary' nics
- if 'primary' in nic and nic.get('primary', False):
+ if "primary" in nic and nic.get("primary", False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
- if not pgws[proto]['gw']:
- gateways = [gw for gw in nic.get('gateways', [])
- if pgws[proto]['match'](gw)]
+ if not pgws[proto]["gw"]:
+ gateways = [
+ gw
+ for gw in nic.get("gateways", [])
+ if pgws[proto]["match"](gw)
+ ]
if len(gateways):
- pgws[proto]['gw'] = gateways[0]
- subnet.update({'gateway': pgws[proto]['gw']})
+ pgws[proto]["gw"] = gateways[0]
+ subnet.update({"gateway": pgws[proto]["gw"]})
for route in routes:
- rcfg = dict((k, v) for k, v in route.items()
- if k in valid_keys['route'])
+ rcfg = dict(
+ (k, v)
+ for k, v in route.items()
+ if k in valid_keys["route"]
+ )
# Linux uses the value of 'gateway' to determine
# automatically if the route is a forward/next-hop
# (non-local IP for gateway) or an interface/resolver
@@ -909,25 +972,29 @@ def convert_smartos_network_data(network_data=None,
# to see if it's in the prefix. We can then smartly
# add or not-add this route. But for now,
# when in doubt, use brute force! Routes for everyone!
- rcfg.update({'network': route['dst']})
+ rcfg.update({"network": route["dst"]})
routeents.append(rcfg)
- subnet.update({'routes': routeents})
+ subnet.update({"routes": routeents})
subnets.append(subnet)
- cfg.update({'subnets': subnets})
+ cfg.update({"subnets": subnets})
config.append(cfg)
if dns_servers:
config.append(
- {'type': 'nameserver', 'address': dns_servers,
- 'search': dns_domain})
+ {
+ "type": "nameserver",
+ "address": dns_servers,
+ "search": dns_domain,
+ }
+ )
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
# Used to match classes to dependencies
datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM,)),
]
@@ -938,13 +1005,17 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import sys
+
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
+ keys = (
+ list(SMARTOS_ATTRIB_JSON.keys())
+ + list(SMARTOS_ATTRIB_MAP.keys())
+ + ["network_config"]
+ )
else:
keys = sys.argv[1:]
@@ -956,14 +1027,19 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain',
- 'routes'):
+ for depkey in (
+ "network-data",
+ "dns_servers",
+ "dns_domain",
+ "routes",
+ ):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
- network_data=data['network-data'],
- dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'],
- routes=data['routes'])
+ network_data=data["network-data"],
+ dns_servers=data["dns_servers"],
+ dns_domain=data["dns_domain"],
+ routes=data["routes"],
+ )
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
@@ -977,7 +1053,6 @@ if __name__ == "__main__":
for key in keys:
load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
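
The SmartOS hunks above are a mechanical reformat (quote style and black-style wrapping); the behaviour of convert_smartos_network_data() is unchanged: every sdc:nics entry becomes a "physical" device, each address in its "ips" list becomes a dhcp4 or static subnet, and only the primary NIC contributes a gateway per address family. Below is a minimal standalone sketch of that mapping, not the real cloud-init function: the helper name and the sample NIC are invented, and the per-address-family gateway matching and route copying shown in the diff are omitted.

    # Simplified sketch of the sdc:nics -> network config v1 mapping above.
    def to_network_config_v1(nics):
        config = []
        for nic in nics:
            subnets = []
            for ip in nic.get("ips", []):
                if ip == "dhcp":
                    subnets.append({"type": "dhcp4"})
                    continue
                subnet = {"type": "static", "address": ip}
                # Only the primary NIC gets a default gateway in this sketch.
                if nic.get("primary") and nic.get("gateways"):
                    subnet["gateway"] = nic["gateways"][0]
                subnets.append(subnet)
            config.append(
                {
                    "type": "physical",
                    "name": nic["interface"],
                    "mac_address": nic.get("mac"),
                    "subnets": subnets,
                }
            )
        return {"version": 1, "config": config}

    sample_nics = [
        {
            "interface": "net0",
            "mac": "90:b8:d0:ae:64:51",
            "primary": True,
            "ips": ["8.12.42.102/24"],
            "gateways": ["8.12.42.1"],
        }
    ]
    print(to_network_config_v1(sample_nics))

Running the sketch prints a version-1 network config of the same shape the datasource hands to cloud-init's network renderers.
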
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
new file mode 100644
index 00000000..f4b78da5
--- /dev/null
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -0,0 +1,162 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# UpCloud server metadata API:
+# https://developers.upcloud.com/1.3/8-servers/#metadata-service
+
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import sources, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.sources.helpers import upcloud as uc_helper
+
+LOG = logging.getLogger(__name__)
+
+BUILTIN_DS_CONFIG = {"metadata_url": "http://169.254.169.254/metadata/v1.json"}
+
+# Wait for up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
+
+
+class DataSourceUpCloud(sources.DataSource):
+
+ dsname = "UpCloud"
+
+ # We'll perform DHCP setup only in init-local, see DataSourceUpCloudLocal
+ perform_dhcp_setup = False
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
+ self.metadata = dict()
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = None
+
+ def _get_sysinfo(self):
+ return uc_helper.read_sysinfo()
+
+ def _read_metadata(self):
+ return uc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+
+ def _get_data(self):
+ (is_upcloud, server_uuid) = self._get_sysinfo()
+
+ # only proceed if we know we are on UpCloud
+ if not is_upcloud:
+ return False
+
+ LOG.info("Running on UpCloud. server_uuid=%s", server_uuid)
+
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
+ try:
+ LOG.debug("Finding a fallback NIC")
+ nic = cloudnet.find_fallback_nic()
+ LOG.debug("Discovering metadata via DHCP interface %s", nic)
+ with EphemeralDHCPv4(nic):
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ LOG.debug(
+ "Discovering metadata without DHCP-configured networking"
+ )
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ LOG.info(
+ "No DHCP-enabled interfaces available, "
+ "unable to fetch metadata for %s",
+ server_uuid,
+ )
+ return False
+
+ self.metadata_full = md
+ self.metadata["instance-id"] = md.get("instance_id", server_uuid)
+ self.metadata["local-hostname"] = md.get("hostname")
+ self.metadata["network"] = md.get("network")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
+
+ return True
+
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ """
+ Configure the networking. This needs to be done each boot,
+ since the IP and interface information might have changed
+ due to reconfiguration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ raw_network_config = self.metadata.get("network")
+ if not raw_network_config:
+ raise Exception("Unable to get network meta-data from server....")
+
+ self._network_config = uc_helper.convert_network_config(
+ raw_network_config,
+ )
+
+ return self._network_config
+
+
+class DataSourceUpCloudLocal(DataSourceUpCloud):
+ """
+ Run in init-local using a DHCP discovery prior to metadata crawl.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration
+ then render the network configuration for that instance based on metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM,)),
+ (DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+# vi: ts=4 expandtab
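
DataSourceUpCloud reads a single JSON document from the link-local metadata service and maps its fields onto cloud-init metadata; DataSourceUpCloudLocal does the same in the init-local stage after raising a temporary lease with EphemeralDHCPv4 on the fallback NIC. A rough sketch of the fetch performed once networking is up, assuming it runs on an UpCloud instance where the metadata address is reachable (the URL and the printed keys come from the diff above; everything else is plain standard library, not the datasource itself):

    # Sketch: fetch the UpCloud metadata endpoint and pick out the keys
    # that _get_data() consumes.
    import json
    import urllib.request

    METADATA_URL = "http://169.254.169.254/metadata/v1.json"

    with urllib.request.urlopen(METADATA_URL, timeout=2) as resp:
        md = json.load(resp)

    print("instance-id:   ", md.get("instance_id"))
    print("local-hostname:", md.get("hostname"))
    print("region:        ", md.get("region", "default"))
    print("has network:   ", bool(md.get("network")))
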
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
new file mode 100644
index 00000000..6ef7c9d5
--- /dev/null
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -0,0 +1,869 @@
+# Cloud-Init DataSource for VMware
+#
+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Anish Swaminathan <anishs@vmware.com>
+# Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Cloud-Init DataSource for VMware
+
+This module provides a cloud-init datasource for VMware systems and supports
+multiple transport types, including:
+
+ * EnvVars
+ * GuestInfo
+
+Netifaces (https://github.com/al45tair/netifaces)
+
+ Please note this module relies on the netifaces project to introspect the
+ runtime network configuration of the host on which this datasource is
+ running. This is in contrast to the rest of cloud-init which uses the
+ cloudinit/netinfo module.
+
+ The reasons for using netifaces include:
+
+ * Netifaces is built in C and is more portable across multiple systems
+ and more deterministic than shell exec'ing local network commands and
+ parsing their output.
+
+ * Netifaces provides a stable way to determine the view of the host's
+ network after DHCP has brought the network online. Unlike most other
+ datasources, this datasource still provides support for JINJA queries
+ based on networking information even when the network is based on a
+ DHCP lease. While this does not tie this datasource directly to
+ netifaces, it does mean the ability to consistently obtain the
+ correct information is paramount.
+
+ * It is currently possible to execute this datasource on macOS
+ (which many developers use today) to print the output of the
+ get_host_info function. This function calls netifaces to obtain
+ the same runtime network configuration that the datasource would
+ persist to the local system's instance data.
+
+ However, the netinfo module fails on macOS. The result is either a
+ hung operation that requires a SIGINT to return control to the user,
+ or, if brew is used to install iproute2mac, the ip commands are used
+ but produce output the netinfo module is unable to parse.
+
+ While macOS is not a target of cloud-init, this feature is quite
+ useful when working on this datasource.
+
+ For more information about this behavior, please see the following
+ PR comment, https://bit.ly/3fG7OVh.
+
+ The authors of this datasource are not opposed to moving away from
+ netifaces. The goal may be to eventually do just that. This proviso was
+ added to the top of this module as a way to remind future-us and others
+ why netifaces was used in the first place in order to either smooth the
+ transition away from netifaces or embrace it further up the cloud-init
+ stack.
+"""
+
+import collections
+import copy
+import ipaddress
+import json
+import os
+import socket
+import time
+
+import netifaces
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import sources, util
+from cloudinit.subp import ProcessExecutionError, subp, which
+
+PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
+
+LOG = logging.getLogger(__name__)
+NOVAL = "No value found"
+
+DATA_ACCESS_METHOD_ENVVAR = "envvar"
+DATA_ACCESS_METHOD_GUESTINFO = "guestinfo"
+
+VMWARE_RPCTOOL = which("vmware-rpctool")
+REDACT = "redact"
+CLEANUP_GUESTINFO = "cleanup-guestinfo"
+VMX_GUESTINFO = "VMX_GUESTINFO"
+GUESTINFO_EMPTY_YAML_VAL = "---"
+
+LOCAL_IPV4 = "local-ipv4"
+LOCAL_IPV6 = "local-ipv6"
+WAIT_ON_NETWORK = "wait-on-network"
+WAIT_ON_NETWORK_IPV4 = "ipv4"
+WAIT_ON_NETWORK_IPV6 = "ipv6"
+
+
+class DataSourceVMware(sources.DataSource):
+ """
+ Setting the hostname:
+ The hostname is set by way of the metadata key "local-hostname".
+
+ Setting the instance ID:
+ The instance ID may be set by way of the metadata key "instance-id".
+ However, if this value is absent then the instance ID is read
+ from the file /sys/class/dmi/id/product_uuid.
+
+ Configuring the network:
+ The network is configured by setting the metadata key "network"
+ with a value consistent with Network Config Versions 1 or 2,
+ depending on the Linux distro's version of cloud-init:
+
+ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1
+ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2
+
+ For example, CentOS 7's official cloud-init package is version
+ 0.7.9 and does not support Network Config Version 2. However,
+ this datasource still supports supplying Network Config Version 2
+ data as long as the Linux distro's cloud-init package is new
+ enough to parse the data.
+
+ The metadata key "network.encoding" may be used to indicate the
+ format of the metadata key "network". Valid encodings are base64
+ and gzip+base64.
+ """
+
+ dsname = "VMware"
+
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+
+ self.data_access_method = None
+ self.vmware_rpctool = VMWARE_RPCTOOL
+
+ def _get_data(self):
+ """
+ _get_data loads the metadata, userdata, and vendordata from one of
+ the following locations in the given order:
+
+ * envvars
+ * guestinfo
+
+ Please note when updating this function with support for new data
+ transports, the order should match the order in the dscheck_VMware
+ function from the file ds-identify.
+ """
+
+ # Initialize the locally scoped metadata, userdata, and vendordata
+ # variables. They are assigned below depending on the detected data
+ # access method.
+ md, ud, vd = None, None, None
+
+ # First check to see if there is data via env vars.
+ if os.environ.get(VMX_GUESTINFO, ""):
+ md = guestinfo_envvar("metadata")
+ ud = guestinfo_envvar("userdata")
+ vd = guestinfo_envvar("vendordata")
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR
+
+ # At this point, all additional data transports are valid only on
+ # a VMware platform.
+ if not self.data_access_method:
+ system_type = dmi.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+ return False
+ if "vmware" not in system_type.lower():
+ LOG.debug("Not a VMware platform")
+ return False
+
+ # If no data was detected, check the guestinfo transport next.
+ if not self.data_access_method:
+ if self.vmware_rpctool:
+ md = guestinfo("metadata", self.vmware_rpctool)
+ ud = guestinfo("userdata", self.vmware_rpctool)
+ vd = guestinfo("vendordata", self.vmware_rpctool)
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO
+
+ if not self.data_access_method:
+ LOG.error("failed to find a valid data access method")
+ return False
+
+ LOG.info("using data access method %s", self._get_subplatform())
+
+ # Get the metadata.
+ self.metadata = process_metadata(load_json_or_yaml(md))
+
+ # Get the user data.
+ self.userdata_raw = ud
+
+ # Get the vendor data.
+ self.vendordata_raw = vd
+
+ # Redact any sensitive information.
+ self.redact_keys()
+
+ # get_data returns true if there is any available metadata,
+ # userdata, or vendordata.
+ if self.metadata or self.userdata_raw or self.vendordata_raw:
+ return True
+ else:
+ return False
+
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+ Unless the datasource has set mode to 'local', then networking
+ per 'fallback' or per 'network_config' will have been written and
+ brought up the OS at this point.
+ """
+
+ host_info = wait_on_network(self.metadata)
+ LOG.info("got host-info: %s", host_info)
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ advertise_local_ip_addrs(host_info)
+
+ # Ensure the metadata gets updated with information about the
+ # host, including the network interfaces, default IP addresses,
+ # etc.
+ self.metadata = util.mergemanydict([self.metadata, host_info])
+
+ # Persist the instance data for versions of cloud-init that support
+ # doing so. This occurs here rather than in the get_data call in
+ # order to ensure that the network interfaces are up and can be
+ # persisted with the metadata.
+ self.persist_instance_data()
+
+ def _get_subplatform(self):
+ get_key_name_fn = None
+ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR:
+ get_key_name_fn = get_guestinfo_envvar_key_name
+ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ get_key_name_fn = get_guestinfo_key_name
+ else:
+ return sources.METADATA_UNKNOWN
+
+ return "%s (%s)" % (
+ self.data_access_method,
+ get_key_name_fn("metadata"),
+ )
+
+ @property
+ def network_config(self):
+ if "network" in self.metadata:
+ LOG.debug("using metadata network config")
+ else:
+ LOG.debug("using fallback network config")
+ self.metadata["network"] = {
+ "config": self.distro.generate_fallback_config(),
+ }
+ return self.metadata["network"]["config"]
+
+ def get_instance_id(self):
+ # Pull the instance ID out of the metadata if present. Otherwise
+ # read the file /sys/class/dmi/id/product_uuid for the instance ID.
+ if self.metadata and "instance-id" in self.metadata:
+ return self.metadata["instance-id"]
+ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file:
+ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower()
+ return self.metadata["instance-id"]
+
+ def get_public_ssh_keys(self):
+ for key_name in (
+ "public-keys-data",
+ "public_keys_data",
+ "public-keys",
+ "public_keys",
+ ):
+ if key_name in self.metadata:
+ return sources.normalize_pubkey_data(self.metadata[key_name])
+ return []
+
+ def redact_keys(self):
+ # Determine if there are any keys to redact.
+ keys_to_redact = None
+ if REDACT in self.metadata:
+ keys_to_redact = self.metadata[REDACT]
+ elif CLEANUP_GUESTINFO in self.metadata:
+ # This is for backwards compatibility.
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO]
+
+ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool)
+
+
+def decode(key, enc_type, data):
+ """
+ decode returns the decoded string value of data
+ key is a string used to identify the data being decoded in log messages
+ """
+ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type)
+
+ raw_data = None
+ if enc_type in ["gzip+base64", "gz+b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.decomp_gzip(util.b64d(data))
+ elif enc_type in ["base64", "b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.b64d(data)
+ else:
+ LOG.debug("Plain-text data %s", key)
+ raw_data = data
+
+ return util.decode_binary(raw_data)
+
+
+def get_none_if_empty_val(val):
+ """
+ get_none_if_empty_val returns None if the provided value, once stripped
+ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL.
+
+ The return value is always a string, regardless of whether the input is
+ a bytes class or a string.
+ """
+
+ # If the provided value is a bytes class, convert it to a string to
+ # simplify the rest of this function's logic.
+ val = util.decode_binary(val)
+ val = val.rstrip()
+ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL:
+ return None
+ return val
+
+
+def advertise_local_ip_addrs(host_info):
+ """
+ advertise_local_ip_addrs gets the local IP address information from
+ the provided host_info map and sets the addresses in the guestinfo
+ namespace
+ """
+ if not host_info:
+ return
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ local_ipv4 = host_info.get(LOCAL_IPV4)
+ if local_ipv4:
+ guestinfo_set_value(LOCAL_IPV4, local_ipv4)
+ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4)
+
+ local_ipv6 = host_info.get(LOCAL_IPV6)
+ if local_ipv6:
+ guestinfo_set_value(LOCAL_IPV6, local_ipv6)
+ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6)
+
+
+def handle_returned_guestinfo_val(key, val):
+ """
+ handle_returned_guestinfo_val returns the provided value if it is
+ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is
+ returned
+ """
+ val = get_none_if_empty_val(val)
+ if val:
+ return val
+ LOG.debug("No value found for key %s", key)
+ return None
+
+
+def get_guestinfo_key_name(key):
+ return "guestinfo." + key
+
+
+def get_guestinfo_envvar_key_name(key):
+ return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1)
+
+
+def guestinfo_envvar(key):
+ val = guestinfo_envvar_get_value(key)
+ if not val:
+ return None
+ enc_type = guestinfo_envvar_get_value(key + ".encoding")
+ return decode(get_guestinfo_envvar_key_name(key), enc_type, val)
+
+
+def guestinfo_envvar_get_value(key):
+ env_key = get_guestinfo_envvar_key_name(key)
+ return handle_returned_guestinfo_val(key, os.environ.get(env_key, ""))
+
+
+def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo returns the guestinfo value for the provided key, decoding
+ the value when required
+ """
+ val = guestinfo_get_value(key, vmware_rpctool)
+ if not val:
+ return None
+ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool)
+ return decode(get_guestinfo_key_name(key), enc_type, val)
+
+
+def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Returns a guestinfo value for the specified key.
+ """
+ LOG.debug("Getting guestinfo value for key %s", key)
+
+ try:
+ (stdout, stderr) = subp(
+ [
+ vmware_rpctool,
+ "info-get " + get_guestinfo_key_name(key),
+ ]
+ )
+ if stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ elif not stdout:
+ LOG.error("Failed to get guestinfo value for key %s", key)
+ return handle_returned_guestinfo_val(key, stdout)
+ except ProcessExecutionError as error:
+ if error.stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ else:
+ util.logexc(
+ LOG,
+ "Failed to get guestinfo value for key %s: %s",
+ key,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to get "
+ + "guestinfo value for key %s",
+ key,
+ )
+
+ return None
+
+
+def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Sets a guestinfo value for the specified key. Set value to an empty string
+ to clear an existing guestinfo key.
+ """
+
+ # If value is an empty string then set it to a single space as it is not
+ # possible to set a guestinfo key to an empty string. Setting a guestinfo
+ # key to a single space is as close as it gets to clearing an existing
+ # guestinfo key.
+ if value == "":
+ value = " "
+
+ LOG.debug("Setting guestinfo key=%s to value=%s", key, value)
+
+ try:
+ subp(
+ [
+ vmware_rpctool,
+ "info-set %s %s" % (get_guestinfo_key_name(key), value),
+ ]
+ )
+ return True
+ except ProcessExecutionError as error:
+ util.logexc(
+ LOG,
+ "Failed to set guestinfo key=%s to value=%s: %s",
+ key,
+ value,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to set "
+ + "guestinfo key=%s to value=%s",
+ key,
+ value,
+ )
+
+ return None
+
+
+def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo_redact_keys redacts guestinfo of all of the keys in the given
+ list. each key will have its value set to "---". Since the value is valid
+ YAML, cloud-init can still read it if it tries.
+ """
+ if not keys:
+ return
+ if not type(keys) in (list, tuple):
+ keys = [keys]
+ for key in keys:
+ key_name = get_guestinfo_key_name(key)
+ LOG.info("clearing %s", key_name)
+ if not guestinfo_set_value(
+ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool
+ ):
+ LOG.error("failed to clear %s", key_name)
+ LOG.info("clearing %s.encoding", key_name)
+ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool):
+ LOG.error("failed to clear %s.encoding", key_name)
+
+
+def load_json_or_yaml(data):
+ """
+ load first attempts to unmarshal the provided data as JSON, and if
+ that fails then attempts to unmarshal the data as YAML. If data is
+ None then a new dictionary is returned.
+ """
+ if not data:
+ return {}
+ try:
+ return util.load_json(data)
+ except (json.JSONDecodeError, TypeError):
+ return util.load_yaml(data)
+
+
+def process_metadata(data):
+ """
+ process_metadata processes metadata and loads the optional network
+ configuration.
+ """
+ network = None
+ if "network" in data:
+ network = data["network"]
+ del data["network"]
+
+ network_enc = None
+ if "network.encoding" in data:
+ network_enc = data["network.encoding"]
+ del data["network.encoding"]
+
+ if network:
+ if isinstance(network, collections.abc.Mapping):
+ LOG.debug("network data copied to 'config' key")
+ network = {"config": copy.deepcopy(network)}
+ else:
+ LOG.debug("network data to be decoded %s", network)
+ dec_net = decode("metadata.network", network_enc, network)
+ network = {
+ "config": load_json_or_yaml(dec_net),
+ }
+
+ LOG.debug("network data %s", network)
+ data["network"] = network
+
+ return data
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local
+ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ """
+ Return a list of data sources that match this set of dependencies
+ """
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_default_ip_addrs():
+ """
+ Returns the default IPv4 and IPv6 addresses based on the device(s) used for
+ the default route. Please note that None may be returned for either address
+ family if that family has no default route or if there are multiple
+ addresses associated with the device used by the default route for a given
+ address.
+ """
+ # TODO(promote and use netifaces in cloudinit.net* modules)
+ gateways = netifaces.gateways()
+ if "default" not in gateways:
+ return None, None
+
+ default_gw = gateways["default"]
+ if (
+ netifaces.AF_INET not in default_gw
+ and netifaces.AF_INET6 not in default_gw
+ ):
+ return None, None
+
+ ipv4 = None
+ ipv6 = None
+
+ gw4 = default_gw.get(netifaces.AF_INET)
+ if gw4:
+ _, dev4 = gw4
+ addr4_fams = netifaces.ifaddresses(dev4)
+ if addr4_fams:
+ af_inet4 = addr4_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev4,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ # Try to get the default IPv6 address by first seeing if there is a default
+ # IPv6 route.
+ gw6 = default_gw.get(netifaces.AF_INET6)
+ if gw6:
+ _, dev6 = gw6
+ addr6_fams = netifaces.ifaddresses(dev6)
+ if addr6_fams:
+ af_inet6 = addr6_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev6,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv4 address but not IPv6, then see if there is a
+ # single IPv6 address associated with the same device associated with the
+ # default IPv4 address.
+ if ipv4 and not ipv6:
+ af_inet6 = addr4_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev4,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv6 address but not IPv4, then see if there is a
+ # single IPv4 address associated with the same device associated with the
+ # default IPv6 address.
+ if not ipv4 and ipv6:
+ af_inet4 = addr6_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev6,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ return ipv4, ipv6
+
+
+# patched socket.getfqdn() - see https://bugs.python.org/issue5004
+
+
+def getfqdn(name=""):
+ """Get fully qualified domain name from name.
+ An empty argument is interpreted as meaning the local host.
+ """
+ # TODO(may want to promote this function to util.getfqdn)
+ # TODO(may want to extend util.get_hostname to accept fqdn=True param)
+ name = name.strip()
+ if not name or name == "0.0.0.0":
+ name = util.get_hostname()
+ try:
+ addrs = socket.getaddrinfo(
+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
+ )
+ except socket.error:
+ pass
+ else:
+ for addr in addrs:
+ if addr[3]:
+ name = addr[3]
+ break
+ return name
+
+
+def is_valid_ip_addr(val):
+ """
+ Returns false if the address is loopback, link local or unspecified;
+ otherwise true is returned.
+ """
+ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc)
+ # TODO(migrate to use cloudinit.net.is_ip_addr)#
+
+ addr = None
+ try:
+ addr = ipaddress.ip_address(val)
+ except ipaddress.AddressValueError:
+ addr = ipaddress.ip_address(str(val))
+ except Exception:
+ return None
+
+ if addr.is_link_local or addr.is_loopback or addr.is_unspecified:
+ return False
+ return True
+
+
+def get_host_info():
+ """
+ Returns host information such as the host name and network interfaces.
+ """
+ # TODO(look to promote netifaces use up in cloud-init netinfo funcs)
+ host_info = {
+ "network": {
+ "interfaces": {
+ "by-mac": collections.OrderedDict(),
+ "by-ipv4": collections.OrderedDict(),
+ "by-ipv6": collections.OrderedDict(),
+ },
+ },
+ }
+ hostname = getfqdn(util.get_hostname())
+ if hostname:
+ host_info["hostname"] = hostname
+ host_info["local-hostname"] = hostname
+ host_info["local_hostname"] = hostname
+
+ default_ipv4, default_ipv6 = get_default_ip_addrs()
+ if default_ipv4:
+ host_info[LOCAL_IPV4] = default_ipv4
+ if default_ipv6:
+ host_info[LOCAL_IPV6] = default_ipv6
+
+ by_mac = host_info["network"]["interfaces"]["by-mac"]
+ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"]
+ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"]
+
+ ifaces = netifaces.interfaces()
+ for dev_name in ifaces:
+ addr_fams = netifaces.ifaddresses(dev_name)
+ af_link = addr_fams.get(netifaces.AF_LINK)
+ af_inet4 = addr_fams.get(netifaces.AF_INET)
+ af_inet6 = addr_fams.get(netifaces.AF_INET6)
+
+ mac = None
+ if af_link and "addr" in af_link[0]:
+ mac = af_link[0]["addr"]
+
+ # Do not bother recording localhost
+ if mac == "00:00:00:00:00:00":
+ continue
+
+ if mac and (af_inet4 or af_inet6):
+ key = mac
+ val = {}
+ if af_inet4:
+ af_inet4_vals = []
+ for ip_info in af_inet4:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet4_vals.append(ip_info)
+ val["ipv4"] = af_inet4_vals
+ if af_inet6:
+ af_inet6_vals = []
+ for ip_info in af_inet6:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet6_vals.append(ip_info)
+ val["ipv6"] = af_inet6_vals
+ by_mac[key] = val
+
+ if af_inet4:
+ for ip_info in af_inet4:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv4[key] = val
+
+ if af_inet6:
+ for ip_info in af_inet6:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv6[key] = val
+
+ return host_info
+
+
+def wait_on_network(metadata):
+ # Determine whether we need to wait on the network coming online.
+ wait_on_ipv4 = False
+ wait_on_ipv6 = False
+ if WAIT_ON_NETWORK in metadata:
+ wait_on_network = metadata[WAIT_ON_NETWORK]
+ if WAIT_ON_NETWORK_IPV4 in wait_on_network:
+ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4]
+ if isinstance(wait_on_ipv4_val, bool):
+ wait_on_ipv4 = wait_on_ipv4_val
+ else:
+ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val)
+ if WAIT_ON_NETWORK_IPV6 in wait_on_network:
+ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6]
+ if isinstance(wait_on_ipv6_val, bool):
+ wait_on_ipv6 = wait_on_ipv6_val
+ else:
+ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val)
+
+ # Get information about the host.
+ host_info = None
+ while host_info is None:
+ # This loop + sleep results in two logs every second while waiting
+ # for either ipv4 or ipv6 up. Do we really need to log each iteration
+ # or can we log once and log on successful exit?
+ host_info = get_host_info()
+
+ network = host_info.get("network") or {}
+ interfaces = network.get("interfaces") or {}
+ by_ipv4 = interfaces.get("by-ipv4") or {}
+ by_ipv6 = interfaces.get("by-ipv6") or {}
+
+ if wait_on_ipv4:
+ ipv4_ready = len(by_ipv4) > 0 if by_ipv4 else False
+ if not ipv4_ready:
+ host_info = None
+
+ if wait_on_ipv6:
+ ipv6_ready = len(by_ipv6) > 0 if by_ipv6 else False
+ if not ipv6_ready:
+ host_info = None
+
+ if host_info is None:
+ LOG.debug(
+ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s",
+ wait_on_ipv4,
+ ipv4_ready,
+ wait_on_ipv6,
+ ipv6_ready,
+ )
+ time.sleep(1)
+
+ LOG.debug("waiting on network complete")
+ return host_info
+
+
+def main():
+ """
+ Executed when this file is used as a program.
+ """
+ try:
+ logging.setupBasicLogging()
+ except Exception:
+ pass
+ metadata = {
+ "wait-on-network": {"ipv4": True, "ipv6": "false"},
+ "network": {"config": {"dhcp": True}},
+ }
+ host_info = wait_on_network(metadata)
+ metadata = util.mergemanydict([metadata, host_info])
+ print(util.json_dumps(metadata))
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
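
As the DataSourceVMware docstrings above describe, metadata, userdata and vendordata can arrive either through VMX_GUESTINFO environment variables or through guestinfo keys read with vmware-rpctool, and each payload may be plain text, base64, or gzip+base64, announced by a matching "<key>.encoding" value that decode() dispatches on. A standalone sketch of the gzip+base64 round trip with an invented metadata document (illustration only, not the datasource's own code path):

    # Encode a small metadata document the way a gzip+base64 guestinfo
    # payload would look, then reverse it as decode() effectively does.
    import base64
    import gzip
    import json

    metadata = {"instance-id": "vm-example-01", "local-hostname": "vm-example-01"}

    encoded = base64.b64encode(gzip.compress(json.dumps(metadata).encode()))
    decoded = json.loads(gzip.decompress(base64.b64decode(encoded)).decode())

    assert decoded == metadata
    print(decoded)

The same encodings apply to the guestinfo.userdata and guestinfo.vendordata keys, since their "<key>.encoding" companions are passed through the same decode() helper.
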
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
new file mode 100644
index 00000000..8c2e82c2
--- /dev/null
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -0,0 +1,157 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import cloudinit.sources.helpers.vultr as vultr
+from cloudinit import log as log
+from cloudinit import sources, util, version
+
+LOG = log.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ "url": "http://169.254.169.254",
+ "retries": 30,
+ "timeout": 10,
+ "wait": 5,
+ "user-agent": "Cloud-Init/%s - OS: %s Variant: %s"
+ % (
+ version.version_string(),
+ util.system_info()["system"],
+ util.system_info()["variant"],
+ ),
+}
+
+
+class DataSourceVultr(sources.DataSource):
+
+ dsname = "Vultr"
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+
+ # Initiate data and check if Vultr
+ def _get_data(self):
+ LOG.debug("Detecting if machine is a Vultr instance")
+ if not vultr.is_vultr():
+ LOG.debug("Machine is not a Vultr instance")
+ return False
+
+ LOG.debug("Machine is a Vultr instance")
+
+ # Fetch metadata
+ self.metadata = self.get_metadata()
+ self.metadata["instance-id"] = self.metadata["instance-v2-id"]
+ self.metadata["local-hostname"] = self.metadata["hostname"]
+ region = self.metadata["region"]["regioncode"]
+ if "countrycode" in self.metadata["region"]:
+ region = self.metadata["region"]["countrycode"]
+ self.metadata["region"] = region.lower()
+ self.userdata_raw = self.metadata["user-data"]
+
+ # Generate config and process data
+ self.get_datasource_data(self.metadata)
+
+ # Dump some data so diagnosing failures is manageable
+ LOG.debug("Vultr Vendor Config:")
+ LOG.debug(util.json_dumps(self.metadata["vendor-data"]))
+ LOG.debug("SUBID: %s", self.metadata["instance-id"])
+ LOG.debug("Hostname: %s", self.metadata["local-hostname"])
+ if self.userdata_raw is not None:
+ LOG.debug("User-Data:")
+ LOG.debug(self.userdata_raw)
+
+ return True
+
+ # Process metadata
+ def get_datasource_data(self, md):
+ # Generate network config
+ if "cloud_interfaces" in md:
+ # In the future we will just drop pre-configured
+ # network configs into the array. They need names though.
+ self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
+ else:
+ self.netcfg = vultr.generate_network_config(md["interfaces"])
+
+ # Grab vendordata
+ self.vendordata_raw = md["vendor-data"]
+
+ # Default hostname is "guest" for whitelabel
+ if self.metadata["local-hostname"] == "":
+ self.metadata["local-hostname"] = "guest"
+
+ self.userdata_raw = md["user-data"]
+ if self.userdata_raw == "":
+ self.userdata_raw = None
+
+ # Get the metadata by flag
+ def get_metadata(self):
+ return vultr.get_metadata(
+ self.ds_cfg["url"],
+ self.ds_cfg["timeout"],
+ self.ds_cfg["retries"],
+ self.ds_cfg["wait"],
+ self.ds_cfg["user-agent"],
+ )
+
+ # Compare subid as instance id
+ def check_instance_id(self, sys_cfg):
+ if not vultr.is_vultr():
+ return False
+
+ # Baremetal has no way to implement this in local
+ if vultr.is_baremetal():
+ return False
+
+ subid = vultr.get_sysinfo()["subid"]
+ return sources.instance_id_matches_system_uuid(subid)
+
+ # Currently unsupported
+ @property
+ def launch_index(self):
+ return None
+
+ @property
+ def network_config(self):
+ return self.netcfg
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVultr, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import sys
+
+ if not vultr.is_vultr():
+ print("Machine is not a Vultr instance")
+ sys.exit(1)
+
+ md = vultr.get_metadata(
+ BUILTIN_DS_CONFIG["url"],
+ BUILTIN_DS_CONFIG["timeout"],
+ BUILTIN_DS_CONFIG["retries"],
+ BUILTIN_DS_CONFIG["wait"],
+ BUILTIN_DS_CONFIG["user-agent"],
+ )
+ config = md["vendor-data"]
+ sysinfo = vultr.get_sysinfo()
+
+ print(util.json_dumps(sysinfo))
+ print(util.json_dumps(config))
+
+# vi: ts=4 expandtab
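
DataSourceVultr assembles its runtime settings by merging any operator-supplied "datasource: Vultr:" section of system configuration over BUILTIN_DS_CONFIG; with util.mergemanydict the earlier source wins, so operator values take precedence and the built-in defaults fill the gaps. A simplified stand-in for that merge which runs without cloud-init installed (the override values are invented; the defaults are copied from the diff, minus the generated user-agent string):

    # Reproduce the precedence of util.mergemanydict([overrides, defaults])
    # with a plain dict: overrides win, defaults fill whatever is left.
    BUILTIN_DS_CONFIG = {
        "url": "http://169.254.169.254",
        "retries": 30,
        "timeout": 10,
        "wait": 5,
    }

    # e.g. from an /etc/cloud/cloud.cfg.d drop-in: datasource -> Vultr
    operator_overrides = {"retries": 10, "timeout": 5}

    ds_cfg = dict(BUILTIN_DS_CONFIG)
    ds_cfg.update(operator_overrides)
    print(ds_cfg)
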
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9dccc687..88028cfa 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,17 +13,18 @@ import copy
import json
import os
from collections import namedtuple
+from typing import Dict, List # noqa: F401
-from cloudinit import dmi
-from cloudinit import importer
+from cloudinit import dmi, importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import type_utils
+from cloudinit import net, type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
-from cloudinit.event import EventType
+from cloudinit.distros import Distro
+from cloudinit.event import EventScope, EventType
from cloudinit.filters import launch_index
+from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
DSMODE_DISABLED = "disabled"
@@ -35,42 +36,48 @@ VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
+DS_PREFIX = "DataSource"
EXPERIMENTAL_TEXT = (
"EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
- " key may change in subsequent releases of cloud-init.")
+ " key may change in subsequent releases of cloud-init."
+)
# File in which public available instance meta-data is written
# security-sensitive key values are redacted from this world-readable file
-INSTANCE_JSON_FILE = 'instance-data.json'
+INSTANCE_JSON_FILE = "instance-data.json"
# security-sensitive key values are present in this root-readable file
-INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
-REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
+INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
+REDACT_SENSITIVE_VALUE = "redacted for non-root user"
# Key which can be provide a cloud's official product name to cloud-init
-METADATA_CLOUD_NAME_KEY = 'cloud-name'
+METADATA_CLOUD_NAME_KEY = "cloud-name"
UNSET = "_unset"
-METADATA_UNKNOWN = 'unknown'
+METADATA_UNKNOWN = "unknown"
LOG = logging.getLogger(__name__)
# CLOUD_ID_REGION_PREFIX_MAP format is:
# <region-match-prefix>: (<new-cloud-id>: <test_allowed_cloud_callable>)
CLOUD_ID_REGION_PREFIX_MAP = {
- 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions
- 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions
- 'china': ('azure-china', lambda c: c == 'azure'), # only change azure
+ "cn-": ("aws-china", lambda c: c == "aws"), # only change aws regions
+ "us-gov-": ("aws-gov", lambda c: c == "aws"), # only change aws regions
+ "china": ("azure-china", lambda c: c == "azure"), # only change azure
}
# NetworkConfigSource represents the canonical list of network config sources
# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
-_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
-NetworkConfigSource = namedtuple('NetworkConfigSource',
- _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+_NETCFG_SOURCE_NAMES = ("cmdline", "ds", "system_cfg", "fallback", "initramfs")
+NetworkConfigSource = namedtuple("NetworkConfigSource", _NETCFG_SOURCE_NAMES)(
+ *_NETCFG_SOURCE_NAMES
+)
+
+
+class DatasourceUnpickleUserDataError(Exception):
+ """Raised when userdata is unable to be unpickled due to python upgrades"""
class DataSourceNotFoundException(Exception):
@@ -81,7 +88,7 @@ class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
-def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+def process_instance_metadata(metadata, key_path="", sensitive_keys=()):
"""Process all instance metadata cleaning it up for persisting as json.
Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
@@ -93,22 +100,23 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
sens_keys = []
for key, val in metadata.items():
if key_path:
- sub_key_path = key_path + '/' + key
+ sub_key_path = key_path + "/" + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
sens_keys.append(sub_key_path)
- if isinstance(val, str) and val.startswith('ci-b64:'):
+ if isinstance(val, str) and val.startswith("ci-b64:"):
base64_encoded_keys.append(sub_key_path)
- md_copy[key] = val.replace('ci-b64:', '')
+ md_copy[key] = val.replace("ci-b64:", "")
if isinstance(val, dict):
return_val = process_instance_metadata(
- val, sub_key_path, sensitive_keys)
- base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
- sens_keys.extend(return_val.pop('sensitive_keys'))
+ val, sub_key_path, sensitive_keys
+ )
+ base64_encoded_keys.extend(return_val.pop("base64_encoded_keys"))
+ sens_keys.extend(return_val.pop("sensitive_keys"))
md_copy[key] = return_val
- md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
- md_copy['sensitive_keys'] = sorted(sens_keys)
+ md_copy["base64_encoded_keys"] = sorted(base64_encoded_keys)
+ md_copy["sensitive_keys"] = sorted(sens_keys)
return md_copy
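
The reformatted hunk above does not change what process_instance_metadata() does: string values carrying the "ci-b64:" prefix have the prefix stripped and their key paths recorded under "base64_encoded_keys", and any path named in sensitive_keys is recorded under "sensitive_keys" so it can later be redacted from the world-readable instance-data.json. A simplified standalone imitation (hypothetical helper name and example payload; not the real function):

    # Walk a metadata dict, stripping "ci-b64:" prefixes and cataloguing
    # base64-encoded and sensitive key paths, as the hunk above does.
    def catalog_metadata(metadata, key_path="", sensitive_keys=()):
        md = {}
        b64_keys, sens_keys = [], []
        for key, val in metadata.items():
            sub = key_path + "/" + key if key_path else key
            if key in sensitive_keys or sub in sensitive_keys:
                sens_keys.append(sub)
            if isinstance(val, str) and val.startswith("ci-b64:"):
                b64_keys.append(sub)
                val = val[len("ci-b64:"):]
            elif isinstance(val, dict):
                nested = catalog_metadata(val, sub, sensitive_keys)
                b64_keys.extend(nested.pop("base64_encoded_keys"))
                sens_keys.extend(nested.pop("sensitive_keys"))
                val = nested
            md[key] = val
        md["base64_encoded_keys"] = sorted(b64_keys)
        md["sensitive_keys"] = sorted(sens_keys)
        return md

    example = {"ds": {"token": "ci-b64:c2VjcmV0", "region": "fra1"}}
    print(catalog_metadata(example, sensitive_keys=("token",)))
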
@@ -117,11 +125,11 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
Replace any keys values listed in 'sensitive_keys' with redact_value.
"""
- if not metadata.get('sensitive_keys', []):
+ if not metadata.get("sensitive_keys", []):
return metadata
md_copy = copy.deepcopy(metadata)
- for key_path in metadata.get('sensitive_keys'):
- path_parts = key_path.split('/')
+ for key_path in metadata.get("sensitive_keys"):
+ path_parts = key_path.split("/")
obj = md_copy
for path in path_parts:
if isinstance(obj[path], dict) and path != path_parts[-1]:
@@ -131,17 +139,24 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+ "URLParms",
+ [
+ "max_wait_seconds",
+ "timeout_seconds",
+ "num_retries",
+ "sec_between_retries",
+ ],
+)
-class DataSource(metaclass=abc.ABCMeta):
+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
- default_locale = 'en_US.UTF-8'
+ default_locale = "en_US.UTF-8"
# Datasource name needs to be set by subclasses to determine which
# cloud-config datasource key is loaded
- dsname = '_undef'
+ dsname = "_undef"
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
@@ -162,40 +177,71 @@ class DataSource(metaclass=abc.ABCMeta):
# configuration will be used without considering any that follow.) This
# should always be a subset of the members of NetworkConfigSource with no
# duplicate entries.
- network_config_sources = (NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.ds)
+ network_config_sources = (
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds,
+ )
# read_url_params
- url_max_wait = -1 # max_wait < 0 means do not wait
- url_timeout = 10 # timeout for each metadata url read attempt
- url_retries = 5 # number of times to retry url upon 404
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
+ url_sec_between_retries = 1 # amount of seconds to wait between retries
# The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
- # network configuration on metadata changes.
- # A datasource which supports writing network config on each system boot
- # would call update_events['network'].add(EventType.BOOT).
+ # network configuration on metadata changes. These are defined in
+ # `supported_network_events`.
+ # The datasource also defines a set of default EventTypes that the
+ # datasource can react to. These are the event types that will be used
+ # if not overridden by the user.
+ # A datasource requiring to write network config on each system boot
+ # would call default_update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }
+ }
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data runs. These attribute
# values are reset via clear_cached_attrs during any update_metadata call.
cached_attr_defaults = (
- ('ec2_metadata', UNSET), ('network_json', UNSET),
- ('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None))
+ ("ec2_metadata", UNSET),
+ ("network_json", UNSET),
+ ("metadata", {}),
+ ("userdata", None),
+ ("userdata_raw", None),
+ ("vendordata", None),
+ ("vendordata_raw", None),
+ ("vendordata2", None),
+ ("vendordata2_raw", None),
+ )
_dirty_cache = False
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "security-credentials",
+ )
+
+ _ci_pkl_version = 1
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -203,10 +249,13 @@ class DataSource(metaclass=abc.ABCMeta):
self.metadata = {}
self.userdata_raw = None
self.vendordata = None
+ self.vendordata2 = None
self.vendordata_raw = None
+ self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
- self.sys_cfg, ("datasource", self.dsname), {})
+ self.sys_cfg, ("datasource", self.dsname), {}
+ )
if not self.ds_cfg:
self.ds_cfg = {}
@@ -215,6 +264,28 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self.ud_proc = ud_proc
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, "vendordata2"):
+ self.vendordata2 = None
+ if not hasattr(self, "vendordata2_raw"):
+ self.vendordata2_raw = None
+ if hasattr(self, "userdata") and self.userdata is not None:
+ # If userdata stores MIME data, on < python3.6 it will be
+ # missing the 'policy' attribute that exists on >=python3.6.
+ # Calling str() on the userdata will attempt to access this
+ # policy attribute. This will raise an exception, causing
+ # the pickle load to fail, so cloud-init will discard the cache
+ try:
+ str(self.userdata)
+ except AttributeError as e:
+ LOG.debug(
+ "Unable to unpickle datasource: %s."
+ " Ignoring current cache.",
+ e,
+ )
+ raise DatasourceUnpickleUserDataError() from e
+
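_ci_pkl_version together with the _unpickle() hook gives pickled datasources an upgrade path: when an obj.pkl written by an older cloud-init is loaded, missing attributes such as vendordata2 are backfilled instead of failing later. A small self-contained sketch of the same backfill-on-load pattern (the class and loader below are illustrative, not cloud-init's own pickle handling):

    import pickle

    class Cached:
        _version = 2                     # analogous to _ci_pkl_version

        def __init__(self):
            self.old_field = "kept"
            self.new_field = None        # attribute introduced in version 2

        def _upgrade(self):
            # Backfill anything the on-disk pickle predates, mirroring
            # DataSource._unpickle() above.
            if not hasattr(self, "new_field"):
                self.new_field = None

    def load(blob: bytes) -> Cached:
        obj = pickle.loads(blob)
        obj._upgrade()
        return obj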
def __str__(self):
return type_utils.obj_name(self)
@@ -228,28 +299,33 @@ class DataSource(metaclass=abc.ABCMeta):
# metadata to discover that content
sysinfo = instance_data["sys_info"]
return {
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': availability_zone,
- 'availability_zone': availability_zone,
- 'cloud-name': self.cloud_name,
- 'cloud_name': self.cloud_name,
- 'distro': sysinfo["dist"][0],
- 'distro_version': sysinfo["dist"][1],
- 'distro_release': sysinfo["dist"][2],
- 'platform': self.platform_type,
- 'public_ssh_keys': self.get_public_ssh_keys(),
- 'python_version': sysinfo["python"],
- 'instance-id': instance_id,
- 'instance_id': instance_id,
- 'kernel_release': sysinfo["uname"][2],
- 'local-hostname': local_hostname,
- 'local_hostname': local_hostname,
- 'machine': sysinfo["uname"][4],
- 'region': self.region,
- 'subplatform': self.subplatform,
- 'system_platform': sysinfo["platform"],
- 'variant': sysinfo["variant"]}}
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": availability_zone,
+ "availability_zone": availability_zone,
+ "cloud_id": canonical_cloud_id(
+ self.cloud_name, self.region, self.platform_type
+ ),
+ "cloud-name": self.cloud_name,
+ "cloud_name": self.cloud_name,
+ "distro": sysinfo["dist"][0],
+ "distro_version": sysinfo["dist"][1],
+ "distro_release": sysinfo["dist"][2],
+ "platform": self.platform_type,
+ "public_ssh_keys": self.get_public_ssh_keys(),
+ "python_version": sysinfo["python"],
+ "instance-id": instance_id,
+ "instance_id": instance_id,
+ "kernel_release": sysinfo["uname"][2],
+ "local-hostname": local_hostname,
+ "local_hostname": local_hostname,
+ "machine": sysinfo["uname"][4],
+ "region": self.region,
+ "subplatform": self.subplatform,
+ "system_platform": sysinfo["platform"],
+ "variant": sysinfo["variant"],
+ }
+ }
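The standardized v1 dictionary now also records a cloud_id derived by canonical_cloud_id() from cloud name, region and platform. Because _persist_instance_data() writes this dictionary to instance-data.json in the run directory (typically /run/cloud-init), the value can be read back after boot; a short sketch, assuming the default run directory:

    import json

    # instance-data.json is written by _persist_instance_data() below.
    with open("/run/cloud-init/instance-data.json") as f:
        instance_data = json.load(f)

    # "cloud_id" is the canonical identifier added above, e.g. "aws" or "azure".
    print(instance_data["v1"]["cloud_id"])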
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -290,48 +366,61 @@ class DataSource(metaclass=abc.ABCMeta):
@return True on successful write, False otherwise.
"""
- if hasattr(self, '_crawled_metadata'):
+ if hasattr(self, "_crawled_metadata"):
# Any datasource with _crawled_metadata will best represent
# most recent, 'raw' metadata
crawled_metadata = copy.deepcopy(
- getattr(self, '_crawled_metadata'))
- crawled_metadata.pop('user-data', None)
- crawled_metadata.pop('vendor-data', None)
- instance_data = {'ds': crawled_metadata}
+ getattr(self, "_crawled_metadata")
+ )
+ crawled_metadata.pop("user-data", None)
+ crawled_metadata.pop("vendor-data", None)
+ instance_data = {"ds": crawled_metadata}
else:
- instance_data = {'ds': {'meta_data': self.metadata}}
- if hasattr(self, 'network_json'):
- network_json = getattr(self, 'network_json')
+ instance_data = {"ds": {"meta_data": self.metadata}}
+ if hasattr(self, "network_json"):
+ network_json = getattr(self, "network_json")
if network_json != UNSET:
- instance_data['ds']['network_json'] = network_json
- if hasattr(self, 'ec2_metadata'):
- ec2_metadata = getattr(self, 'ec2_metadata')
+ instance_data["ds"]["network_json"] = network_json
+ if hasattr(self, "ec2_metadata"):
+ ec2_metadata = getattr(self, "ec2_metadata")
if ec2_metadata != UNSET:
- instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ instance_data["ds"]["ec2_metadata"] = ec2_metadata
+ instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT
# Add merged cloud.cfg and sys info for jinja templates and cli query
- instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
- instance_data['merged_cfg']['_doc'] = (
- 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
- ' /etc/cloud/cloud.cfg.d/')
- instance_data['sys_info'] = util.system_info()
- instance_data.update(
- self._get_standardized_metadata(instance_data))
+ instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg)
+ instance_data["merged_cfg"]["_doc"] = (
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and"
+ " /etc/cloud/cloud.cfg.d/"
+ )
+ instance_data["sys_info"] = util.system_info()
+ instance_data.update(self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
# Strip base64: prefix and set base64_encoded_keys list.
processed_data = process_instance_metadata(
json.loads(content),
- sensitive_keys=self.sensitive_metadata_keys)
+ sensitive_keys=self.sensitive_metadata_keys,
+ )
except TypeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
except UnicodeDecodeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
- json_sensitive_file = os.path.join(self.paths.run_dir,
- INSTANCE_JSON_SENSITIVE_FILE)
+ json_sensitive_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
+ cloud_id = instance_data["v1"].get("cloud_id", "none")
+ cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id")
+ util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n")
+ if os.path.exists(cloud_id_file):
+ prev_cloud_id_file = os.path.realpath(cloud_id_file)
+ else:
+ prev_cloud_id_file = cloud_id_file
+ util.sym_link(f"{cloud_id_file}-{cloud_id}", cloud_id_file, force=True)
+ if prev_cloud_id_file != cloud_id_file:
+ util.del_file(prev_cloud_id_file)
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
# World readable
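The cloud-id handling added above writes a cloud-id-<id> marker in the run directory, repoints a stable cloud-id symlink at it, and removes the marker left by a previous boot when the id changes. A self-contained sketch of the same write/relink/cleanup pattern (the extra previous-vs-target guard is this sketch's own safety check, not part of the change):

    import os

    def write_cloud_id_marker(run_dir: str, cloud_id: str) -> None:
        link = os.path.join(run_dir, "cloud-id")          # stable symlink
        target = f"{link}-{cloud_id}"                     # e.g. cloud-id-azure
        with open(target, "w") as f:
            f.write(f"{cloud_id}\n")
        previous = os.path.realpath(link) if os.path.exists(link) else link
        tmp = f"{link}.tmp"
        os.symlink(target, tmp)
        os.replace(tmp, link)    # force-replace, like util.sym_link(force=True)
        # Drop a marker left over from an earlier boot with a different id.
        if previous not in (link, target) and os.path.exists(previous):
            os.remove(previous)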
@@ -341,8 +430,9 @@ class DataSource(metaclass=abc.ABCMeta):
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
- 'Subclasses of DataSource must implement _get_data which'
- ' sets self.metadata, vendordata_raw and userdata_raw.')
+ "Subclasses of DataSource must implement _get_data which"
+ " sets self.metadata, vendordata_raw and userdata_raw."
+ )
def get_url_params(self):
"""Return the Datasource's prefered url_read parameters.
@@ -357,28 +447,52 @@ class DataSource(metaclass=abc.ABCMeta):
max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
except ValueError:
util.logexc(
- LOG, "Config max_wait '%s' is not an int, using default '%s'",
- self.ds_cfg.get("max_wait"), max_wait)
+ LOG,
+ "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"),
+ max_wait,
+ )
timeout = self.url_timeout
try:
- timeout = max(
- 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ timeout = max(0, int(self.ds_cfg.get("timeout", self.url_timeout)))
except ValueError:
timeout = self.url_timeout
util.logexc(
- LOG, "Config timeout '%s' is not an int, using default '%s'",
- self.ds_cfg.get('timeout'), timeout)
+ LOG,
+ "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("timeout"),
+ timeout,
+ )
retries = self.url_retries
try:
retries = int(self.ds_cfg.get("retries", self.url_retries))
except Exception:
util.logexc(
- LOG, "Config retries '%s' is not an int, using default '%s'",
- self.ds_cfg.get('retries'), retries)
+ LOG,
+ "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("retries"),
+ retries,
+ )
- return URLParams(max_wait, timeout, retries)
+ sec_between_retries = self.url_sec_between_retries
+ try:
+ sec_between_retries = int(
+ self.ds_cfg.get(
+ "sec_between_retries", self.url_sec_between_retries
+ )
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Config sec_between_retries '%s' is not an int,"
+ " using default '%s'",
+ self.ds_cfg.get("sec_between_retries"),
+ sec_between_retries,
+ )
+
+ return URLParams(max_wait, timeout, retries, sec_between_retries)
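get_url_params() now resolves a fourth knob, sec_between_retries, alongside the existing max wait, per-request timeout and retry count. A self-contained illustration of how a polling loop typically consumes those four values (this is not cloud-init's url_helper code; for simplicity a negative max_wait is treated here as "no overall deadline", whereas the class attribute above uses it to mean "do not wait"):

    import time
    import urllib.request
    from urllib.error import URLError

    def fetch_with_params(url, max_wait, timeout, retries, sec_between_retries):
        start = time.monotonic()
        for attempt in range(retries + 1):
            if 0 <= max_wait < time.monotonic() - start:
                return None  # overall deadline exceeded
            try:
                with urllib.request.urlopen(url, timeout=timeout) as resp:
                    return resp.read()
            except URLError:
                if attempt < retries:
                    time.sleep(sec_between_retries)
        return None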
def get_userdata(self, apply_filter=False):
if self.userdata is None:
@@ -392,6 +506,11 @@ class DataSource(metaclass=abc.ABCMeta):
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
+ def get_vendordata2(self):
+ if self.vendordata2 is None:
+ self.vendordata2 = self.ud_proc.process(self.get_vendordata2_raw())
+ return self.vendordata2
+
@property
def fallback_interface(self):
"""Determine the network interface used during local network config."""
@@ -399,13 +518,13 @@ class DataSource(metaclass=abc.ABCMeta):
self._fallback_interface = net.find_fallback_nic()
if self._fallback_interface is None:
LOG.warning(
- "Did not find a fallback interface on %s.",
- self.cloud_name)
+ "Did not find a fallback interface on %s.", self.cloud_name
+ )
return self._fallback_interface
@property
def platform_type(self):
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
# Handle upgrade path where pickled datasource has no _platform.
self._platform_type = self.dsname.lower()
if not self._platform_type:
@@ -424,7 +543,7 @@ class DataSource(metaclass=abc.ABCMeta):
nocloud: seed-dir (/seed/dir/path)
lxd: nocloud (/seed/dir/path)
"""
- if not hasattr(self, '_subplatform'):
+ if not hasattr(self, "_subplatform"):
# Handle upgrade path where pickled datasource has no _platform.
self._subplatform = self._get_subplatform()
if not self._subplatform:
@@ -433,8 +552,8 @@ class DataSource(metaclass=abc.ABCMeta):
def _get_subplatform(self):
"""Subclasses should implement to return a "slug (detail)" string."""
- if hasattr(self, 'metadata_address'):
- return 'metadata (%s)' % getattr(self, 'metadata_address')
+ if hasattr(self, "metadata_address"):
+ return "metadata (%s)" % getattr(self, "metadata_address")
return METADATA_UNKNOWN
@property
@@ -453,8 +572,10 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self._cloud_name = self._get_cloud_name().lower()
LOG.debug(
- 'Ignoring metadata provided key %s: non-string type %s',
- METADATA_CLOUD_NAME_KEY, type(cloud_name))
+ "Ignoring metadata provided key %s: non-string type %s",
+ METADATA_CLOUD_NAME_KEY,
+ type(cloud_name),
+ )
else:
self._cloud_name = self._get_cloud_name().lower()
return self._cloud_name
@@ -471,8 +592,8 @@ class DataSource(metaclass=abc.ABCMeta):
def launch_index(self):
if not self.metadata:
return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
+ if "launch-index" in self.metadata:
+ return self.metadata["launch-index"]
return None
def _filter_xdata(self, processed_ud):
@@ -494,6 +615,9 @@ class DataSource(metaclass=abc.ABCMeta):
def get_vendordata_raw(self):
return self.vendordata_raw
+ def get_vendordata2_raw(self):
+ return self.vendordata2_raw
+
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
@@ -501,7 +625,7 @@ class DataSource(metaclass=abc.ABCMeta):
return {}
def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
+ return normalize_pubkey_data(self.metadata.get("public-keys"))
def publish_host_keys(self, hostkeys):
"""Publish the public SSH host keys (found in /etc/ssh/*.pub).
@@ -523,7 +647,7 @@ class DataSource(metaclass=abc.ABCMeta):
if not short_name.startswith(nfrom):
continue
for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom) :])
if os.path.exists(cand):
return cand
return None
@@ -548,20 +672,21 @@ class DataSource(metaclass=abc.ABCMeta):
@property
def availability_zone(self):
top_level_az = self.metadata.get(
- 'availability-zone', self.metadata.get('availability_zone'))
+ "availability-zone", self.metadata.get("availability_zone")
+ )
if top_level_az:
return top_level_az
- return self.metadata.get('placement', {}).get('availability-zone')
+ return self.metadata.get("placement", {}).get("availability-zone")
@property
def region(self):
- return self.metadata.get('region')
+ return self.metadata.get("region")
def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
+ if not self.metadata or "instance-id" not in self.metadata:
# Return a magic not really instance id string
return "iid-datasource"
- return str(self.metadata['instance-id'])
+ return str(self.metadata["instance-id"])
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""Get hostname or fqdn from the datasource. Look it up if desired.
@@ -579,7 +704,7 @@ class DataSource(metaclass=abc.ABCMeta):
defhost = "localhost"
domain = defdomain
- if not self.metadata or not self.metadata.get('local-hostname'):
+ if not self.metadata or not self.metadata.get("local-hostname"):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -600,14 +725,14 @@ class DataSource(metaclass=abc.ABCMeta):
else:
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
+ lhost = self.metadata["local-hostname"]
if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
if toks:
- toks = str(toks).split('.')
+ toks = str(toks).split(".")
else:
toks = ["ip-%s" % lhost.replace(".", "-")]
else:
@@ -615,7 +740,7 @@ class DataSource(metaclass=abc.ABCMeta):
if len(toks) > 1:
hostname = toks[0]
- domain = '.'.join(toks[1:])
+ domain = ".".join(toks[1:])
else:
hostname = toks[0]
@@ -627,10 +752,25 @@ class DataSource(metaclass=abc.ABCMeta):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
- def update_metadata(self, source_event_types):
+ def get_supported_events(self, source_event_types: List[EventType]):
+ supported_events = {} # type: Dict[EventScope, set]
+ for event in source_event_types:
+ for (
+ update_scope,
+ update_events,
+ ) in self.supported_update_events.items():
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
+ return supported_events
+
+ def update_metadata_if_supported(
+ self, source_event_types: List[EventType]
+ ) -> bool:
"""Refresh cached metadata if the datasource supports this event.
- The datasource has a list of update_events which
+ The datasource has a list of supported_update_events which
trigger refreshing all cached metadata as well as refreshing the
network configuration.
@@ -640,28 +780,27 @@ class DataSource(metaclass=abc.ABCMeta):
@return True if the datasource did successfully update cached metadata
due to source_event_type.
"""
- supported_events = {}
- for event in source_event_types:
- for update_scope, update_events in self.update_events.items():
- if event in update_events:
- if not supported_events.get(update_scope):
- supported_events[update_scope] = set()
- supported_events[update_scope].add(event)
+ supported_events = self.get_supported_events(source_event_types)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
- scope, ', '.join(matched_events))
+ scope.value,
+ ", ".join([event.value for event in matched_events]),
+ )
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
- self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ self.clear_cached_attrs((("_%s_config" % scope, UNSET),))
if supported_events:
self.clear_cached_attrs()
result = self.get_data()
if result:
return True
- LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join(source_event_types))
+ LOG.debug(
+ "Datasource %s not updated for events: %s",
+ self,
+ ", ".join([event.value for event in source_event_types]),
+ )
return False
def check_instance_id(self, sys_cfg):
@@ -683,8 +822,9 @@ class DataSource(metaclass=abc.ABCMeta):
if candidate in valid:
return candidate
else:
- LOG.warning("invalid dsmode '%s', using default=%s",
- candidate, default)
+ LOG.warning(
+ "invalid dsmode '%s', using default=%s", candidate, default
+ )
return default
return default
@@ -763,19 +903,23 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
name="search-%s" % name.replace("DataSource", ""),
description="searching for %s data from %s" % (mode, name),
message="no %s data found from %s" % (mode, name),
- parent=reporter)
+ parent=reporter,
+ )
try:
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
+ if s.update_metadata_if_supported(
+ [EventType.BOOT_NEW_INSTANCE]
+ ):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
+ msg = "Did not find any data source, searched classes: (%s)" % ", ".join(
+ ds_names
+ )
raise DataSourceNotFoundException(msg)
@@ -785,15 +929,25 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
# Return an ordered list of classes that match (if any)
def list_sources(cfg_list, depends, pkg_list):
src_list = []
- LOG.debug(("Looking for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
+ LOG.debug(
+ "Looking for data source in: %s,"
+ " via packages %s that matches dependencies %s",
+ cfg_list,
+ pkg_list,
+ depends,
+ )
for ds_name in cfg_list:
if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
+ ds_name = "%s%s" % (DS_PREFIX, ds_name)
+ m_locs, _looked_locs = importer.find_module(
+ ds_name, pkg_list, ["get_datasource_list"]
+ )
+ if not m_locs:
+ LOG.error(
+ "Could not import %s. Does the DataSource exist and "
+ "is it importable?",
+ ds_name,
+ )
for m_loc in m_locs:
mod = importer.import_module(m_loc)
lister = getattr(mod, "get_datasource_list")
@@ -804,7 +958,7 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
# quickly (local check only) if self.instance_id is still valid
# we check kernel command line or files.
if not instance_id:
@@ -854,8 +1008,7 @@ def convert_vendordata(data, recurse=True):
return copy.deepcopy(data)
if isinstance(data, dict):
if recurse is True:
- return convert_vendordata(data.get('cloud-init'),
- recurse=False)
+ return convert_vendordata(data.get("cloud-init"), recurse=False)
raise ValueError("vendordata['cloud-init'] cannot be dict")
raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d3055d08..d07dc3c0 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,27 +6,28 @@ import os
import re
import socket
import struct
-import time
import textwrap
+import time
import zlib
-from errno import ENOENT
-
-from cloudinit.settings import CFG_BUILTIN
-from cloudinit.net import dhcp
-from cloudinit import stages
-from cloudinit import temp_utils
from contextlib import contextmanager
+from datetime import datetime
+from errno import ENOENT
+from typing import List, Optional
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import distros
+from cloudinit import (
+ distros,
+ stages,
+ subp,
+ temp_utils,
+ url_helper,
+ util,
+ version,
+)
+from cloudinit.net import dhcp
from cloudinit.reporting import events
-from cloudinit.net.dhcp import EphemeralDHCPv4
-from datetime import datetime
+from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
@@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__)
# value is applied if the endpoint can't be found within a lease file
DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
-BOOT_EVENT_TYPE = 'boot-telemetry'
-SYSTEMINFO_EVENT_TYPE = 'system-info'
-DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-COMPRESSED_EVENT_TYPE = 'compressed'
+BOOT_EVENT_TYPE = "boot-telemetry"
+SYSTEMINFO_EVENT_TYPE = "system-info"
+DIAGNOSTIC_EVENT_TYPE = "diagnostic"
+COMPRESSED_EVENT_TYPE = "compressed"
# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
# at once. This number is based on the analysis done on a large sample of
# cloud-init.log files where the P95 of the file sizes was 537KB and the time
@@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed'
MAX_LOG_TO_KVP_LENGTH = 512000
# File to store the last byte of cloud-init.log that was pushed to KVP. This
# file will be deleted with every VM reboot.
-LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index'
+LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index"
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
- reporting_enabled=True)
+ reporting_enabled=True,
+)
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
- 'The VM encountered an error during deployment. '
- 'Please visit https://aka.ms/linuxprovisioningerror '
- 'for more information on remediation.')
+ "The VM encountered an error during deployment. "
+ "Please visit https://aka.ms/linuxprovisioningerror "
+ "for more information on remediation."
+)
def azure_ds_telemetry_reporter(func):
def impl(*args, **kwargs):
with events.ReportEventStack(
- name=func.__name__,
- description=func.__name__,
- parent=azure_ds_reporter):
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter,
+ ):
return func(*args, **kwargs)
+
return impl
@@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id):
def swap_bytestring(s, width=2):
dd = [byte for byte in textwrap.wrap(s, 2)]
dd.reverse()
- return ''.join(dd)
+ return "".join(dd)
- parts = current_id.split('-')
- swapped_id = '-'.join(
+ parts = current_id.split("-")
+ swapped_id = "-".join(
[
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
- parts[4]
+ parts[4],
]
)
@@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id):
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
- activation of cloud-init"""
+ activation of cloud-init"""
if not distros.uses_systemd():
- raise RuntimeError(
- "distro not using systemd, skipping boot telemetry")
+ raise RuntimeError("distro not using systemd, skipping boot telemetry")
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
except ValueError as e:
- raise RuntimeError(
- "Failed to determine kernel start timestamp"
- ) from e
+ raise RuntimeError("Failed to determine kernel start timestamp") from e
try:
- out, _ = subp.subp(['/bin/systemctl',
- 'show', '-p',
- 'UserspaceTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd"
+ )
user_start = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -135,16 +138,23 @@ def get_boot_telemetry():
) from e
try:
- out, _ = subp.subp(['/bin/systemctl', 'show',
- 'cloud-init-local', '-p',
- 'InactiveExitTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ [
+ "/bin/systemctl",
+ "show",
+ "cloud-init-local",
+ "-p",
+ "InactiveExitTimestampMonotonic",
+ ],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd"
+ )
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -158,12 +168,16 @@ def get_boot_telemetry():
) from e
evt = events.ReportingEvent(
- BOOT_EVENT_TYPE, 'boot-telemetry',
- "kernel_start=%s user_start=%s cloudinit_activation=%s" %
- (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
- events.DEFAULT_EVENT_ORIGIN)
+ BOOT_EVENT_TYPE,
+ "boot-telemetry",
+ "kernel_start=%s user_start=%s cloudinit_activation=%s"
+ % (
+ datetime.utcfromtimestamp(kernel_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(user_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z",
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -175,13 +189,22 @@ def get_system_info():
"""Collect and report system information"""
info = util.system_info()
evt = events.ReportingEvent(
- SYSTEMINFO_EVENT_TYPE, 'system information',
+ SYSTEMINFO_EVENT_TYPE,
+ "system information",
"cloudinit_version=%s, kernel_version=%s, variant=%s, "
"distro_name=%s, distro_version=%s, flavor=%s, "
- "python_version=%s" %
- (version.version_string(), info['release'], info['variant'],
- info['dist'][0], info['dist'][1], info['dist'][2],
- info['python']), events.DEFAULT_EVENT_ORIGIN)
+ "python_version=%s"
+ % (
+ version.version_string(),
+ info["release"],
+ info["variant"],
+ info["dist"][0],
+ info["dist"][1],
+ info["dist"][2],
+ info["python"],
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -189,13 +212,17 @@ def get_system_info():
def report_diagnostic_event(
- msg: str, *, logger_func=None) -> events.ReportingEvent:
+ msg: str, *, logger_func=None
+) -> events.ReportingEvent:
"""Report a diagnostic event"""
if callable(logger_func):
logger_func(msg)
evt = events.ReportingEvent(
- DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
- msg, events.DEFAULT_EVENT_ORIGIN)
+ DIAGNOSTIC_EVENT_TYPE,
+ "diagnostic message",
+ msg,
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt, excluded_handler_types={"log"})
# return the event for unit testing purpose
@@ -205,21 +232,26 @@ def report_diagnostic_event(
def report_compressed_event(event_name, event_content):
"""Report a compressed event"""
compressed_data = base64.encodebytes(zlib.compress(event_content))
- event_data = {"encoding": "gz+b64",
- "data": compressed_data.decode('ascii')}
+ event_data = {
+ "encoding": "gz+b64",
+ "data": compressed_data.decode("ascii"),
+ }
evt = events.ReportingEvent(
- COMPRESSED_EVENT_TYPE, event_name,
+ COMPRESSED_EVENT_TYPE,
+ event_name,
json.dumps(event_data),
- events.DEFAULT_EVENT_ORIGIN)
- events.report_event(evt,
- excluded_handler_types={"log", "print", "webhook"})
+ events.DEFAULT_EVENT_ORIGIN,
+ )
+ events.report_event(
+ evt, excluded_handler_types={"log", "print", "webhook"}
+ )
# return the event for unit testing purpose
return evt
@azure_ds_telemetry_reporter
-def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]):
"""Push a portion of cloud-init.log file or the whole file to KVP
based on the file size.
The first time this function is called after VM boot, it will push the last
@@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
report_diagnostic_event(
"Dumping last {0} bytes of cloud-init.log file to KVP starting"
" from index: {1}".format(f.tell() - seek_index, seek_index),
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
f.seek(seek_index, os.SEEK_SET)
report_compressed_event("cloud-init.log", f.read())
util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell()))
except Exception as ex:
report_diagnostic_event(
"Exception when dumping log file: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
LOG.debug("Dumping dmesg log to KVP")
try:
- out, _ = subp.subp(['dmesg'], decode=False, capture=True)
+ out, _ = subp.subp(["dmesg"], decode=False, capture=True)
report_compressed_event("dmesg", out)
except Exception as ex:
report_diagnostic_event(
"Exception when dumping dmesg log: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
@azure_ds_telemetry_reporter
@@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index():
return int(f.read())
except IOError as e:
if e.errno != ENOENT:
- report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE"
- " failed: %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except ValueError as e:
- report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE"
- ": %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except Exception as e:
- report_diagnostic_event("Failed to get the last log byte pushed to KVP"
- ": %s." % repr(e), logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Failed to get the last log byte pushed to KVP: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
return 0
@@ -295,58 +334,97 @@ def _get_dhcp_endpoint_option_name():
@azure_ds_telemetry_reporter
-def http_with_retries(url, **kwargs) -> str:
+def http_with_retries(url, **kwargs) -> url_helper.UrlResponse:
"""Wrapper around url_helper.readurl() with custom telemetry logging
that url_helper.readurl() does not provide.
"""
- exc = None
-
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if 'timeout' not in kwargs:
- kwargs['timeout'] = default_readurl_timeout
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = default_readurl_timeout
# remove kwargs that cause url_helper.readurl to retry,
# since we are already implementing our own retry logic.
- if kwargs.pop('retries', None):
+ if kwargs.pop("retries", None):
LOG.warning(
- 'Ignoring retries kwarg passed in for '
- 'communication with Azure endpoint.')
- if kwargs.pop('infinite', None):
+ "Ignoring retries kwarg passed in for "
+ "communication with Azure endpoint."
+ )
+ if kwargs.pop("infinite", None):
LOG.warning(
- 'Ignoring infinite kwarg passed in for communication '
- 'with Azure endpoint.')
+ "Ignoring infinite kwarg passed in for communication "
+ "with Azure endpoint."
+ )
for attempt in range(1, max_readurl_attempts + 1):
try:
ret = url_helper.readurl(url, **kwargs)
report_diagnostic_event(
- 'Successful HTTP request with Azure endpoint %s after '
- '%d attempts' % (url, attempt),
- logger_func=LOG.debug)
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
return ret
except Exception as e:
- exc = e
if attempt % periodic_logging_attempts == 0:
report_diagnostic_event(
- 'Failed HTTP request with Azure endpoint %s during '
- 'attempt %d with exception: %s' %
- (url, attempt, e),
- logger_func=LOG.debug)
-
- raise exc
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s" % (url, attempt, e),
+ logger_func=LOG.debug,
+ )
+ if attempt == max_readurl_attempts:
+ raise
+
+ time.sleep(sleep_duration_between_retries)
+
+ raise RuntimeError("Failed to return in http_with_retries")
+
+
+def build_minimal_ovf(
+ username: str, hostname: str, disableSshPwd: str
+) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent(
+ """\
+ <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ <ns1:UserName>{username}</ns1:UserName>
+ <ns1:DisableSshPasswordAuthentication>{disableSshPwd}
+ </ns1:DisableSshPasswordAuthentication>
+ <ns1:HostName>{hostname}</ns1:HostName>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:PlatformSettingsSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:PlatformSettings>
+ <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>
+ </ns1:PlatformSettings>
+ </ns1:PlatformSettingsSection>
+ </ns0:Environment>
+ """
+ )
+ ret = OVF_ENV_TEMPLATE.format(
+ username=username, hostname=hostname, disableSshPwd=disableSshPwd
+ )
+ return ret.encode("utf-8")
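build_minimal_ovf() above synthesizes a bare-bones ovf-env.xml when only the admin username, hostname and SSH-password policy are known. A short usage sketch with example values (real callers derive these from instance metadata):

    ovf_xml = build_minimal_ovf(
        username="azureuser",        # example values only
        hostname="example-vm",
        disableSshPwd="true",
    )
    print(ovf_xml.decode("utf-8"))   # the function returns UTF-8 encoded bytes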
class AzureEndpointHttpClient:
headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def __init__(self, certificate):
@@ -355,20 +433,21 @@ class AzureEndpointHttpClient:
"x-ms-guest-agent-public-x509-cert": certificate,
}
- def get(self, url, secure=False):
+ def get(self, url, secure=False) -> url_helper.UrlResponse:
headers = self.headers
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
return http_with_retries(url, headers=headers)
- def post(self, url, data=None, extra_headers=None):
+ def post(
+ self, url, data=None, extra_headers=None
+ ) -> url_helper.UrlResponse:
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return http_with_retries(
- url, data=data, headers=headers)
+ return http_with_retries(url, data=data, headers=headers)
class InvalidGoalStateXMLException(Exception):
@@ -376,12 +455,12 @@ class InvalidGoalStateXMLException(Exception):
class GoalState:
-
def __init__(
- self,
- unparsed_xml: str,
- azure_endpoint_client: AzureEndpointHttpClient,
- need_certificate: bool = True) -> None:
+ self,
+ unparsed_xml: str,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ need_certificate: bool = True,
+ ) -> None:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_xml: string representing a GoalState XML.
@@ -395,36 +474,41 @@ class GoalState:
self.root = ElementTree.fromstring(unparsed_xml)
except ElementTree.ParseError as e:
report_diagnostic_event(
- 'Failed to parse GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Failed to parse GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.container_id = self._text_from_xpath("./Container/ContainerId")
self.instance_id = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
- self.incarnation = self._text_from_xpath('./Incarnation')
+ "./Container/RoleInstanceList/RoleInstance/InstanceId"
+ )
+ self.incarnation = self._text_from_xpath("./Incarnation")
for attr in ("container_id", "instance_id", "incarnation"):
if getattr(self, attr) is None:
- msg = 'Missing %s in GoalState XML' % attr
+ msg = "Missing %s in GoalState XML" % attr
report_diagnostic_event(msg, logger_func=LOG.warning)
raise InvalidGoalStateXMLException(msg)
self.certificates_xml = None
url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
+ "./Container/RoleInstanceList/RoleInstance"
+ "/Configuration/Certificates"
+ )
if url is not None and need_certificate:
with events.ReportEventStack(
- name="get-certificates-xml",
- description="get certificates xml",
- parent=azure_ds_reporter):
- self.certificates_xml = \
- self.azure_endpoint_client.get(
- url, secure=True).contents
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter,
+ ):
+ self.certificates_xml = self.azure_endpoint_client.get(
+ url, secure=True
+ ).contents
if self.certificates_xml is None:
raise InvalidGoalStateXMLException(
- 'Azure endpoint returned empty certificates xml.')
+ "Azure endpoint returned empty certificates xml."
+ )
def _text_from_xpath(self, xpath):
element = self.root.find(xpath)
@@ -436,8 +520,8 @@ class GoalState:
class OpenSSLManager:
certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
+ "private_key": "TransportPrivate.pem",
+ "certificate": "TransportCert.pem",
}
def __init__(self):
@@ -458,35 +542,47 @@ class OpenSSLManager:
@azure_ds_telemetry_reporter
def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
+ LOG.debug("Generating certificate for communication with fabric...")
if self.certificate is not None:
- LOG.debug('Certificate already generated.')
+ LOG.debug("Certificate already generated.")
return
with cd(self.tmpdir):
- subp.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
+ subp.subp(
+ [
+ "openssl",
+ "req",
+ "-x509",
+ "-nodes",
+ "-subj",
+ "/CN=LinuxTransport",
+ "-days",
+ "32768",
+ "-newkey",
+ "rsa:2048",
+ "-keyout",
+ self.certificate_names["private_key"],
+ "-out",
+ self.certificate_names["certificate"],
+ ]
+ )
+ certificate = ""
+ for line in open(self.certificate_names["certificate"]):
if "CERTIFICATE" not in line:
certificate += line.rstrip()
self.certificate = certificate
- LOG.debug('New certificate generated.')
+ LOG.debug("New certificate generated.")
@staticmethod
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
- cmd = ['openssl', 'x509', '-noout', action]
+ cmd = ["openssl", "x509", "-noout", action]
result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
- pub_key = self._run_x509_action('-pubkey', certificate)
- keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ pub_key = self._run_x509_action("-pubkey", certificate)
+ keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"]
ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@@ -499,48 +595,50 @@ class OpenSSLManager:
Azure control plane passes that fingerprint as so:
'073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
"""
- raw_fp = self._run_x509_action('-fingerprint', certificate)
- eq = raw_fp.find('=')
- octets = raw_fp[eq+1:-1].split(':')
- return ''.join(octets)
+ raw_fp = self._run_x509_action("-fingerprint", certificate)
+ eq = raw_fp.find("=")
+ octets = raw_fp[eq + 1 : -1].split(":")
+ return "".join(octets)
@azure_ds_telemetry_reporter
def _decrypt_certs_from_xml(self, certificates_xml):
"""Decrypt the certificates XML document using the our private key;
- return the list of certs and private keys contained in the doc.
+ return the list of certs and private keys contained in the doc.
"""
- tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+ tag = ElementTree.fromstring(certificates_xml).find(".//Data")
certificates_content = tag.text
lines = [
- b'MIME-Version: 1.0',
+ b"MIME-Version: 1.0",
b'Content-Disposition: attachment; filename="Certificates.p7m"',
b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
+ b"Content-Transfer-Encoding: base64",
+ b"",
+ certificates_content.encode("utf-8"),
]
with cd(self.tmpdir):
out, _ = subp.subp(
- 'openssl cms -decrypt -in /dev/stdin -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True, data=b'\n'.join(lines))
+ "openssl cms -decrypt -in /dev/stdin -inkey"
+ " {private_key} -recip {certificate} | openssl pkcs12 -nodes"
+ " -password pass:".format(**self.certificate_names),
+ shell=True,
+ data=b"\n".join(lines),
+ )
return out
@azure_ds_telemetry_reporter
def parse_certificates(self, certificates_xml):
"""Given the Certificates XML document, return a dictionary of
- fingerprints and associated SSH keys derived from the certs."""
+ fingerprints and associated SSH keys derived from the certs."""
out = self._decrypt_certs_from_xml(certificates_xml)
current = []
keys = {}
for line in out.splitlines():
current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
+ if re.match(r"[-]+END .*?KEY[-]+$", line):
# ignore private_keys
current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificate = '\n'.join(current)
+ elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line):
+ certificate = "\n".join(current)
ssh_key = self._get_ssh_key_from_cert(certificate)
fingerprint = self._get_fingerprint_from_cert(certificate)
keys[fingerprint] = ssh_key
@@ -550,7 +648,8 @@ class OpenSSLManager:
class GoalStateHealthReporter:
- HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent(
+ """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -568,25 +667,30 @@ class GoalStateHealthReporter:
</RoleInstanceList>
</Container>
</Health>
- ''')
+ """
+ )
- HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+ )
- PROVISIONING_SUCCESS_STATUS = 'Ready'
- PROVISIONING_NOT_READY_STATUS = 'NotReady'
- PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed'
+ PROVISIONING_SUCCESS_STATUS = "Ready"
+ PROVISIONING_NOT_READY_STATUS = "NotReady"
+ PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed"
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
def __init__(
- self, goal_state: GoalState,
- azure_endpoint_client: AzureEndpointHttpClient,
- endpoint: str) -> None:
+ self,
+ goal_state: GoalState,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ endpoint: str,
+ ) -> None:
"""Creates instance that will report provisioning status to an endpoint
@param goal_state: An instance of class GoalState that contains
@@ -608,17 +712,19 @@ class GoalStateHealthReporter:
incarnation=self._goal_state.incarnation,
container_id=self._goal_state.container_id,
instance_id=self._goal_state.instance_id,
- status=self.PROVISIONING_SUCCESS_STATUS)
- LOG.debug('Reporting ready to Azure fabric.')
+ status=self.PROVISIONING_SUCCESS_STATUS,
+ )
+ LOG.debug("Reporting ready to Azure fabric.")
try:
self._post_health_report(document=document)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
- LOG.info('Reported ready to Azure fabric.')
+ LOG.info("Reported ready to Azure fabric.")
@azure_ds_telemetry_reporter
def send_failure_signal(self, description: str) -> None:
@@ -628,7 +734,8 @@ class GoalStateHealthReporter:
instance_id=self._goal_state.instance_id,
status=self.PROVISIONING_NOT_READY_STATUS,
substatus=self.PROVISIONING_FAILURE_SUBSTATUS,
- description=description)
+ description=description,
+ )
try:
self._post_health_report(document=document)
except Exception as e:
@@ -636,24 +743,33 @@ class GoalStateHealthReporter:
report_diagnostic_event(msg, logger_func=LOG.error)
raise
- LOG.warning('Reported failure to Azure fabric.')
+ LOG.warning("Reported failure to Azure fabric.")
def build_report(
- self, incarnation: str, container_id: str, instance_id: str,
- status: str, substatus=None, description=None) -> str:
- health_detail = ''
+ self,
+ incarnation: str,
+ container_id: str,
+ instance_id: str,
+ status: str,
+ substatus=None,
+ description=None,
+ ) -> str:
+ health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
health_substatus=escape(substatus),
health_description=escape(
- description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]))
+ description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]
+ ),
+ )
health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(str(incarnation)),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(status),
- health_detail_subsection=health_detail)
+ health_detail_subsection=health_detail,
+ )
return health_report
@@ -681,20 +797,22 @@ class GoalStateHealthReporter:
# reporting handler that writes to the special KVP files.
time.sleep(0)
- LOG.debug('Sending health report to Azure fabric.')
+ LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
self._azure_endpoint_client.post(
url,
data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
- LOG.debug('Successfully sent health report to Azure fabric')
+ extra_headers={"Content-Type": "text/xml; charset=utf-8"},
+ )
+ LOG.debug("Successfully sent health report to Azure fabric")
class WALinuxAgentShim:
-
def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
- fallback_lease_file)
+ LOG.debug(
+ "WALinuxAgentShim instantiated, fallback_lease_file=%s",
+ fallback_lease_file,
+ )
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
@@ -713,30 +831,33 @@ class WALinuxAgentShim:
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file,
- self.dhcpoptions)
+ self._endpoint = self.find_endpoint(
+ self.lease_file, self.dhcpoptions
+ )
return self._endpoint
@staticmethod
def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace('\\', '')
+ unescaped_value = fallback_lease_value.replace("\\", "")
if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
+ hex_pair = "0" + hex_pair
hex_string += hex_pair
packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
+ ">L", int(hex_string.replace(":", ""), 16)
+ )
else:
- packed_bytes = unescaped_value.encode('utf-8')
+ packed_bytes = unescaped_value.encode("utf-8")
return socket.inet_ntoa(packed_bytes)
@staticmethod
@azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
- 'OPTION_245', leases_d=leases_d)
+ "OPTION_245", leases_d=leases_d
+ )
@staticmethod
@azure_ds_telemetry_reporter
@@ -754,7 +875,7 @@ class WALinuxAgentShim:
if option_name in line:
# Example line from Ubuntu
# option unknown-245 a8:3f:81:10;
- leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
# Return the "most recent" one in the list
if len(leases) < 1:
return None
@@ -769,15 +890,16 @@ class WALinuxAgentShim:
if not os.path.exists(hooks_dir):
LOG.debug("%s not found.", hooks_dir)
return None
- hook_files = [os.path.join(hooks_dir, x)
- for x in os.listdir(hooks_dir)]
+ hook_files = [
+ os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
+ ]
for hook_file in hook_files:
try:
- name = os.path.basename(hook_file).replace('.json', '')
+ name = os.path.basename(hook_file).replace(".json", "")
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file)
+ "{_file} is not valid JSON data".format(_file=hook_file)
) from e
return dhcp_options
@@ -789,7 +911,7 @@ class WALinuxAgentShim:
# the MS endpoint server is given to us as DHCP option 245
_value = None
for interface in dhcp_options:
- _value = dhcp_options[interface].get('unknown_245', None)
+ _value = dhcp_options[interface].get("unknown_245", None)
if _value is not None:
LOG.debug("Endpoint server found in dhclient options")
break
@@ -819,51 +941,73 @@ class WALinuxAgentShim:
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
report_diagnostic_event(
- 'No Azure endpoint from dhcp options. '
- 'Finding Azure endpoint from networkd...',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhcp options. "
+ "Finding Azure endpoint from networkd...",
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
report_diagnostic_event(
- 'No Azure endpoint from networkd. '
- 'Finding Azure endpoint from hook json...',
- logger_func=LOG.debug)
+ "No Azure endpoint from networkd. "
+ "Finding Azure endpoint from hook json...",
+ logger_func=LOG.debug,
+ )
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
report_diagnostic_event(
- 'No Azure endpoint from dhclient logs. '
- 'Unable to find endpoint in dhclient logs. '
- 'Falling back to check lease files',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhclient logs. "
+ "Unable to find endpoint in dhclient logs. "
+ "Falling back to check lease files",
+ logger_func=LOG.debug,
+ )
if fallback_lease_file is None:
report_diagnostic_event(
- 'No fallback lease file was specified.',
- logger_func=LOG.warning)
+ "No fallback lease file was specified.",
+ logger_func=LOG.warning,
+ )
value = None
else:
report_diagnostic_event(
- 'Looking for endpoint in lease file %s'
- % fallback_lease_file, logger_func=LOG.debug)
+ "Looking for endpoint in lease file %s"
+ % fallback_lease_file,
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file)
+ fallback_lease_file
+ )
if value is None:
value = DEFAULT_WIRESERVER_ENDPOINT
report_diagnostic_event(
- 'No lease found; using default endpoint: %s' % value,
- logger_func=LOG.warning)
+ "No lease found; using default endpoint: %s" % value,
+ logger_func=LOG.warning,
+ )
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
report_diagnostic_event(
- 'Azure endpoint found at %s' % endpoint_ip_address,
- logger_func=LOG.debug)
+ "Azure endpoint found at %s" % endpoint_ip_address,
+ logger_func=LOG.debug,
+ )
return endpoint_ip_address
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict:
+ def eject_iso(self, iso_dev) -> None:
+ try:
+ LOG.debug("Ejecting the provisioning iso")
+ subp.subp(["eject", iso_dev])
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed ejecting the provisioning iso: %s" % e,
+ logger_func=LOG.debug,
+ )
+
+ @azure_ds_telemetry_reporter
+ def register_with_azure_and_fetch_data(
+ self, pubkey_info=None, iso_dev=None
+ ) -> Optional[List[str]]:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -880,7 +1024,8 @@ class WALinuxAgentShim:
http_client_certificate = self.openssl_manager.certificate
if self.azure_endpoint_client is None:
self.azure_endpoint_client = AzureEndpointHttpClient(
- http_client_certificate)
+ http_client_certificate
+ )
goal_state = self._fetch_goal_state_from_azure(
need_certificate=http_client_certificate is not None
)
@@ -888,9 +1033,14 @@ class WALinuxAgentShim:
if pubkey_info is not None:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
+
+ if iso_dev is not None:
+ self.eject_iso(iso_dev)
+
health_reporter.send_ready_signal()
- return {'public-keys': ssh_keys}
+ return ssh_keys
@azure_ds_telemetry_reporter
def register_with_azure_and_report_failure(self, description: str) -> None:
@@ -903,13 +1053,14 @@ class WALinuxAgentShim:
self.azure_endpoint_client = AzureEndpointHttpClient(None)
goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
health_reporter.send_failure_signal(description=description)
@azure_ds_telemetry_reporter
def _fetch_goal_state_from_azure(
- self,
- need_certificate: bool) -> GoalState:
+ self, need_certificate: bool
+ ) -> GoalState:
"""Fetches the GoalState XML from the Azure endpoint, parses the XML,
and returns a GoalState object.
@@ -918,8 +1069,7 @@ class WALinuxAgentShim:
"""
unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
return self._parse_raw_goal_state_xml(
- unparsed_goal_state_xml,
- need_certificate
+ unparsed_goal_state_xml, need_certificate
)
@azure_ds_telemetry_reporter
@@ -930,27 +1080,29 @@ class WALinuxAgentShim:
@return: GoalState XML string
"""
- LOG.info('Registering with Azure...')
- url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ LOG.info("Registering with Azure...")
+ url = "http://{}/machine/?comp=goalstate".format(self.endpoint)
try:
with events.ReportEventStack(
- name="goalstate-retrieval",
- description="retrieve goalstate",
- parent=azure_ds_reporter):
+ name="goalstate-retrieval",
+ description="retrieve goalstate",
+ parent=azure_ds_reporter,
+ ):
response = self.azure_endpoint_client.get(url)
except Exception as e:
report_diagnostic_event(
- 'failed to register with Azure and fetch GoalState XML: %s'
- % e, logger_func=LOG.warning)
+ "failed to register with Azure and fetch GoalState XML: %s"
+ % e,
+ logger_func=LOG.warning,
+ )
raise
- LOG.debug('Successfully fetched GoalState XML.')
+ LOG.debug("Successfully fetched GoalState XML.")
return response.contents
@azure_ds_telemetry_reporter
def _parse_raw_goal_state_xml(
- self,
- unparsed_goal_state_xml: str,
- need_certificate: bool) -> GoalState:
+ self, unparsed_goal_state_xml: str, need_certificate: bool
+ ) -> GoalState:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_goal_state_xml: GoalState XML string
@@ -961,23 +1113,28 @@ class WALinuxAgentShim:
goal_state = GoalState(
unparsed_goal_state_xml,
self.azure_endpoint_client,
- need_certificate
+ need_certificate,
)
except Exception as e:
report_diagnostic_event(
- 'Error processing GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Error processing GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- msg = ', '.join([
- 'GoalState XML container id: %s' % goal_state.container_id,
- 'GoalState XML instance id: %s' % goal_state.instance_id,
- 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ msg = ", ".join(
+ [
+ "GoalState XML container id: %s" % goal_state.container_id,
+ "GoalState XML instance id: %s" % goal_state.instance_id,
+ "GoalState XML incarnation: %s" % goal_state.incarnation,
+ ]
+ )
report_diagnostic_event(msg, logger_func=LOG.debug)
return goal_state
@azure_ds_telemetry_reporter
def _get_user_pubkeys(
- self, goal_state: GoalState, pubkey_info: list) -> list:
+ self, goal_state: GoalState, pubkey_info: list
+ ) -> list:
"""Gets and filters the VM admin user's authorized pubkeys.
The admin user in this case is the username specified as "admin"
@@ -1005,15 +1162,16 @@ class WALinuxAgentShim:
"""
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
+ LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
+ goal_state.certificates_xml
+ )
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
return ssh_keys
@staticmethod
def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list:
- """ Filter and return only the user's actual pubkeys.
+ """Filter and return only the user's actual pubkeys.
@param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
that was obtained from GoalState Certificates XML. May contain
@@ -1026,70 +1184,65 @@ class WALinuxAgentShim:
"""
keys = []
for pubkey in pubkey_info:
- if 'value' in pubkey and pubkey['value']:
- keys.append(pubkey['value'])
- elif 'fingerprint' in pubkey and pubkey['fingerprint']:
- fingerprint = pubkey['fingerprint']
+ if "value" in pubkey and pubkey["value"]:
+ keys.append(pubkey["value"])
+ elif "fingerprint" in pubkey and pubkey["fingerprint"]:
+ fingerprint = pubkey["fingerprint"]
if fingerprint in keys_by_fingerprint:
keys.append(keys_by_fingerprint[fingerprint])
else:
- LOG.warning("ovf-env.xml specified PublicKey fingerprint "
- "%s not found in goalstate XML", fingerprint)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML",
+ fingerprint,
+ )
else:
- LOG.warning("ovf-env.xml specified PublicKey with neither "
- "value nor fingerprint: %s", pubkey)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s",
+ pubkey,
+ )
return keys
@azure_ds_telemetry_reporter
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def get_metadata_from_fabric(
+ fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
try:
- return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
+ return shim.register_with_azure_and_fetch_data(
+ pubkey_info=pubkey_info, iso_dev=iso_dev
+ )
finally:
shim.clean_up()
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None,
- description=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def report_failure_to_fabric(
+ fallback_lease_file=None, dhcp_opts=None, description=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
- shim.register_with_azure_and_report_failure(
- description=description)
+ shim.register_with_azure_and_report_failure(description=description)
finally:
shim.clean_up()
def dhcp_log_cb(out, err):
report_diagnostic_event(
- "dhclient output stream: %s" % out, logger_func=LOG.debug)
+ "dhclient output stream: %s" % out, logger_func=LOG.debug
+ )
report_diagnostic_event(
- "dhclient error stream: %s" % err, logger_func=LOG.debug)
-
-
-class EphemeralDHCPv4WithReporting:
- def __init__(self, reporter, nic=None):
- self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(
- iface=nic, dhcp_log_func=dhcp_log_cb)
-
- def __enter__(self):
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=self.reporter):
- return self.ephemeralDHCPv4.__enter__()
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- self.ephemeralDHCPv4.__exit__(
- excp_type, excp_value, excp_traceback)
+ "dhclient error stream: %s" % err, logger_func=LOG.debug
+ )
# vi: ts=4 expandtab
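Beyond the black-style reflow, the functional change in this Azure hunk is the new iso_dev parameter threaded through get_metadata_from_fabric(). A minimal caller sketch, assuming the datasource already knows the provisioning ISO device; the lease file, pubkey entries and device path below are illustrative placeholders, not values from this change:

from cloudinit.sources.helpers import azure as azure_helper

def fetch_provisioning_data():
    # Illustrative placeholder values; the Azure datasource derives the real ones.
    lease_file = "/var/lib/dhcp/dhclient.eth0.leases"
    pubkeys = [{"fingerprint": "AA:BB:CC", "path": "/home/azureuser/.ssh/authorized_keys"}]
    try:
        return azure_helper.get_metadata_from_fabric(
            fallback_lease_file=lease_file,
            dhcp_opts=None,
            pubkey_info=pubkeys,
            iso_dev="/dev/sr0",  # new in this change: provisioning ISO reported to the fabric
        )
    except Exception:
        # On failure, report a user-visible description back to the fabric.
        azure_helper.report_failure_to_fabric(fallback_lease_file=lease_file)
        raise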
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index f9be4ecb..72515caf 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,20 +8,18 @@ import random
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import url_helper
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, url_helper, util
-NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+NIC_MAP = {"public": "eth0", "private": "eth1"}
LOG = logging.getLogger(__name__)
def assign_ipv4_link_local(distro, nic=None):
- """Bring up NIC using an address using link-local (ip4LL) IPs. On
- DigitalOcean, the link-local domain is per-droplet routed, so there
- is no risk of collisions. However, to be more safe, the ip4LL
- address is random.
+ """Bring up NIC using an address using link-local (ip4LL) IPs.
+ On DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
"""
if not nic:
@@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None):
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
- raise RuntimeError("unable to find interfaces to access the"
- "meta-data server. This droplet is broken.")
+ raise RuntimeError(
+ "unable to find interfaces to access the"
+ "meta-data server. This droplet is broken."
+ )
- addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
- random.randint(0, 255))
+ addr = "169.254.{0}.{1}/16".format(
+ random.randint(1, 168), random.randint(0, 255)
+ )
- ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
- ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+ ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic]
+ ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"]
- if not subp.which('ip'):
- raise RuntimeError("No 'ip' command available to configure ip4LL "
- "address")
+ if not subp.which("ip"):
+ raise RuntimeError(
+ "No 'ip' command available to configure ip4LL address"
+ )
try:
subp.subp(ip_addr_cmd)
@@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None):
subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
- util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
- " Droplet networking will be broken", addr, nic)
+ util.logexc(
+ LOG,
+ "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken",
+ addr,
+ nic,
+ )
raise
return nic
@@ -63,21 +70,23 @@ def get_link_local_nic(distro):
]
if not nics:
return None
- return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex"))
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
- address is extraneous and confusing to users.
+ address is extraneous and confusing to users.
"""
if not nic:
- LOG.debug("no link_local address interface defined, skipping link "
- "local address cleanup")
+ LOG.debug(
+ "no link_local address interface defined, skipping link "
+ "local address cleanup"
+ )
return
LOG.debug("cleaning up ipv4LL address")
- ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+ ip_addr_cmd = ["ip", "addr", "flush", "dev", nic]
try:
subp.subp(ip_addr_cmd)
@@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None):
def convert_network_configuration(config, dns_servers):
"""Convert the DigitalOcean Network description into Cloud-init's netconfig
- format.
-
- Example JSON:
- {'public': [
- {'mac': '04:01:58:27:7f:01',
- 'ipv4': {'gateway': '45.55.32.1',
- 'netmask': '255.255.224.0',
- 'ip_address': '45.55.50.93'},
- 'anchor_ipv4': {
- 'gateway': '10.17.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.17.0.9'},
- 'type': 'public',
- 'ipv6': {'gateway': '....',
- 'ip_address': '....',
- 'cidr': 64}}
- ],
- 'private': [
- {'mac': '04:01:58:27:7f:02',
- 'ipv4': {'gateway': '10.132.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.132.75.35'},
- 'type': 'private'}
- ]
- }
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
"""
def _get_subnet_part(pcfg):
- subpart = {'type': 'static',
- 'control': 'auto',
- 'address': pcfg.get('ip_address'),
- 'gateway': pcfg.get('gateway')}
-
- if ":" in pcfg.get('ip_address'):
- subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
- pcfg.get('cidr'))
+ subpart = {
+ "type": "static",
+ "control": "auto",
+ "address": pcfg.get("ip_address"),
+ "gateway": pcfg.get("gateway"),
+ }
+
+ if ":" in pcfg.get("ip_address"):
+ subpart["address"] = "{0}/{1}".format(
+ pcfg.get("ip_address"), pcfg.get("cidr")
+ )
else:
- subpart['netmask'] = pcfg.get('netmask')
+ subpart["netmask"] = pcfg.get("netmask")
return subpart
@@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers):
nic = config[n][0]
LOG.debug("considering %s", nic)
- mac_address = nic.get('mac')
+ mac_address = nic.get("mac")
if mac_address not in macs_to_nics:
- raise RuntimeError("Did not find network interface on system "
- "with mac '%s'. Cannot apply configuration: %s"
- % (mac_address, nic))
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic)
+ )
sysfs_name = macs_to_nics.get(mac_address)
- nic_type = nic.get('type', 'unknown')
+ nic_type = nic.get("type", "unknown")
if_name = NIC_MAP.get(nic_type, sysfs_name)
if if_name != sysfs_name:
- LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
- nic_type, mac_address, sysfs_name, if_name)
+ LOG.debug(
+ "Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type,
+ mac_address,
+ sysfs_name,
+ if_name,
+ )
else:
- msg = ("Found interface '%s' on '%s', which is not a public "
- "or private interface. Using default system naming.")
+ msg = (
+ "Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming."
+ )
LOG.debug(msg, mac_address, sysfs_name)
- ncfg = {'type': 'physical',
- 'mac_address': mac_address,
- 'name': if_name}
+ ncfg = {
+ "type": "physical",
+ "mac_address": mac_address,
+ "name": if_name,
+ }
subnets = []
- for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"):
raw_subnet = nic.get(netdef, None)
if not raw_subnet:
continue
sub_part = _get_subnet_part(raw_subnet)
if nic_type != "public" or "anchor" in netdef:
- del sub_part['gateway']
+ del sub_part["gateway"]
subnets.append(sub_part)
- ncfg['subnets'] = subnets
+ ncfg["subnets"] = subnets
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
if dns_servers:
LOG.debug("added dns servers: %s", dns_servers)
- nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+ nic_configs.append({"type": "nameserver", "address": dns_servers})
- return {'version': 1, 'config': nic_configs}
+ return {"version": 1, "config": nic_configs}
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return json.loads(response.contents.decode())
@@ -202,16 +226,21 @@ def read_sysinfo():
droplet_id = dmi.read_dmi_data("system-serial-number")
if droplet_id:
- LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
- droplet_id)
+ LOG.debug(
+ "system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id,
+ )
else:
- msg = ("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/new")
+ msg = (
+ "system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new"
+ )
LOG.critical(msg)
raise RuntimeError(msg)
return (True, droplet_id)
+
# vi: ts=4 expandtab
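Taken together, the helpers in this file implement the DigitalOcean bring-up sequence. A hedged sketch of how they are typically chained; the metadata URL and JSON key names are assumptions for illustration, the real datasource supplies them:

from cloudinit.sources.helpers import digitalocean

DO_METADATA_URL = "http://169.254.169.254/metadata/v1.json"  # assumed endpoint

def load_droplet_network(distro):
    # Bring up a random link-local address so the metadata service is reachable.
    nic = digitalocean.assign_ipv4_link_local(distro)
    try:
        md = digitalocean.read_metadata(DO_METADATA_URL)
        # Produces {'version': 1, 'config': [...]} for cloud-init's network layer.
        return digitalocean.convert_network_configuration(
            md.get("interfaces", {}), md.get("dns", {}).get("nameservers", [])
        )
    finally:
        # The ip4LL address is only needed while talking to the metadata service.
        digitalocean.del_ipv4_link_local(nic)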
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 33dc4c53..592ae80b 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -3,24 +3,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import url_helper
-from cloudinit import util
-
import base64
import binascii
+from cloudinit import url_helper, util
+
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
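Both Hetzner helpers share the same retrying readurl() call and differ only in how the response is decoded. A short usage sketch; the base URL is an assumption for illustration:

from cloudinit.sources.helpers import hetzner

BASE_URL = "http://169.254.169.254/hetzner/v1"  # assumed for illustration

metadata = hetzner.read_metadata(BASE_URL + "/metadata")   # YAML parsed into a dict
userdata = hetzner.read_userdata(BASE_URL + "/userdata")   # raw bytes, returned unmodified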
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index e13d6834..2953e858 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -2,14 +2,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import util
-from collections import namedtuple
-
import os
import select
import socket
import struct
+from collections import namedtuple
+
+from cloudinit import log as logging
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -47,29 +47,30 @@ OPER_TESTING = 4
OPER_DORMANT = 5
OPER_UP = 6
-RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
-InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
-NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
- 'pid'])
+RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"])
+InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"])
+NetlinkHeader = namedtuple(
+ "NetlinkHeader", ["length", "type", "flags", "seq", "pid"]
+)
class NetlinkCreateSocketError(RuntimeError):
- '''Raised if netlink socket fails during create or bind.'''
+ """Raised if netlink socket fails during create or bind."""
def create_bound_netlink_socket():
- '''Creates netlink socket and bind on netlink group to catch interface
+ """Creates netlink socket and bind on netlink group to catch interface
down/up events. The socket will bound only on RTMGRP_LINK (which only
includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
non-blocking mode since we're only receiving messages.
:returns: netlink socket in non-blocking mode
:raises: NetlinkCreateSocketError
- '''
+ """
try:
- netlink_socket = socket.socket(socket.AF_NETLINK,
- socket.SOCK_RAW,
- socket.NETLINK_ROUTE)
+ netlink_socket = socket.socket(
+ socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE
+ )
netlink_socket.bind((os.getpid(), RTMGRP_LINK))
netlink_socket.setblocking(0)
except socket.error as e:
@@ -80,7 +81,7 @@ def create_bound_netlink_socket():
def get_netlink_msg_header(data):
- '''Gets netlink message type and length
+ """Gets netlink message type and length
:param: data read from netlink socket
:returns: netlink message type
@@ -92,18 +93,20 @@ def get_netlink_msg_header(data):
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sender port ID */
};
- '''
- assert (data is not None), ("data is none")
- assert (len(data) >= NLMSGHDR_SIZE), (
- "data is smaller than netlink message header")
- msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT,
- data[:MSG_TYPE_OFFSET])
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) >= NLMSGHDR_SIZE
+ ), "data is smaller than netlink message header"
+ msg_len, msg_type, flags, seq, pid = struct.unpack(
+ NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET]
+ )
LOG.debug("Got netlink msg of type %d", msg_type)
return NetlinkHeader(msg_len, msg_type, flags, seq, pid)
def read_netlink_socket(netlink_socket, timeout=None):
- '''Select and read from the netlink socket if ready.
+ """Select and read from the netlink socket if ready.
:param: netlink_socket: specify which socket object to read from
:param: timeout: specify a timeout value (integer) to wait while reading,
@@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None):
:returns: string of data read (max length = <MAX_SIZE>) from socket,
if no data read, returns None
:raises: AssertionError if netlink_socket is None
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
read_set, _, _ = select.select([netlink_socket], [], [], timeout)
# Incase of timeout,read_set doesn't contain netlink socket.
# just return from this function
@@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None):
def unpack_rta_attr(data, offset):
- '''Unpack a single rta attribute.
+ """Unpack a single rta attribute.
:param: data: string of data read from netlink socket
:param: offset: starting offset of RTA Attribute
:return: RTAAttr object with length, type and data. On error, return None.
:raises: AssertionError if data is None or offset is not integer.
- '''
- assert (data is not None), ("data is none")
- assert (type(offset) == int), ("offset is not integer")
- assert (offset >= RTATTR_START_OFFSET), (
- "rta offset is less than expected length")
+ """
+ assert data is not None, "data is none"
+ assert type(offset) == int, "offset is not integer"
+ assert (
+ offset >= RTATTR_START_OFFSET
+ ), "rta offset is less than expected length"
length = rta_type = 0
attr_data = None
try:
length = struct.unpack_from("H", data, offset=offset)[0]
- rta_type = struct.unpack_from("H", data, offset=offset+2)[0]
+ rta_type = struct.unpack_from("H", data, offset=offset + 2)[0]
except struct.error:
return None # Should mean our offset is >= remaining data
# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
return RTAAttr(length, rta_type, attr_data)
def read_rta_oper_state(data):
- '''Reads Interface name and operational state from RTA Data.
+ """Reads Interface name and operational state from RTA Data.
:param: data: string of data read from netlink socket
:returns: InterfaceOperstate object containing if_name and oper_state.
@@ -159,10 +163,11 @@ def read_rta_oper_state(data):
IFLA_IFNAME messages.
:raises: AssertionError if data is None or length of data is
smaller than RTATTR_START_OFFSET.
- '''
- assert (data is not None), ("data is none")
- assert (len(data) > RTATTR_START_OFFSET), (
- "length of data is smaller than RTATTR_START_OFFSET")
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) > RTATTR_START_OFFSET
+ ), "length of data is smaller than RTATTR_START_OFFSET"
ifname = operstate = None
offset = RTATTR_START_OFFSET
while offset <= len(data):
@@ -170,15 +175,16 @@ def read_rta_oper_state(data):
if not attr or attr.length == 0:
break
# Each attribute is 4-byte aligned. Determine pad length.
- padlen = (PAD_ALIGNMENT -
- (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT
+ padlen = (
+ PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT)
+ ) % PAD_ALIGNMENT
offset += attr.length + padlen
if attr.rta_type == IFLA_OPERSTATE:
operstate = ord(attr.data)
elif attr.rta_type == IFLA_IFNAME:
- interface_name = util.decode_binary(attr.data, 'utf-8')
- ifname = interface_name.strip('\0')
+ interface_name = util.decode_binary(attr.data, "utf-8")
+ ifname = interface_name.strip("\0")
if not ifname or operstate is None:
return None
LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate)
@@ -186,12 +192,12 @@ def read_rta_oper_state(data):
def wait_for_nic_attach_event(netlink_socket, existing_nics):
- '''Block until a single nic is attached.
+ """Block until a single nic is attached.
:param: netlink_socket: netlink_socket to receive events
:param: existing_nics: List of existing nics so that we can skip them.
:raises: AssertionError if netlink_socket is none.
- '''
+ """
LOG.debug("Preparing to wait for nic attach.")
ifname = None
@@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics):
# We can return even if the operational state of the new nic is DOWN
# because we set it to UP before doing dhcp.
- read_netlink_messages(netlink_socket,
- None,
- [RTM_NEWLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket,
+ None,
+ [RTM_NEWLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
return ifname
def wait_for_nic_detach_event(netlink_socket):
- '''Block until a single nic is detached and its operational state is down.
+ """Block until a single nic is detached and its operational state is down.
:param: netlink_socket: netlink_socket to receive events.
- '''
+ """
LOG.debug("Preparing to wait for nic detach.")
ifname = None
@@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket):
ifname = iname
return False
- read_netlink_messages(netlink_socket,
- None,
- [RTM_DELLINK],
- [OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb
+ )
return ifname
def wait_for_media_disconnect_connect(netlink_socket, ifname):
- '''Block until media disconnect and connect has happened on an interface.
+ """Block until media disconnect and connect has happened on an interface.
Listens on netlink socket to receive netlink events and when the carrier
changes from 0 to 1, it considers event has happened and
return from this function
@@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
:param: netlink_socket: netlink_socket to receive events
:param: ifname: Interface name to lookout for netlink events
:raises: AssertionError if netlink_socket is None or ifname is None.
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
- assert (ifname is not None), ("interface name is none")
- assert (len(ifname) > 0), ("interface name cannot be empty")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
+ assert ifname is not None, "interface name is none"
+ assert len(ifname) > 0, "interface name cannot be empty"
def should_continue_cb(iname, carrier, prevCarrier):
# check for carrier down, up sequence
@@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
return True
LOG.debug("Wait for media disconnect and reconnect to happen")
- read_netlink_messages(netlink_socket,
- ifname,
- [RTM_NEWLINK, RTM_DELLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
-
-
-def read_netlink_messages(netlink_socket,
- ifname_filter,
- rtm_types,
- operstates,
- should_continue_callback):
- ''' Reads from the netlink socket until the condition specified by
+ read_netlink_messages(
+ netlink_socket,
+ ifname,
+ [RTM_NEWLINK, RTM_DELLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
+
+
+def read_netlink_messages(
+ netlink_socket,
+ ifname_filter,
+ rtm_types,
+ operstates,
+ should_continue_callback,
+):
+ """Reads from the netlink socket until the condition specified by
the continuation callback is met.
:param: netlink_socket: netlink_socket to receive events.
@@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket,
:param: rtm_types: Type of netlink events to listen for.
:param: operstates: Operational states to listen.
:param: should_continue_callback: Specifies when to stop listening.
- '''
+ """
if netlink_socket is None:
raise RuntimeError("Netlink socket is none")
data = bytes()
@@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket,
recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT)
if recv_data is None:
continue
- LOG.debug('read %d bytes from socket', len(recv_data))
+ LOG.debug("read %d bytes from socket", len(recv_data))
data += recv_data
- LOG.debug('Length of data after concat %d', len(data))
+ LOG.debug("Length of data after concat %d", len(data))
offset = 0
datalen = len(data)
while offset < datalen:
@@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket,
if len(nl_msg) < nlheader.length:
LOG.debug("Partial data. Smaller than netlink message")
break
- padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1)
+ padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~(
+ PAD_ALIGNMENT - 1
+ )
offset = offset + padlen
- LOG.debug('offset to next netlink message: %d', offset)
+ LOG.debug("offset to next netlink message: %d", offset)
# Continue if we are not interested in this message.
if nlheader.type not in rtm_types:
continue
interface_state = read_rta_oper_state(nl_msg)
if interface_state is None:
- LOG.debug('Failed to read rta attributes: %s', interface_state)
+ LOG.debug("Failed to read rta attributes: %s", interface_state)
continue
- if (ifname_filter is not None and
- interface_state.ifname != ifname_filter):
+ if (
+ ifname_filter is not None
+ and interface_state.ifname != ifname_filter
+ ):
LOG.debug(
"Ignored netlink event on interface %s. Waiting for %s.",
- interface_state.ifname, ifname_filter)
+ interface_state.ifname,
+ ifname_filter,
+ )
continue
if interface_state.operstate not in operstates:
continue
prevCarrier = carrier
carrier = interface_state.operstate
- if not should_continue_callback(interface_state.ifname,
- carrier,
- prevCarrier):
+ if not should_continue_callback(
+ interface_state.ifname, carrier, prevCarrier
+ ):
return
data = data[offset:]
+
# vi: ts=4 expandtab
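The two reflowed padding expressions above are easy to misread, so here is a standalone sketch of the alignment rules they implement, assuming the module's PAD_ALIGNMENT of 4:

PAD_ALIGNMENT = 4  # netlink attributes and messages are 4-byte aligned

def rta_pad(attr_length):
    # Per-attribute padding added after each RTA attribute (as in read_rta_oper_state).
    return (PAD_ALIGNMENT - (attr_length % PAD_ALIGNMENT)) % PAD_ALIGNMENT

def nlmsg_aligned(nlmsg_length):
    # Rounded-up length used to step to the next message (as in read_netlink_messages).
    return (nlmsg_length + PAD_ALIGNMENT - 1) & ~(PAD_ALIGNMENT - 1)

assert rta_pad(5) == 3 and rta_pad(8) == 0
assert nlmsg_aligned(17) == 20 and nlmsg_aligned(16) == 16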
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3e6365f1..a42543e4 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -14,11 +14,7 @@ import os
from cloudinit import ec2_utils
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__)
FILES_V1 = {
# Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+ "etc/network/interfaces": ("network_config", lambda x: x, ""),
+ "meta.js": ("meta_js", util.load_json, {}),
+ "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""),
}
KEY_COPIES = (
# Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
+ ("local-hostname", "hostname", False),
+ ("instance-id", "uuid", True),
)
# Versions and names taken from nova source nova/api/metadata/base.py
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
+OS_LATEST = "latest"
+OS_FOLSOM = "2012-08-10"
+OS_GRIZZLY = "2013-04-04"
+OS_HAVANA = "2013-10-17"
+OS_LIBERTY = "2015-10-15"
# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
-OS_NEWTON_ONE = '2016-06-30'
+OS_NEWTON_ONE = "2016-06-30"
# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
-OS_NEWTON_TWO = '2016-10-06'
+OS_NEWTON_TWO = "2016-10-06"
# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
-OS_OCATA = '2017-02-22'
+OS_OCATA = "2017-02-22"
# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
-OS_ROCKY = '2018-08-27'
+OS_ROCKY = "2018-08-27"
# keep this in chronological order. new supported versions go at the end.
@@ -67,18 +63,18 @@ OS_VERSIONS = (
KNOWN_PHYSICAL_TYPES = (
None,
- 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
- 'bridge',
- 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
- 'dvs',
- 'ethernet',
- 'hw_veb',
- 'hyperv',
- 'ovs',
- 'phy',
- 'tap',
- 'vhostuser',
- 'vif',
+ "bgpovs", # not present in OpenStack upstream but used on OVH cloud.
+ "bridge",
+ "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud
+ "dvs",
+ "ethernet",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "phy",
+ "tap",
+ "vhostuser",
+ "vif",
)
@@ -90,7 +86,7 @@ class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
+ bdm = self.ec2_metadata.get("block-device-mapping", {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
@@ -105,9 +101,9 @@ class SourceMixin(object):
def _os_name_to_device(self, name):
device = None
try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
+ criteria = "LABEL=%s" % (name)
+ if name == "swap":
+ criteria = "TYPE=%s" % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
@@ -135,10 +131,10 @@ class SourceMixin(object):
return None
# Try the ec2 mapping first
names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
+ if name == "root":
+ names.insert(0, "ami")
+ if name == "ami":
+ names.append("root")
device = None
LOG.debug("Using ec2 style lookup to find device %s", names)
for n in names:
@@ -163,7 +159,6 @@ class SourceMixin(object):
class BaseReader(metaclass=abc.ABCMeta):
-
def __init__(self, base_path):
self.base_path = base_path
@@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
versions_available = self._fetch_available_versions()
except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
+ LOG.debug(
+ "Unable to read openstack versions from %s due to: %s",
+ self.base_path,
+ e,
+ )
versions_available = []
# openstack.OS_VERSIONS is stored in chronological order, so
@@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta):
selected_version = potential_version
break
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
+ LOG.debug(
+ "Selected version '%s' from %s",
+ selected_version,
+ versions_available,
+ )
return selected_version
def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
+ path = item.get("content_path", "").lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
@@ -225,38 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list, str))
+ util.load_json, root_types=(dict, list, str)
+ )
def datafiles(version):
files = {}
- files['metadata'] = (
+ files["metadata"] = (
# File path to read
- self._path_join("openstack", version, 'meta_data.json'),
+ self._path_join("openstack", version, "meta_data.json"),
# Is it required?
True,
# Translator function (applied after loading)
util.load_json,
)
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
+ files["userdata"] = (
+ self._path_join("openstack", version, "user_data"),
False,
lambda x: x,
)
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
+ files["vendordata"] = (
+ self._path_join("openstack", version, "vendor_data.json"),
+ False,
+ load_json_anytype,
+ )
+ files["vendordata2"] = (
+ self._path_join("openstack", version, "vendor_data2.json"),
False,
load_json_anytype,
)
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
+ files["networkdata"] = (
+ self._path_join("openstack", version, "network_data.json"),
False,
load_json_anytype,
)
return files
results = {
- 'userdata': '',
- 'version': 2,
+ "userdata": "",
+ "version": 2,
}
data = datafiles(self._find_working_version())
for (name, (path, required, translator)) in data.items():
@@ -267,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta):
data = self._path_read(path)
except IOError as e:
if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading optional path %s due to: %s", path, e
+ )
else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading mandatory path %s due to: %s", path, e
+ )
else:
found = True
if required and not found:
@@ -286,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta):
if found:
results[name] = data
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
+ metadata = results["metadata"]
+ if "random_seed" in metadata:
+ random_seed = metadata["random_seed"]
try:
- metadata['random_seed'] = base64.b64decode(random_seed)
+ metadata["random_seed"] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
raise BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e
@@ -298,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta):
# load any files that were provided
files = {}
- metadata_files = metadata.get('files', [])
+ metadata_files = metadata.get("files", [])
for item in metadata_files:
- if 'path' not in item:
+ if "path" not in item:
continue
- path = item['path']
+ path = item["path"]
try:
files[path] = self._read_content_path(item)
except Exception as e:
raise BrokenMetadata(
"Failed to read provided file %s: %s" % (path, e)
) from e
- results['files'] = files
+ results["files"] = files
# The 'network_config' item in metadata is a content pointer
# to the network config that should be applied. It is just a
@@ -318,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta):
if net_item:
try:
content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
+ results["network_config"] = content
except IOError as e:
raise BrokenMetadata(
"Failed to read network configuration: %s" % (e)
@@ -329,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta):
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
- results['dsmode'] = metadata['meta']['dsmode']
+ results["dsmode"] = metadata["meta"]["dsmode"]
except KeyError:
pass
# Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
+ results["ec2-metadata"] = self._read_ec2_metadata()
# Perform some misc. metadata key renames...
for (target_key, source_key, is_required) in KEY_COPIES:
@@ -359,15 +368,19 @@ class ConfigDriveReader(BaseReader):
def _fetch_available_versions(self):
if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
+ path = self._path_join(self.base_path, "openstack")
+ found = [
+ d
+ for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path))
+ ]
self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
+ path = self._path_join(
+ self.base_path, "ec2", "latest", "meta-data.json"
+ )
if not os.path.exists(path):
return {}
else:
@@ -414,14 +427,14 @@ class ConfigDriveReader(BaseReader):
else:
md[key] = copy.deepcopy(default)
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
+ keydata = md["authorized_keys"]
+ meta_js = md["meta_js"]
# keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
+ keydata = meta_js.get("public-keys", keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [
+ md["public-keys"] = [
line
for line in lines
if len(line) and not line.startswith("#")
@@ -429,25 +442,25 @@ class ConfigDriveReader(BaseReader):
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
+ if "instance-id" in meta_js:
+ md["instance-id"] = meta_js["instance-id"]
results = {
- 'version': 1,
- 'metadata': md,
+ "version": 1,
+ "metadata": md,
}
# allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
+ if "dsmode" in meta_js:
+ results["dsmode"] = meta_js["dsmode"]
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
+ results["userdata"] = meta_js.get("user-data", "")
# this implementation does not support files other than
# network/interfaces and authorized_keys...
- results['files'] = {}
+ results["files"] = {}
return results
@@ -476,7 +489,6 @@ class MetadataReader(BaseReader):
return self._versions
def _path_read(self, path, decode=False):
-
def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
@@ -487,11 +499,13 @@ class MetadataReader(BaseReader):
pass
return True
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
+ response = url_helper.readurl(
+ path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb,
+ )
if decode:
return response.contents.decode()
else:
@@ -501,9 +515,11 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
+ return ec2_utils.get_instance_metadata(
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ retries=self.retries,
+ )
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
@@ -539,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None):
# dict of network_config key for filtering network_json
valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
+ "physical": [
+ "name",
+ "type",
+ "mac_address",
+ "subnets",
+ "params",
+ "mtu",
],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
+ "subnet": [
+ "type",
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "scope",
+ "dns_nameservers",
+ "dns_search",
+ "routes",
],
}
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
+ links = network_json.get("links", [])
+ networks = network_json.get("networks", [])
+ services = network_json.get("services", [])
link_updates = []
link_id_info = {}
@@ -573,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
+ cfg = dict(
+ (k, v) for k, v in link.items() if k in valid_keys["physical"]
+ )
# 'name' is not in openstack spec yet, but we will support it if it is
# present. The 'id' in the spec is currently implemented as the host
# nic's name, meaning something like 'tap-adfasdffd'. We do not want
# to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
+ if "name" in link:
+ cfg["name"] = link["name"]
link_mac_addr = None
- if link.get('ethernet_mac_address'):
- link_mac_addr = link.get('ethernet_mac_address').lower()
- link_id_info[link['id']] = link_mac_addr
-
- curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr,
- 'id': link['id'], 'type': link['type']}
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
-
- if network['type'] == 'ipv4_dhcp':
- subnet.update({'type': 'dhcp4'})
- elif network['type'] == 'ipv6_dhcp':
- subnet.update({'type': 'dhcp6'})
- elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']:
- subnet.update({'type': network['type']})
- elif network['type'] in ['ipv4', 'static']:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- elif network['type'] in ['ipv6', 'static6']:
- cfg.update({'accept-ra': False})
- subnet.update({
- 'type': 'static6',
- 'address': network.get('ip_address'),
- })
+ if link.get("ethernet_mac_address"):
+ link_mac_addr = link.get("ethernet_mac_address").lower()
+ link_id_info[link["id"]] = link_mac_addr
+
+ curinfo = {
+ "name": cfg.get("name"),
+ "mac": link_mac_addr,
+ "id": link["id"],
+ "type": link["type"],
+ }
+
+ for network in [n for n in networks if n["link"] == link["id"]]:
+ subnet = dict(
+ (k, v) for k, v in network.items() if k in valid_keys["subnet"]
+ )
+
+ if network["type"] == "ipv4_dhcp":
+ subnet.update({"type": "dhcp4"})
+ elif network["type"] == "ipv6_dhcp":
+ subnet.update({"type": "dhcp6"})
+ elif network["type"] in [
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+ ]:
+ subnet.update({"type": network["type"]})
+ elif network["type"] in ["ipv4", "static"]:
+ subnet.update(
+ {
+ "type": "static",
+ "address": network.get("ip_address"),
+ }
+ )
+ elif network["type"] in ["ipv6", "static6"]:
+ cfg.update({"accept-ra": False})
+ subnet.update(
+ {
+ "type": "static6",
+ "address": network.get("ip_address"),
+ }
+ )
# Enable accept_ra for stateful and legacy ipv6_dhcp types
- if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
- cfg.update({'accept-ra': True})
+ if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]:
+ cfg.update({"accept-ra": True})
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
+ if network["type"] == "ipv4":
+ subnet["ipv4"] = True
+ if network["type"] == "ipv6":
+ subnet["ipv6"] = True
subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['bond']:
+ cfg.update({"subnets": subnets})
+ if link["type"] in ["bond"]:
params = {}
if link_mac_addr:
- params['mac_address'] = link_mac_addr
+ params["mac_address"] = link_mac_addr
for k, v in link.items():
- if k == 'bond_links':
+ if k == "bond_links":
continue
- elif k.startswith('bond'):
+ elif k.startswith("bond"):
params.update({k: v})
# openstack does not provide a name for the bond.
@@ -644,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None):
# to the network config by their nic name.
# store that in bond_links_needed, and update these later.
link_updates.append(
- (cfg, 'bond_interfaces', '%s',
- copy.deepcopy(link['bond_links']))
+ (
+ cfg,
+ "bond_interfaces",
+ "%s",
+ copy.deepcopy(link["bond_links"]),
+ )
+ )
+ cfg.update({"params": params, "name": link_name})
+
+ curinfo["name"] = link_name
+ elif link["type"] in ["vlan"]:
+ name = "%s.%s" % (link["vlan_link"], link["vlan_id"])
+ cfg.update(
+ {
+ "name": name,
+ "vlan_id": link["vlan_id"],
+ "mac_address": link["vlan_mac_address"],
+ }
)
- cfg.update({'params': params, 'name': link_name})
-
- curinfo['name'] = link_name
- elif link['type'] in ['vlan']:
- name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
- cfg.update({
- 'name': name,
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
- link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
- link['vlan_link']))
- curinfo.update({'mac': link['vlan_mac_address'],
- 'name': name})
+ link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"]))
+ link_updates.append(
+ (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"])
+ )
+ curinfo.update({"mac": link["vlan_mac_address"], "name": name})
else:
- if link['type'] not in KNOWN_PHYSICAL_TYPES:
- LOG.warning('Unknown network_data link type (%s); treating as'
- ' physical', link['type'])
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
+ if link["type"] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning(
+ "Unknown network_data link type (%s); treating as"
+ " physical",
+ link["type"],
+ )
+ cfg.update({"type": "physical", "mac_address": link_mac_addr})
config.append(cfg)
- link_id_info[curinfo['id']] = curinfo
+ link_id_info[curinfo["id"]] = curinfo
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
+ need_names = [
+ d for d in config if d.get("type") == "physical" and "name" not in d
+ ]
if need_names or link_updates:
if known_macs is None:
@@ -680,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None):
# go through and fill out the link_id_info with names
for _link_id, info in link_id_info.items():
- if info.get('name'):
+ if info.get("name"):
continue
- if info.get('mac') in known_macs:
- info['name'] = known_macs[info['mac']]
+ if info.get("mac") in known_macs:
+ info["name"] = known_macs[info["mac"]]
for d in need_names:
- mac = d.get('mac_address')
+ mac = d.get("mac_address")
if not mac:
raise ValueError("No mac_address or name entry for %s" % d)
if mac not in known_macs:
raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
+ d["name"] = known_macs[mac]
for cfg, key, fmt, targets in link_updates:
if isinstance(targets, (list, tuple)):
cfg[key] = [
- fmt % link_id_info[target]['name'] for target in targets
+ fmt % link_id_info[target]["name"] for target in targets
]
else:
- cfg[key] = fmt % link_id_info[targets]['name']
+ cfg[key] = fmt % link_id_info[targets]["name"]
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
@@ -708,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None):
ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
if ib_known_hwaddrs:
for cfg in config:
- if cfg['name'] in ib_known_hwaddrs:
- cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
- cfg['type'] = 'infiniband'
+ if cfg["name"] in ib_known_hwaddrs:
+ cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]]
+ cfg["type"] = "infiniband"
for service in services:
cfg = service
- cfg.update({'type': 'nameserver'})
+ cfg.update({"type": "nameserver"})
config.append(cfg)
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
+
# vi: ts=4 expandtab
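The functional addition hidden in this mostly cosmetic hunk is the vendordata2 entry in datafiles(), so vendor_data2.json is now fetched alongside the existing per-version files. A compact sketch of the resulting mapping; <version> stands for the OpenStack metadata version selected by _find_working_version():

datafiles_summary = {
    "metadata":    ("openstack/<version>/meta_data.json",    "required, JSON"),
    "userdata":    ("openstack/<version>/user_data",         "optional, raw"),
    "vendordata":  ("openstack/<version>/vendor_data.json",  "optional, JSON"),
    "vendordata2": ("openstack/<version>/vendor_data2.json", "optional, JSON, new here"),
    "networkdata": ("openstack/<version>/network_data.json", "optional, JSON"),
}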
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
deleted file mode 100644
index 2bde1e3f..00000000
--- a/cloudinit/sources/helpers/tests/test_openstack.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-# ./cloudinit/sources/helpers/tests/test_openstack.py
-
-from cloudinit.sources.helpers import openstack
-from cloudinit.tests import helpers as test_helpers
-
-
-class TestConvertNetJson(test_helpers.CiTestCase):
-
- def test_phy_types(self):
- """Verify the different known physical types are handled."""
- # network_data.json example from
- # https://docs.openstack.org/nova/latest/user/metadata.html
- mac0 = "fa:16:3e:9c:bf:3d"
- net_json = {
- "links": [
- {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
- "mtu": None, "type": "bridge",
- "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
- ],
- "networks": [
- {"id": "network0", "link": "tapcd9f6d46-4a",
- "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
- "type": "ipv4_dhcp"}
- ],
- "services": [{"address": "8.8.8.8", "type": "dns"}]
- }
- macs = {mac0: 'eth0'}
-
- expected = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:9c:bf:3d',
- 'mtu': None, 'name': 'eth0',
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical'},
- {'address': '8.8.8.8', 'type': 'nameserver'}]}
-
- for t in openstack.KNOWN_PHYSICAL_TYPES:
- net_json["links"][0]["type"] = t
- self.assertEqual(
- expected,
- openstack.convert_net_json(network_json=net_json,
- known_macs=macs))
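The removed unit test remains a useful reference for convert_net_json(); a condensed, runnable version of the same round trip, with values taken directly from the deleted test:

from cloudinit.sources.helpers import openstack

mac0 = "fa:16:3e:9c:bf:3d"
net_json = {
    "links": [
        {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", "mtu": None,
         "type": "bridge", "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
    ],
    "networks": [
        {"id": "network0", "link": "tapcd9f6d46-4a",
         "network_id": "99e88329-f20d-4741-9593-25bf07847b16", "type": "ipv4_dhcp"}
    ],
    "services": [{"address": "8.8.8.8", "type": "dns"}],
}
cfg = openstack.convert_net_json(network_json=net_json, known_macs={mac0: "eth0"})
# cfg == {"version": 1, "config": [
#     {"type": "physical", "name": "eth0", "mac_address": mac0, "mtu": None,
#      "subnets": [{"type": "dhcp4"}]},
#     {"type": "nameserver", "address": "8.8.8.8"}]}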
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
new file mode 100644
index 00000000..e7b95a5e
--- /dev/null
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -0,0 +1,229 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+def convert_to_network_config_v1(config):
+ """
+ Convert the UpCloud network metadata description into
+ Cloud-init's version 1 netconfig format.
+
+ Example JSON:
+ {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": [],
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "32:d5:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "32:d5:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "32:d5:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "32:d5:ba:4a:8a:e1",
+ "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ }
+ """
+
+ def _get_subnet_config(ip_addr, dns):
+ if ip_addr.get("dhcp"):
+ dhcp_type = "dhcp"
+ if ip_addr.get("family") == "IPv6":
+ # UpCloud currently passes IPv6 addresses via
+ # StateLess Address Auto Configuration (SLAAC)
+ dhcp_type = "ipv6_dhcpv6-stateless"
+ return {"type": dhcp_type}
+
+ static_type = "static"
+ if ip_addr.get("family") == "IPv6":
+ static_type = "static6"
+ subpart = {
+ "type": static_type,
+ "control": "auto",
+ "address": ip_addr.get("address"),
+ }
+
+ if ip_addr.get("gateway"):
+ subpart["gateway"] = ip_addr.get("gateway")
+
+ if "/" in ip_addr.get("network"):
+ subpart["netmask"] = ip_addr.get("network").split("/")[1]
+
+ if dns != ip_addr.get("dns") and ip_addr.get("dns"):
+ subpart["dns_nameservers"] = ip_addr.get("dns")
+
+ return subpart
+
+ nic_configs = []
+ macs_to_interfaces = cloudnet.get_interfaces_by_mac()
+ LOG.debug("NIC mapping: %s", macs_to_interfaces)
+
+ for raw_iface in config.get("interfaces"):
+ LOG.debug("Considering %s", raw_iface)
+
+ mac_address = raw_iface.get("mac")
+ if mac_address not in macs_to_interfaces:
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, raw_iface)
+ )
+
+ iface_type = raw_iface.get("type")
+ sysfs_name = macs_to_interfaces.get(mac_address)
+
+ LOG.debug(
+ "Found %s interface '%s' with address '%s' (index %d)",
+ iface_type,
+ sysfs_name,
+ mac_address,
+ raw_iface.get("index"),
+ )
+
+ interface = {
+ "type": "physical",
+ "name": sysfs_name,
+ "mac_address": mac_address,
+ }
+
+ subnets = []
+ for ip_address in raw_iface.get("ip_addresses"):
+ sub_part = _get_subnet_config(ip_address, config.get("dns"))
+ subnets.append(sub_part)
+
+ interface["subnets"] = subnets
+ nic_configs.append(interface)
+
+ if config.get("dns"):
+ LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
+ nic_configs.append(
+ {"type": "nameserver", "address": config.get("dns")}
+ )
+
+ return {"version": 1, "config": nic_configs}
+
+
+def convert_network_config(config):
+ return convert_to_network_config_v1(config)
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # UpCloud embeds vendor ID and server UUID in the
+ # SMBIOS information
+
+ # Detect if we are on UpCloud and return the UUID
+
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name != "UpCloud":
+ return False, None
+
+ server_uuid = dmi.read_dmi_data("system-uuid")
+ if server_uuid:
+ LOG.debug(
+ "system identified via SMBIOS as UpCloud server: %s", server_uuid
+ )
+ else:
+ msg = (
+ "system identified via SMBIOS as a UpCloud server, but "
+ "did not provide an ID. Please contact support via"
+ "https://hub.upcloud.com or via email with support@upcloud.com"
+ )
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return True, server_uuid
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 9a07eafa..a5c67bb7 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -9,7 +9,8 @@
class BootProtoEnum(object):
"""Specifies the NIC Boot Settings."""
- DHCP = 'dhcp'
- STATIC = 'static'
+ DHCP = "dhcp"
+ STATIC = "static"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 7109aef3..39dacee0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -15,18 +15,20 @@ class Config(object):
Specification file.
"""
- CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
- DNS = 'DNS|NAMESERVER|'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
- HOSTNAME = 'NETWORK|HOSTNAME'
- MARKERID = 'MISC|MARKER-ID'
- PASS = 'PASSWORD|-PASS'
- RESETPASS = 'PASSWORD|RESET'
- SUFFIX = 'DNS|SUFFIX|'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- POST_GC_STATUS = 'MISC|POST-GC-STATUS'
- DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
+ CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
+ DNS = "DNS|NAMESERVER|"
+ DOMAINNAME = "NETWORK|DOMAINNAME"
+ HOSTNAME = "NETWORK|HOSTNAME"
+ MARKERID = "MISC|MARKER-ID"
+ PASS = "PASSWORD|-PASS"
+ RESETPASS = "PASSWORD|RESET"
+ SUFFIX = "DNS|SUFFIX|"
+ TIMEZONE = "DATETIME|TIMEZONE"
+ UTC = "DATETIME|UTC"
+ POST_GC_STATUS = "MISC|POST-GC-STATUS"
+ DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
+ CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
+ CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
def __init__(self, configFile):
self._configFile = configFile
@@ -82,8 +84,8 @@ class Config(object):
def nics(self):
"""Return the list of associated NICs."""
res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
+ nics = self._configFile["NIC-CONFIG|NICS"]
+ for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@@ -91,11 +93,11 @@ class Config(object):
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
- resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
- if resetPass not in ('yes', 'no'):
- raise ValueError('ResetPassword value should be yes/no')
- return resetPass == 'yes'
+ if resetPass not in ("yes", "no"):
+ raise ValueError("ResetPassword value should be yes/no")
+ return resetPass == "yes"
@property
def marker_id(self):
@@ -110,11 +112,11 @@ class Config(object):
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
- postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
- if postGcStatus not in ('yes', 'no'):
- raise ValueError('PostGcStatus value should be yes/no')
- return postGcStatus == 'yes'
+ if postGcStatus not in ("yes", "no"):
+ raise ValueError("PostGcStatus value should be yes/no")
+ return postGcStatus == "yes"
@property
def default_run_post_script(self):
@@ -123,11 +125,22 @@ class Config(object):
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
- Config.DEFAULT_RUN_POST_SCRIPT,
- 'no')
+ Config.DEFAULT_RUN_POST_SCRIPT, "no"
+ )
defaultRunPostScript = defaultRunPostScript.lower()
- if defaultRunPostScript not in ('yes', 'no'):
- raise ValueError('defaultRunPostScript value should be yes/no')
- return defaultRunPostScript == 'yes'
+ if defaultRunPostScript not in ("yes", "no"):
+ raise ValueError("defaultRunPostScript value should be yes/no")
+ return defaultRunPostScript == "yes"
+
+ @property
+ def meta_data_name(self):
+ """Return the name of cloud-init meta data."""
+ return self._configFile.get(Config.CLOUDINIT_META_DATA, None)
+
+ @property
+ def user_data_name(self):
+ """Return the name of cloud-init user data."""
+ return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
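The new CLOUDINIT|METADATA and CLOUDINIT|USERDATA keys let a customization spec name cloud-init payload files directly. A hedged illustration of reading them back through the new properties; the file path, section contents and values are hypothetical:

from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

# Hypothetical cust.cfg fragment:
#   [CLOUDINIT]
#   METADATA = metadata.yaml
#   USERDATA = userdata.yaml
conf = Config(ConfigFile("/tmp/cust.cfg"))  # path is illustrative
print(conf.meta_data_name)   # "metadata.yaml", or None when the key is absent
print(conf.user_data_name)   # "userdata.yaml", or None when the key is absent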
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 2ab22de9..8240ea8f 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,8 +9,7 @@ import logging
import os
import stat
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -24,8 +23,7 @@ class CustomScriptConstant(object):
# The user defined custom script
CUSTOM_SCRIPT_NAME = "customize.sh"
- CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
- CUSTOM_SCRIPT_NAME)
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME)
POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
# The cc_scripts_per_instance script to launch custom script
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
@@ -39,22 +37,25 @@ class RunCustomScript(object):
def prepare_script(self):
if not os.path.exists(self.scriptpath):
- raise CustomScriptNotFound("Script %s not found!! "
- "Cannot execute custom script!"
- % self.scriptpath)
+ raise CustomScriptNotFound(
+ "Script %s not found!! Cannot execute custom script!"
+ % self.scriptpath
+ )
util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
- LOG.debug("Copying custom script to %s",
- CustomScriptConstant.CUSTOM_SCRIPT)
+ LOG.debug(
+ "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT
+ )
util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
# Strip any CR characters from the decoded script
- content = util.load_file(
- CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
- util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
- content,
- mode=0o544)
+ content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace(
+ "\r", ""
+ )
+ util.write_file(
+ CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544
+ )
class PreCustomScript(RunCustomScript):
@@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript):
super(PostCustomScript, self).__init__(scriptname, directory)
self.ccScriptsDir = ccScriptsDir
self.ccScriptPath = os.path.join(
- ccScriptsDir,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
+ ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ )
def execute(self):
"""
@@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript):
"""
self.prepare_script()
- LOG.debug("Copying post customize run script to %s",
- self.ccScriptPath)
+ LOG.debug("Copying post customize run script to %s", self.ccScriptPath)
util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
- self.ccScriptPath)
+ os.path.join(
+ self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ ),
+ self.ccScriptPath,
+ )
st = os.stat(self.ccScriptPath)
os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
LOG.info("Creating post customization pending marker")
util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index fc034c95..845294ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict):
key = key.strip()
val = val.strip()
- if key.startswith('-') or '|-' in key:
+ if key.startswith("-") or "|-" in key:
canLog = False
else:
canLog = True
@@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.', filename)
+ logger.info("Parsing the config file %s.", filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict):
logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
+ self._insertKey(category + "|" + key, value)
def should_keep_current_value(self, key):
"""
@@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict):
"""
return len([key for key in self if key.startswith(prefix)])
+
# vi: ts=4 expandtab
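As a sketch of the flattening shown in this hunk (category and key joined with "|"), assuming ConfigFile is constructed with the path to a cust.cfg-style INI file; the file contents below are invented for the example.

    # Sketch only: write a tiny cust.cfg and inspect the flattened keys.
    import tempfile

    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    with tempfile.NamedTemporaryFile("w", suffix=".cfg", delete=False) as f:
        f.write("[NETWORK]\nHOSTNAME = vm01\nDOMAINNAME = example.org\n")
        path = f.name

    cf = ConfigFile(path)          # path argument assumed by this sketch
    print(cf["NETWORK|HOSTNAME"])  # "vm01"
    print(cf.get_count_with_prefix("NETWORK|"))  # 2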
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 5899d8f7..3b3b2d5a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -11,4 +11,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 9cd2c0c0..6c135f48 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,9 +9,8 @@ import logging
import os
import re
-from cloudinit.net.network_state import mask_to_net_prefix
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
+from cloudinit.net.network_state import ipv4_mask_to_net_prefix
logger = logging.getLogger(__name__)
@@ -63,8 +62,10 @@ class NicConfigurator(object):
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
+ raise Exception(
+ "There can only be one primary nic",
+ [nic.mac for nic in primary_nics],
+ )
else:
return primary_nics[0]
@@ -73,17 +74,17 @@ class NicConfigurator(object):
Create the mac2Name dictionary
The MAC address(es) are in lower case
"""
- cmd = ['ip', 'addr', 'show']
+ cmd = ["ip", "addr", "show"]
output, _err = subp.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+ sections = re.split(r"\n\d+: ", "\n" + output)[1:]
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
for section in sections:
match = re.search(macPat, section)
if not match: # Only keep info about nics
continue
mac = match.group(1).lower()
- name = section.split(':', 1)[0]
+ name = section.split(":", 1)[0]
self.mac2Name[mac] = name
def gen_one_nic(self, nic):
@@ -95,11 +96,11 @@ class NicConfigurator(object):
mac = nic.mac.lower()
name = self.mac2Name.get(mac)
if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
+ raise ValueError("No known device has MACADDR: %s" % nic.mac)
nics_cfg_list = []
- cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ cfg = {"type": "physical", "name": name, "mac_address": mac}
subnet_list = []
route_list = []
@@ -114,7 +115,7 @@ class NicConfigurator(object):
subnet_list.extend(subnets)
route_list.extend(routes)
- cfg.update({'subnets': subnet_list})
+ cfg.update({"subnets": subnet_list})
nics_cfg_list.append(cfg)
if route_list:
@@ -135,17 +136,17 @@ class NicConfigurator(object):
route_list = []
if nic.onboot:
- subnet.update({'control': 'auto'})
+ subnet.update({"control": "auto"})
bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
+ if nic.ipv4_mode.lower() == "disabled":
+ bootproto = "manual"
- if bootproto != 'static':
- subnet.update({'type': 'dhcp'})
+ if bootproto != "static":
+ subnet.update({"type": "dhcp"})
return ([subnet], route_list)
else:
- subnet.update({'type': 'static'})
+ subnet.update({"type": "static"})
# Static Ipv4
addrs = nic.staticIpv4
@@ -154,20 +155,21 @@ class NicConfigurator(object):
v4 = addrs[0]
if v4.ip:
- subnet.update({'address': v4.ip})
+ subnet.update({"address": v4.ip})
if v4.netmask:
- subnet.update({'netmask': v4.netmask})
+ subnet.update({"netmask": v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- subnet.update({'gateway': self.ipv4PrimaryGateway})
+ subnet.update({"gateway": self.ipv4PrimaryGateway})
return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic and v4.gateways:
subnet.update(
- {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)})
+ {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}
+ )
return ([subnet], route_list)
@@ -180,14 +182,18 @@ class NicConfigurator(object):
"""
route_list = []
- cidr = mask_to_net_prefix(netmask)
+ cidr = ipv4_mask_to_net_prefix(netmask)
for gateway in gateways:
destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
- route_list.append({'destination': destination,
- 'type': 'route',
- 'gateway': gateway,
- 'metric': 10000})
+ route_list.append(
+ {
+ "destination": destination,
+ "type": "route",
+ "gateway": gateway,
+ "metric": 10000,
+ }
+ )
return route_list
@@ -208,9 +214,11 @@ class NicConfigurator(object):
addrs = nic.staticIpv6
for addr in addrs:
- subnet = {'type': 'static6',
- 'address': addr.ip,
- 'netmask': addr.netmask}
+ subnet = {
+ "type": "static6",
+ "address": addr.ip,
+ "netmask": addr.netmask,
+ }
subnet_list.append(subnet)
# TODO: Add the primary gateway
@@ -226,9 +234,9 @@ class NicConfigurator(object):
route_list = []
for addr in addrs:
- route_list.append({'type': 'route',
- 'gateway': addr.gateway,
- 'metric': 10000})
+ route_list.append(
+ {"type": "route", "gateway": addr.gateway, "metric": 10000}
+ )
return route_list
@@ -246,7 +254,7 @@ class NicConfigurator(object):
return nics_cfg_list
def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
+ logger.info("Clearing DHCP leases")
# Ignore the return code 1.
subp.subp(["pkill", "dhclient"], rcs=[0, 1])
@@ -262,11 +270,12 @@ class NicConfigurator(object):
logger.info("Debian OS not detected. Skipping the configure step")
return
- containingDir = '/etc/network'
+ containingDir = "/etc/network"
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
+ interfaceFile = os.path.join(containingDir, "interfaces")
+ originalFile = os.path.join(
+ containingDir, "interfaces.before_vmware_customization"
+ )
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
@@ -274,12 +283,13 @@ class NicConfigurator(object):
lines = [
"# DO NOT EDIT THIS FILE BY HAND --"
" AUTOMATICALLY GENERATED BY cloud-init",
- "source /etc/network/interfaces.d/*.cfg",
+ "source /etc/network/interfaces.d/*",
"source-directory /etc/network/interfaces.d",
]
- util.write_file(interfaceFile, content='\n'.join(lines))
+ util.write_file(interfaceFile, content="\n".join(lines))
self.clear_dhcp()
+
# vi: ts=4 expandtab
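A small sketch of the helper this hunk switches to, ipv4_mask_to_net_prefix, which the route generation above uses to turn a dotted netmask into a prefix length; the sample addresses are illustrative values only.

    # Sketch only: dotted netmask to prefix length, as used by gen_ipv4_route().
    from cloudinit.net.network_state import ipv4_mask_to_net_prefix

    prefix = ipv4_mask_to_net_prefix("255.255.255.0")
    destination = "%s/%d" % ("192.168.10.0", prefix)
    print(destination)  # 192.168.10.0/24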
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index d16a7690..4d3967a1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,8 +9,7 @@
import logging
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -20,6 +19,7 @@ class PasswordConfigurator(object):
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
"""
+
def configure(self, passwd, resetPasswd, distro):
"""
Main method to perform all functionalities based on configuration file
@@ -28,25 +28,25 @@ class PasswordConfigurator(object):
@param resetPasswd: boolean to determine if password needs to be reset.
@return cfg: dict to be used by cloud-init set_passwd code.
"""
- LOG.info('Starting password configuration')
+ LOG.info("Starting password configuration")
if passwd:
passwd = util.b64d(passwd)
allRootUsers = []
- for line in open('/etc/passwd', 'r'):
- if line.split(':')[2] == '0':
- allRootUsers.append(line.split(':')[0])
+ for line in open("/etc/passwd", "r"):
+ if line.split(":")[2] == "0":
+ allRootUsers.append(line.split(":")[0])
# read shadow file and check for each user, if its uid0 or root.
uidUsersList = []
- for line in open('/etc/shadow', 'r'):
- user = line.split(':')[0]
+ for line in open("/etc/shadow", "r"):
+ user = line.split(":")[0]
if user in allRootUsers:
uidUsersList.append(user)
if passwd:
- LOG.info('Setting admin password')
- distro.set_passwd('root', passwd)
+ LOG.info("Setting admin password")
+ distro.set_passwd("root", passwd)
if resetPasswd:
self.reset_password(uidUsersList)
- LOG.info('Configure Password completed!')
+ LOG.info("Configure Password completed!")
def reset_password(self, uidUserList):
"""
@@ -54,15 +54,19 @@ class PasswordConfigurator(object):
not succeeded using passwd command. Log failure message otherwise.
@param: list of users for which to expire password.
"""
- LOG.info('Expiring password.')
+ LOG.info("Expiring password.")
for user in uidUserList:
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError as e:
- if os.path.exists('/usr/bin/chage'):
- subp.subp(['chage', '-d', '0', user])
+ if os.path.exists("/usr/bin/chage"):
+ subp.subp(["chage", "-d", "0", user])
else:
- LOG.warning('Failed to expire password for %s with error: '
- '%s', user, e)
+ LOG.warning(
+ "Failed to expire password for %s with error: %s",
+ user,
+ e,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 7ec06a9c..e99f9b43 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -9,4 +9,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 65ae7390..eda84cfb 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -11,5 +11,7 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SUCCESS = 0
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
+ GUESTCUST_ERROR_WRONG_META_FORMAT = 9
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index e84c1cb0..33169a7e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -14,4 +14,5 @@ class GuestCustEventEnum(object):
GUESTCUST_EVENT_ENABLE_NICS = 103
GUESTCUST_EVENT_QUERY_NICS = 104
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index a8211dea..c74fbc8b 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -12,4 +12,5 @@ class GuestCustStateEnum(object):
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index d919f693..08763e62 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath):
if not os.path.exists(nicsfilepath):
return None
- with open(nicsfilepath, 'r') as fp:
+ with open(nicsfilepath, "r") as fp:
nics = fp.read(NICS_SIZE)
return nics
@@ -95,7 +95,8 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
+ nics,
+ )
if not out:
time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
continue
@@ -108,32 +109,36 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
+ nics,
+ )
if out and out == NICS_STATUS_CONNECTED:
logger.info("NICS are connected on %d second", count)
return
time.sleep(enableNicsWaitSeconds)
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
+ logger.warning(
+ "Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries,
+ )
def get_tools_config(section, key, defaultVal):
- """ Return the value of [section] key from VMTools configuration.
+ """Return the value of [section] key from VMTools configuration.
- @param section: String of section to read from VMTools config
- @returns: String value from key in [section] or defaultVal if
- [section] is not present or vmware-toolbox-cmd is
- not installed.
+ @param section: String of section to read from VMTools config
+ @returns: String value from key in [section] or defaultVal if
+ [section] is not present or vmware-toolbox-cmd is
+ not installed.
"""
- if not subp.which('vmware-toolbox-cmd'):
+ if not subp.which("vmware-toolbox-cmd"):
logger.debug(
- 'vmware-toolbox-cmd not installed, returning default value')
+ "vmware-toolbox-cmd not installed, returning default value"
+ )
return defaultVal
- cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+ cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
(outText, _) = subp.subp(cmd)
@@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal):
if e.exit_code == 69:
logger.debug(
"vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
- " Return default value: %s", " ".join(cmd), defaultVal)
+ " Return default value: %s",
+ " ".join(cmd),
+ defaultVal,
+ )
else:
logger.error("Failed running %s[%s]", cmd, e.exit_code)
logger.exception(e)
return defaultVal
retValue = defaultVal
- m = re.match(r'([^=]+)=(.*)', outText)
+ m = re.match(r"([^=]+)=(.*)", outText)
if m:
retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
+ logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
else:
logger.debug(
"Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
+ section,
+ key,
+ retValue,
+ )
return retValue
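For illustration, how get_tools_config reads a VMTools setting with a fallback default; the section and key names used here are assumptions for the example, not values mandated by this patch.

    # Sketch only: returns the configured value, or the default when
    # vmware-toolbox-cmd is missing or the key is not set.
    from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
        get_tools_config,
    )

    enabled = get_tools_config("deploypkg", "enable-custom-scripts", "false")
    print(enabled)  # e.g. "false" on a host without open-vm-tools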
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index d793bdeb..673204a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,18 +18,19 @@ class Ipv4ModeEnum(object):
# The legacy mode which only allows dhcp/static based on whether IPv4
# addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+ IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
# IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
+ IPV4_MODE_STATIC = "STATIC"
# IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
+ IPV4_MODE_DHCP = "DHCP"
# IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
+ IPV4_MODE_DISABLED = "DISABLED"
# IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
+ IPV4_MODE_AS_IS = "AS_IS"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index ef8f87f7..7b742d0f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -20,7 +20,7 @@ class Nic(NicBase):
self._configFile = configFile
def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
+ return self._configFile.get(self.name + "|" + what, None)
def _get_count_with_prefix(self, prefix):
return self._configFile.get_count_with_prefix(self.name + prefix)
@@ -31,29 +31,29 @@ class Nic(NicBase):
@property
def mac(self):
- return self._get('MACADDR').lower()
+ return self._get("MACADDR").lower()
@property
def primary(self):
- value = self._get('PRIMARY')
+ value = self._get("PRIMARY")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def onboot(self):
- value = self._get('ONBOOT')
+ value = self._get("ONBOOT")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def bootProto(self):
- value = self._get('BOOTPROTO')
+ value = self._get("BOOTPROTO")
if value:
return value.lower()
else:
@@ -61,7 +61,7 @@ class Nic(NicBase):
@property
def ipv4_mode(self):
- value = self._get('IPv4_MODE')
+ value = self._get("IPv4_MODE")
if value:
return value.lower()
else:
@@ -80,7 +80,7 @@ class Nic(NicBase):
@property
def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
+ cnt = self._get_count_with_prefix("|IPv6ADDR|")
if not cnt:
return None
@@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base):
@property
def ip(self):
- return self._nic._get('IPADDR')
+ return self._nic._get("IPADDR")
@property
def netmask(self):
- return self._nic._get('NETMASK')
+ return self._nic._get("NETMASK")
@property
def gateways(self):
- value = self._nic._get('GATEWAY')
+ value = self._nic._get("GATEWAY")
if value:
- return [x.strip() for x in value.split(',')]
+ return [x.strip() for x in value.split(",")]
else:
return None
@@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base):
@property
def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
+ return self._nic._get("IPv6ADDR|" + str(self._index))
@property
def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
+ return self._nic._get("IPv6NETMASK|" + str(self._index))
@property
def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
+ return self._nic._get("IPv6GATEWAY|" + str(self._index))
+
# vi: ts=4 expandtab
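An illustrative sketch of how the per-NIC keys read above resolve through the Nic accessors; the flat dict is an assumed stand-in for the parsed ConfigFile, and the MAC value is fabricated.

    # Sketch only: the Nic properties read "<name>|<KEY>" entries.
    from cloudinit.sources.helpers.vmware.imc.nic import Nic

    raw = {
        "NIC1|MACADDR": "00:50:56:AA:BB:CC",
        "NIC1|PRIMARY": "yes",
        "NIC1|ONBOOT": "yes",
        "NIC1|BOOTPROTO": "dhcp",
    }
    nic = Nic("NIC1", raw)
    print(nic.mac)        # "00:50:56:aa:bb:cc"
    print(nic.primary)    # True
    print(nic.bootProto)  # "dhcp"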
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index de7b866d..37d9602f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -18,7 +18,7 @@ class NicBase(object):
Retrieves the mac address of the nic
@return (str) : the MACADDR setting
"""
- raise NotImplementedError('MACADDR')
+ raise NotImplementedError("MACADDR")
@property
def primary(self):
@@ -29,7 +29,7 @@ class NicBase(object):
be set.
@return (bool): the PRIMARY setting
"""
- raise NotImplementedError('PRIMARY')
+ raise NotImplementedError("PRIMARY")
@property
def onboot(self):
@@ -37,7 +37,7 @@ class NicBase(object):
Retrieves whether the nic should be up at the boot time
@return (bool) : the ONBOOT setting
"""
- raise NotImplementedError('ONBOOT')
+ raise NotImplementedError("ONBOOT")
@property
def bootProto(self):
@@ -45,7 +45,7 @@ class NicBase(object):
Retrieves the boot protocol of the nic
@return (str): the BOOTPROTO setting, valid values: dhcp and static.
"""
- raise NotImplementedError('BOOTPROTO')
+ raise NotImplementedError("BOOTPROTO")
@property
def ipv4_mode(self):
@@ -54,7 +54,7 @@ class NicBase(object):
@return (str): the IPv4_MODE setting, valid values:
backwards_compatible, static, dhcp, disabled, as_is
"""
- raise NotImplementedError('IPv4_MODE')
+ raise NotImplementedError("IPv4_MODE")
@property
def staticIpv4(self):
@@ -62,7 +62,7 @@ class NicBase(object):
Retrieves the static IPv4 configuration of the nic
@return (StaticIpv4Base list): the static ipv4 setting
"""
- raise NotImplementedError('Static IPv4')
+ raise NotImplementedError("Static IPv4")
@property
def staticIpv6(self):
@@ -70,7 +70,7 @@ class NicBase(object):
Retrieves the IPv6 configuration of the nic
@return (StaticIpv6Base list): the static ipv6 setting
"""
- raise NotImplementedError('Static Ipv6')
+ raise NotImplementedError("Static Ipv6")
def validate(self):
"""
@@ -78,7 +78,7 @@ class NicBase(object):
For example, the staticIpv4 property is required and should not be
empty when ipv4Mode is STATIC
"""
- raise NotImplementedError('Check constraints on properties')
+ raise NotImplementedError("Check constraints on properties")
class StaticIpv4Base(object):
@@ -93,7 +93,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 address
@return (str): the IPADDR setting
"""
- raise NotImplementedError('Ipv4 Address')
+ raise NotImplementedError("Ipv4 Address")
@property
def netmask(self):
@@ -101,7 +101,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 NETMASK setting
@return (str): the NETMASK setting
"""
- raise NotImplementedError('Ipv4 NETMASK')
+ raise NotImplementedError("Ipv4 NETMASK")
@property
def gateways(self):
@@ -109,7 +109,7 @@ class StaticIpv4Base(object):
Retrieves the gateways on this Ipv4 subnet
@return (str list): the GATEWAY setting
"""
- raise NotImplementedError('Ipv4 GATEWAY')
+ raise NotImplementedError("Ipv4 GATEWAY")
class StaticIpv6Base(object):
@@ -123,7 +123,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 address
@return (str): the IPv6ADDR setting
"""
- raise NotImplementedError('Ipv6 Address')
+ raise NotImplementedError("Ipv6 Address")
@property
def netmask(self):
@@ -131,7 +131,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 NETMASK setting
@return (str): the IPv6NETMASK setting
"""
- raise NotImplementedError('Ipv6 NETMASK')
+ raise NotImplementedError("Ipv6 NETMASK")
@property
def gateway(self):
@@ -139,6 +139,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 GATEWAY setting
@return (str): the IPv6GATEWAY setting
"""
- raise NotImplementedError('Ipv6 GATEWAY')
+ raise NotImplementedError("Ipv6 GATEWAY")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
new file mode 100644
index 00000000..88a21034
--- /dev/null
+++ b/cloudinit/sources/helpers/vultr.py
@@ -0,0 +1,230 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+from functools import lru_cache
+
+from cloudinit import dmi
+from cloudinit import log as log
+from cloudinit import net, subp, url_helper, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+
+# Get LOG
+LOG = log.getLogger(__name__)
+
+
+@lru_cache()
+def get_metadata(url, timeout, retries, sec_between, agent):
+    # Bring up interface (and try until one works)
+ exception = RuntimeError("Failed to DHCP")
+
+ # Seek iface with DHCP
+ for iface in net.get_interfaces():
+ # Skip dummy, lo interfaces
+ if "dummy" in iface[0]:
+ continue
+ if "lo" == iface[0]:
+ continue
+ try:
+ with EphemeralDHCPv4(
+ iface=iface[0], connectivity_url_data={"url": url}
+ ):
+ # Fetch the metadata
+ v1 = read_metadata(url, timeout, retries, sec_between, agent)
+
+ return json.loads(v1)
+ except (NoDHCPLeaseError, subp.ProcessExecutionError) as exc:
+ LOG.error("DHCP Exception: %s", exc)
+ exception = exc
+ raise exception
+
+
+# Read the system information from SMBIOS
+def get_sysinfo():
+ return {
+ "manufacturer": dmi.read_dmi_data("system-manufacturer"),
+ "subid": dmi.read_dmi_data("system-serial-number"),
+ }
+
+
+# Assumes the caller has already checked that this is Vultr
+def is_baremetal():
+ if get_sysinfo()["manufacturer"] != "Vultr":
+ return True
+ return False
+
+
+# Confirm this is Vultr
+def is_vultr():
+ # VC2, VDC, and HFC use DMI
+ sysinfo = get_sysinfo()
+
+ if sysinfo["manufacturer"] == "Vultr":
+ return True
+
+ # Baremetal requires a kernel parameter
+ if "vultr" in util.get_cmdline().split():
+ return True
+
+ return False
+
+
+# Read Metadata endpoint
+def read_metadata(url, timeout, retries, sec_between, agent):
+ url = "%s/v1.json" % url
+
+    # Announce OS details so we can handle non-Vultr origin
+ # images and provide correct vendordata generation.
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent}
+
+ response = url_helper.readurl(
+ url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ sec_between=sec_between,
+ )
+
+ if not response.ok():
+ raise RuntimeError(
+ "Failed to connect to %s: Code: %s" % url, response.code
+ )
+
+ return response.contents.decode()
+
+
+# Wrapped for caching
+@lru_cache()
+def get_interface_map():
+ return net.get_interfaces_by_mac()
+
+
+# Convert macs to nics
+def get_interface_name(mac):
+ macs_to_nic = get_interface_map()
+
+ if mac not in macs_to_nic:
+ return None
+
+ return macs_to_nic.get(mac)
+
+
+# Generate network configs
+def generate_network_config(interfaces):
+ network = {
+ "version": 1,
+ "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
+ }
+
+ # Prepare interface 0, public
+ if len(interfaces) > 0:
+ public = generate_interface(interfaces[0], primary=True)
+ network["config"].append(public)
+
+ # Prepare additional interfaces, private
+ for i in range(1, len(interfaces)):
+ private = generate_interface(interfaces[i])
+ network["config"].append(private)
+
+ return network
+
+
+def generate_interface(interface, primary=False):
+ interface_name = get_interface_name(interface["mac"])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface["mac"],
+ }
+
+ if primary:
+ netcfg["accept-ra"] = 1
+ netcfg["subnets"] = [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ]
+
+ if not primary:
+ netcfg["subnets"] = [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": interface["ipv4"]["address"],
+ "netmask": interface["ipv4"]["netmask"],
+ }
+ ]
+
+ generate_interface_routes(interface, netcfg)
+ generate_interface_additional_addresses(interface, netcfg)
+
+ # Add config to template
+ return netcfg
+
+
+def generate_interface_routes(interface, netcfg):
+ # Options that may or may not be used
+ if "mtu" in interface:
+ netcfg["mtu"] = interface["mtu"]
+
+ if "accept-ra" in interface:
+ netcfg["accept-ra"] = interface["accept-ra"]
+
+ if "routes" in interface:
+ netcfg["subnets"][0]["routes"] = interface["routes"]
+
+
+def generate_interface_additional_addresses(interface, netcfg):
+    # Check for additional IPv4 addresses (guard the key before indexing)
+    if "ipv4" in interface and len(interface["ipv4"]["additional"]) > 0:
+ for additional in interface["ipv4"]["additional"]:
+ add = {
+ "type": "static",
+ "control": "auto",
+ "address": additional["address"],
+ "netmask": additional["netmask"],
+ }
+
+ if "routes" in additional:
+ add["routes"] = additional["routes"]
+
+ netcfg["subnets"].append(add)
+
+    # Check for additional IPv6 addresses (guard the key before indexing)
+    if "ipv6" in interface and len(interface["ipv6"]["additional"]) > 0:
+ for additional in interface["ipv6"]["additional"]:
+ add = {
+ "type": "static6",
+ "control": "auto",
+ "address": "%s/%s"
+ % (additional["network"], additional["prefix"]),
+ }
+
+ if "routes" in additional:
+ add["routes"] = additional["routes"]
+
+ netcfg["subnets"].append(add)
+
+
+# Make required adjustments to the network configs provided
+def add_interface_names(interfaces):
+ for interface in interfaces:
+ interface_name = get_interface_name(interface["mac"])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system"
+ % interface["mac"]
+ )
+ interface["name"] = interface_name
+
+ return interfaces
+
+
+# vi: ts=4 expandtab
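As a sketch of how the new Vultr helper assembles a version-1 network config: the metadata shape below is fabricated and the interface map is patched so no real NIC lookup is needed; both are assumptions made only for this example.

    # Sketch only: exercise generate_network_config() without real hardware.
    from unittest import mock

    from cloudinit.sources.helpers import vultr

    interfaces = [
        {"mac": "56:00:00:00:00:01", "ipv4": {"additional": []},
         "ipv6": {"additional": []}},
    ]
    with mock.patch.object(
        vultr, "get_interface_map",
        return_value={"56:00:00:00:00:01": "ens3"},
    ):
        net_cfg = vultr.generate_network_config(interfaces)

    print(net_cfg["version"])            # 1
    print(net_cfg["config"][1]["name"])  # "ens3", the primary interface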
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
deleted file mode 100644
index 1420a988..00000000
--- a/cloudinit/sources/tests/test_init.py
+++ /dev/null
@@ -1,759 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import inspect
-import os
-import stat
-
-from cloudinit.event import EventType
-from cloudinit.helpers import Paths
-from cloudinit import importer
-from cloudinit.sources import (
- EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
- METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
- canonical_cloud_id, redact_sensitive_keys)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.user_data import UserDataProcessor
-from cloudinit import util
-
-
-class DataSourceTestSubclassNet(DataSource):
-
- dsname = 'MyTestSubclass'
- url_max_wait = 55
-
- def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
- custom_userdata=None, get_data_retval=True):
- super(DataSourceTestSubclassNet, self).__init__(
- sys_cfg, distro, paths)
- self._custom_userdata = custom_userdata
- self._custom_metadata = custom_metadata
- self._get_data_retval = get_data_retval
-
- def _get_cloud_name(self):
- return 'SubclassCloudName'
-
- def _get_data(self):
- if self._custom_metadata:
- self.metadata = self._custom_metadata
- else:
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
- if self._custom_userdata:
- self.userdata_raw = self._custom_userdata
- else:
- self.userdata_raw = 'userdata_raw'
- self.vendordata_raw = 'vendordata_raw'
- return self._get_data_retval
-
-
-class InvalidDataSourceTestSubclassNet(DataSource):
- pass
-
-
-class TestDataSource(CiTestCase):
-
- with_logs = True
- maxDiff = None
-
- def setUp(self):
- super(TestDataSource, self).setUp()
- self.sys_cfg = {'datasource': {'_undef': {'key1': False}}}
- self.distro = 'distrotest' # generally should be a Distro object
- self.paths = Paths({})
- self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
-
- def test_datasource_init(self):
- """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
- self.assertEqual(self.paths, self.datasource.paths)
- self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
- self.assertEqual(self.distro, self.datasource.distro)
- self.assertIsNone(self.datasource.userdata)
- self.assertEqual({}, self.datasource.metadata)
- self.assertIsNone(self.datasource.userdata_raw)
- self.assertIsNone(self.datasource.vendordata)
- self.assertIsNone(self.datasource.vendordata_raw)
- self.assertEqual({'key1': False}, self.datasource.ds_cfg)
- self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
-
- def test_datasource_init_gets_ds_cfg_using_dsname(self):
- """Init uses DataSource.dsname for sourcing ds_cfg."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- self.assertEqual({'key2': False}, datasource.ds_cfg)
-
- def test_str_is_classname(self):
- """The string representation of the datasource is the classname."""
- self.assertEqual('DataSource', str(self.datasource))
- self.assertEqual(
- 'DataSourceTestSubclassNet',
- str(DataSourceTestSubclassNet('', '', self.paths)))
-
- def test_datasource_get_url_params_defaults(self):
- """get_url_params default url config settings for the datasource."""
- params = self.datasource.get_url_params()
- self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
- self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
- self.assertEqual(params.num_retries, self.datasource.url_retries)
-
- def test_datasource_get_url_params_subclassed(self):
- """Subclasses can override get_url_params defaults."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
- url_params = datasource.get_url_params()
- self.assertNotEqual(self.datasource.get_url_params(), url_params)
- self.assertEqual(expected, url_params)
-
- def test_datasource_get_url_params_ds_config_override(self):
- """Datasource configuration options can override url param defaults."""
- sys_cfg = {
- 'datasource': {
- 'MyTestSubclass': {
- 'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
- datasource = DataSourceTestSubclassNet(
- sys_cfg, self.distro, self.paths)
- expected = (1, 2, 3)
- url_params = datasource.get_url_params()
- self.assertNotEqual(
- (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries),
- url_params)
- self.assertEqual(expected, url_params)
-
- def test_datasource_get_url_params_is_zero_or_greater(self):
- """get_url_params ignores timeouts with a value below 0."""
- # Set an override that is below 0 which gets ignored.
- sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
- datasource = DataSource(sys_cfg, self.distro, self.paths)
- (_max_wait, timeout, _retries) = datasource.get_url_params()
- self.assertEqual(0, timeout)
-
- def test_datasource_get_url_uses_defaults_on_errors(self):
- """On invalid system config values for url_params defaults are used."""
- # All invalid values should be logged
- sys_cfg = {'datasource': {
- '_undef': {
- 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
- datasource = DataSource(sys_cfg, self.distro, self.paths)
- url_params = datasource.get_url_params()
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
- self.assertEqual(expected, url_params)
- logs = self.logs.getvalue()
- expected_logs = [
- "Config max_wait 'nope' is not an int, using default '-1'",
- "Config timeout 'bug' is not an int, using default '10'",
- "Config retries 'nonint' is not an int, using default '5'",
- ]
- for log in expected_logs:
- self.assertIn(log, logs)
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
- """The fallback_interface is discovered via find_fallback_nic."""
- m_get_fallback_nic.return_value = 'nic9'
- self.assertEqual('nic9', self.datasource.fallback_interface)
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
- """Log a warning when fallback_interface can not discover the nic."""
- self.datasource._cloud_name = 'MySupahCloud'
- m_get_fallback_nic.return_value = None # Couldn't discover nic
- self.assertIsNone(self.datasource.fallback_interface)
- self.assertEqual(
- 'WARNING: Did not find a fallback interface on MySupahCloud.\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
- """The fallback_interface is cached and won't be rediscovered."""
- self.datasource._fallback_interface = 'nic10'
- self.assertEqual('nic10', self.datasource.fallback_interface)
- m_get_fallback_nic.assert_not_called()
-
- def test__get_data_unimplemented(self):
- """Raise an error when _get_data is not implemented."""
- with self.assertRaises(NotImplementedError) as context_manager:
- self.datasource.get_data()
- self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
- datasource2 = InvalidDataSourceTestSubclassNet(
- self.sys_cfg, self.distro, self.paths)
- with self.assertRaises(NotImplementedError) as context_manager:
- datasource2.get_data()
- self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
-
- def test_get_data_calls_subclass__get_data(self):
- """Datasource.get_data uses the subclass' version of _get_data."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- self.assertEqual(
- {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- datasource.metadata)
- self.assertEqual('userdata_raw', datasource.userdata_raw)
- self.assertEqual('vendordata_raw', datasource.vendordata_raw)
-
- def test_get_hostname_strips_local_hostname_without_domain(self):
- """Datasource.get_hostname strips metadata local-hostname of domain."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- self.assertEqual(
- 'test-subclass-hostname', datasource.metadata['local-hostname'])
- self.assertEqual('test-subclass-hostname', datasource.get_hostname())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
- self.assertEqual('hostname', datasource.get_hostname())
-
- def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
- """Datasource.get_hostname with fqdn set gets qualified hostname."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
- self.assertEqual(
- 'hostname.my.domain.com', datasource.get_hostname(fqdn=True))
-
- def test_get_hostname_without_metadata_uses_system_hostname(self):
- """Datasource.gethostname runs util.get_hostname when no metadata."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
- m_fqdn.return_value = None # No maching fqdn in /etc/hosts
- self.assertEqual('systemhostname', datasource.get_hostname())
- self.assertEqual(
- 'systemhostname.domain.com',
- datasource.get_hostname(fqdn=True))
-
- def test_get_hostname_without_metadata_returns_none(self):
- """Datasource.gethostname returns None when metadata_only and no MD."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- self.assertIsNone(datasource.get_hostname(metadata_only=True))
- self.assertIsNone(
- datasource.get_hostname(fqdn=True, metadata_only=True))
- self.assertEqual([], m_gethost.call_args_list)
- self.assertEqual([], m_fqdn.call_args_list)
-
- def test_get_hostname_without_metadata_prefers_etc_hosts(self):
- """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
- m_fqdn.return_value = 'fqdnhostname.domain.com'
- self.assertEqual('fqdnhostname', datasource.get_hostname())
- self.assertEqual('fqdnhostname.domain.com',
- datasource.get_hostname(fqdn=True))
-
- def test_get_data_does_not_write_instance_data_on_failure(self):
- """get_data does not write INSTANCE_JSON_FILE on get_data False."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- get_data_retval=False)
- self.assertFalse(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- self.assertFalse(
- os.path.exists(json_file), 'Found unexpected file %s' % json_file)
-
- def test_get_data_writes_json_instance_data_on_success(self):
- """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': ['merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
-
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}}}
- self.assertEqual(expected, util.load_json(content))
- file_stat = os.stat(json_file)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
- self.assertEqual(expected, util.load_json(content))
-
- def test_get_data_writes_redacted_public_json_instance_data(self):
- """get_data writes redacted content to public INSTANCE_JSON_FILE."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- redacted = util.load_json(util.load_file(json_file))
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
- }
- self.assertCountEqual(expected, redacted)
- file_stat = os.stat(json_file)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
-
- def test_get_data_writes_json_instance_data_sensitive(self):
- """
- get_data writes unmodified data to sensitive file as root-readonly.
- """
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
-
- self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
- content = util.load_file(sensitive_json_file)
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': {
- '_doc': (
- 'Merged cloud-init system config from '
- '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
- ),
- 'datasource': {'_undef': {'key1': False}}},
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'kernel_release': '5.4.0-24-generic',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'subplatform': 'unknown',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {
- 'security-credentials':
- {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
- }
- self.assertCountEqual(expected, util.load_json(content))
- file_stat = os.stat(sensitive_json_file)
- self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
- self.assertEqual(expected, util.load_json(content))
-
- def test_get_data_handles_redacted_unserializable_content(self):
- """get_data warns unserializable content in INSTANCE_JSON_FILE."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- expected_metadata = {
- 'key1': 'val1',
- 'key2': {
- 'key2.1': "Warning: redacted unserializable type <class"
- " 'cloudinit.helpers.Paths'>"}}
- instance_json = util.load_json(content)
- self.assertEqual(
- expected_metadata, instance_json['ds']['meta_data'])
-
- def test_persist_instance_data_writes_ec2_metadata_when_set(self):
- """When ec2_metadata class attribute is set, persist to json."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.ec2_metadata = UNSET
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('ec2_metadata', instance_data['ds'])
- datasource.ec2_metadata = {'ec2stuff': 'is good'}
- datasource.persist_instance_data()
- instance_data = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'ec2stuff': 'is good'},
- instance_data['ds']['ec2_metadata'])
-
- def test_persist_instance_data_writes_network_json_when_set(self):
- """When network_data.json class attribute is set, persist to json."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('network_json', instance_data['ds'])
- datasource.network_json = {'network_json': 'is good'}
- datasource.persist_instance_data()
- instance_data = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'network_json': 'is good'},
- instance_data['ds']['network_json'])
-
- def test_get_data_base64encodes_unserializable_bytes(self):
- """On py3, get_data base64encodes any unserializable content."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- instance_json = util.load_json(content)
- self.assertCountEqual(
- ['ds/meta_data/key2/key2.1'],
- instance_json['base64_encoded_keys'])
- self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['meta_data'])
-
- def test_get_hostname_subclass_support(self):
- """Validate get_hostname signature on all subclasses of DataSource."""
- base_args = inspect.getfullargspec(DataSource.get_hostname)
- # Import all DataSource subclasses so we can inspect them.
- modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
- for _loc, name in modules.items():
- mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], [])
- if mod_locs:
- importer.import_module(mod_locs[0])
- for child in DataSource.__subclasses__():
- if 'Test' in child.dsname:
- continue
- self.assertEqual(
- base_args,
- inspect.getfullargspec(child.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % child)
- for grandchild in child.__subclasses__():
- self.assertEqual(
- base_args,
- inspect.getfullargspec(grandchild.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % grandchild)
-
- def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
- """Class attributes listed in cached_attr_defaults are reset."""
- count = 0
- # Setup values for all cached class attributes
- for attr, value in self.datasource.cached_attr_defaults:
- setattr(self.datasource, attr, count)
- count += 1
- self.datasource._dirty_cache = True
- self.datasource.clear_cached_attrs()
- for attr, value in self.datasource.cached_attr_defaults:
- self.assertEqual(value, getattr(self.datasource, attr))
-
- def test_clear_cached_attrs_noops_on_clean_cache(self):
- """Class attributes listed in cached_attr_defaults are reset."""
- count = 0
- # Setup values for all cached class attributes
- for attr, _ in self.datasource.cached_attr_defaults:
- setattr(self.datasource, attr, count)
- count += 1
- self.datasource._dirty_cache = False # Fake clean cache
- self.datasource.clear_cached_attrs()
- count = 0
- for attr, _ in self.datasource.cached_attr_defaults:
- self.assertEqual(count, getattr(self.datasource, attr))
- count += 1
-
- def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
- """Skip any cached_attr_defaults which aren't class attributes."""
- self.datasource._dirty_cache = True
- self.datasource.clear_cached_attrs()
- for attr in ('ec2_metadata', 'network_json'):
- self.assertFalse(hasattr(self.datasource, attr))
-
- def test_clear_cached_attrs_of_custom_attrs(self):
- """Custom attr_values can be passed to clear_cached_attrs."""
- self.datasource._dirty_cache = True
- cached_attr_name = self.datasource.cached_attr_defaults[0][0]
- setattr(self.datasource, cached_attr_name, 'himom')
- self.datasource.myattr = 'orig'
- self.datasource.clear_cached_attrs(
- attr_defaults=(('myattr', 'updated'),))
- self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
- self.assertEqual('updated', self.datasource.myattr)
-
- def test_update_metadata_only_acts_on_supported_update_events(self):
- """update_metadata won't get_data on unsupported update events."""
- self.datasource.update_events['network'].discard(EventType.BOOT)
- self.assertEqual(
- {'network': set([EventType.BOOT_NEW_INSTANCE])},
- self.datasource.update_events)
-
- def fake_get_data():
- raise Exception('get_data should not be called')
-
- self.datasource.get_data = fake_get_data
- self.assertFalse(
- self.datasource.update_metadata(
- source_event_types=[EventType.BOOT]))
-
- def test_update_metadata_returns_true_on_supported_update_event(self):
- """update_metadata returns get_data response on supported events."""
-
- def fake_get_data():
- return True
-
- self.datasource.get_data = fake_get_data
- self.datasource._network_config = 'something'
- self.datasource._dirty_cache = True
- self.assertTrue(
- self.datasource.update_metadata(
- source_event_types=[
- EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
- self.assertEqual(UNSET, self.datasource._network_config)
- self.assertIn(
- "DEBUG: Update datasource metadata and network config due to"
- " events: New instance first boot",
- self.logs.getvalue())
-
-
-class TestRedactSensitiveData(CiTestCase):
-
- def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
- """When sensitive_keys is absent or empty from metadata do nothing."""
- md = {'my': 'data'}
- self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
- md['sensitive_keys'] = []
- self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
-
- def test_redact_sensitive_data_redacts_exact_match_name(self):
- """Only exact matched sensitive_keys are redacted from metadata."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
- secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted'
- self.assertEqual(
- secure_md,
- redact_sensitive_keys(md, redact_value='redacted'))
-
- def test_redact_sensitive_data_does_redacts_with_default_string(self):
- """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
- secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted for non-root user'
- self.assertEqual(
- secure_md,
- redact_sensitive_keys(md))
-
-
-class TestCanonicalCloudID(CiTestCase):
-
- def test_cloud_id_returns_platform_on_unknowns(self):
- """When region and cloud_name are unknown, return platform."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region=METADATA_UNKNOWN,
- platform='platform'))
-
- def test_cloud_id_returns_platform_on_none(self):
- """When region and cloud_name are unknown, return platform."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=None,
- region=None,
- platform='platform'))
-
- def test_cloud_id_returns_cloud_name_on_unknown_region(self):
- """When region is unknown, return cloud_name."""
- for region in (None, METADATA_UNKNOWN):
- self.assertEqual(
- 'cloudname',
- canonical_cloud_id(cloud_name='cloudname',
- region=region,
- platform='platform'))
-
- def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
- """When region is set but cloud_name is unknown return cloud_name."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region='region',
- platform='platform'))
-
- def test_cloud_id_aws_based_on_region_and_cloud_name(self):
- """When cloud_name is aws, return proper cloud-id based on region."""
- self.assertEqual(
- 'aws-china',
- canonical_cloud_id(cloud_name='aws',
- region='cn-north-1',
- platform='platform'))
- self.assertEqual(
- 'aws',
- canonical_cloud_id(cloud_name='aws',
- region='us-east-1',
- platform='platform'))
- self.assertEqual(
- 'aws-gov',
- canonical_cloud_id(cloud_name='aws',
- region='us-gov-1',
- platform='platform'))
- self.assertEqual( # Overrideen non-aws cloud_name is returned
- '!aws',
- canonical_cloud_id(cloud_name='!aws',
- region='us-gov-1',
- platform='platform'))
-
- def test_cloud_id_azure_based_on_region_and_cloud_name(self):
- """Report cloud-id when cloud_name is azure and region is in china."""
- self.assertEqual(
- 'azure-china',
- canonical_cloud_id(cloud_name='azure',
- region='chinaeast',
- platform='platform'))
- self.assertEqual(
- 'azure',
- canonical_cloud_id(cloud_name='azure',
- region='!chinaeast',
- platform='platform'))
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index d5113996..ab4c63aa 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -60,14 +60,16 @@ _DISABLE_USER_SSH_EXIT = 142
DISABLE_USER_OPTS = (
"no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10;"
- "exit " + str(_DISABLE_USER_SSH_EXIT) + "\"")
+ 'no-X11-forwarding,command="echo \'Please login as the user \\"$USER\\"'
+ ' rather than the user \\"$DISABLE_USER\\".\';echo;sleep 10;'
+ "exit " + str(_DISABLE_USER_SSH_EXIT) + '"'
+)
class AuthKeyLine(object):
- def __init__(self, source, keytype=None, base64=None,
- comment=None, options=None):
+ def __init__(
+ self, source, keytype=None, base64=None, comment=None, options=None
+ ):
self.base64 = base64
self.comment = comment
self.options = options
@@ -75,7 +77,7 @@ class AuthKeyLine(object):
self.source = source
def valid(self):
- return (self.base64 and self.keytype)
+ return self.base64 and self.keytype
def __str__(self):
toks = []
@@ -90,7 +92,7 @@ class AuthKeyLine(object):
if not toks:
return self.source
else:
- return ' '.join(toks)
+ return " ".join(toks)
class AuthKeyLineParser(object):
@@ -121,8 +123,7 @@ class AuthKeyLineParser(object):
"""
quoted = False
i = 0
- while (i < len(ent) and
- ((quoted) or (ent[i] not in (" ", "\t")))):
+ while i < len(ent) and ((quoted) or (ent[i] not in (" ", "\t"))):
curc = ent[i]
if i + 1 >= len(ent):
i = i + 1
@@ -143,7 +144,7 @@ class AuthKeyLineParser(object):
def parse(self, src_line, options=None):
        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
line = src_line.rstrip("\r\n")
- if line.startswith("#") or line.strip() == '':
+ if line.startswith("#") or line.strip() == "":
return AuthKeyLine(src_line)
def parse_ssh_key(ent):
@@ -174,8 +175,13 @@ class AuthKeyLineParser(object):
except TypeError:
return AuthKeyLine(src_line)
- return AuthKeyLine(src_line, keytype=keytype, base64=base64,
- comment=comment, options=options)
+ return AuthKeyLine(
+ src_line,
+ keytype=keytype,
+ base64=base64,
+ comment=comment,
+ options=options,
+ )
def parse_authorized_keys(fnames):
@@ -218,15 +224,15 @@ def update_authorized_keys(old_entries, keys):
lines = [str(b) for b in old_entries]
# Ensure it ends with a newline
- lines.append('')
- return '\n'.join(lines)
+ lines.append("")
+ return "\n".join(lines)
def users_ssh_info(username):
pw_ent = pwd.getpwnam(username)
if not pw_ent or not pw_ent.pw_dir:
raise RuntimeError("Unable to get SSH info for user %r" % (username))
- return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
+ return (os.path.join(pw_ent.pw_dir, ".ssh"), pw_ent)
def render_authorizedkeysfile_paths(value, homedir, username):
@@ -249,35 +255,213 @@ def render_authorizedkeysfile_paths(value, homedir, username):
return rendered
+# Inspired by safe_path() in openssh source code (misc.c).
+def check_permissions(username, current_path, full_path, is_file, strictmodes):
+ """Check if the file/folder in @current_path has the right permissions.
+
+ We need to check that:
+ 1. If StrictMode is enabled, the owner is either root or the user
+ 2. the user can access the file/folder, otherwise ssh won't use it
+ 3. If StrictMode is enabled, no write permission is given to group
+ and world users (022)
+ """
+
+ # group/world can only execute the folder (access)
+ minimal_permissions = 0o711
+ if is_file:
+ # group/world can only read the file
+ minimal_permissions = 0o644
+
+ # 1. owner must be either root or the user itself
+ owner = util.get_owner(current_path)
+ if strictmodes and owner != username and owner != "root":
+ LOG.debug(
+ "Path %s in %s must be own by user %s or"
+ " by root, but instead is own by %s. Ignoring key.",
+ current_path,
+ full_path,
+ username,
+ owner,
+ )
+ return False
+
+ parent_permission = util.get_permissions(current_path)
+ # 2. the user can access the file/folder, otherwise ssh won't use it
+ if owner == username:
+ # need only the owner permissions
+ minimal_permissions &= 0o700
+ else:
+ group_owner = util.get_group(current_path)
+ user_groups = util.get_user_groups(username)
+
+ if group_owner in user_groups:
+ # need only the group permissions
+ minimal_permissions &= 0o070
+ else:
+ # need only the world permissions
+ minimal_permissions &= 0o007
+
+ if parent_permission & minimal_permissions == 0:
+ LOG.debug(
+ "Path %s in %s must be accessible by user %s,"
+ " check its permissions",
+ current_path,
+ full_path,
+ username,
+ )
+ return False
+
+ # 3. no write permission (w) is given to group and world users (022)
+ # Group and world user can still have +rx.
+ if strictmodes and parent_permission & 0o022 != 0:
+ LOG.debug(
+ "Path %s in %s must not give write"
+ "permission to group or world users. Ignoring key.",
+ current_path,
+ full_path,
+ )
+ return False
+
+ return True
+
+
+def check_create_path(username, filename, strictmodes):
+ user_pwent = users_ssh_info(username)[1]
+ root_pwent = users_ssh_info("root")[1]
+ try:
+ # check the directories first
+ directories = filename.split("/")[1:-1]
+
+ # scan in order, from root to file name
+ parent_folder = ""
+ # this is to comply also with unit tests, and
+ # strange home directories
+ home_folder = os.path.dirname(user_pwent.pw_dir)
+ for directory in directories:
+ parent_folder += "/" + directory
+
+ # security check, disallow symlinks in the AuthorizedKeysFile path.
+ if os.path.islink(parent_folder):
+ LOG.debug(
+ "Invalid directory. Symlink exists in path: %s",
+ parent_folder,
+ )
+ return False
+
+ if os.path.isfile(parent_folder):
+ LOG.debug(
+ "Invalid directory. File exists in path: %s", parent_folder
+ )
+ return False
+
+ if (
+ home_folder.startswith(parent_folder)
+ or parent_folder == user_pwent.pw_dir
+ ):
+ continue
+
+ if not os.path.exists(parent_folder):
+ # directory does not exist, and permission so far are good:
+ # create the directory, and make it accessible by everyone
+ # but owned by root, as it might be used by many users.
+ with util.SeLinuxGuard(parent_folder):
+ mode = 0o755
+ uid = root_pwent.pw_uid
+ gid = root_pwent.pw_gid
+ if parent_folder.startswith(user_pwent.pw_dir):
+ mode = 0o700
+ uid = user_pwent.pw_uid
+ gid = user_pwent.pw_gid
+ os.makedirs(parent_folder, mode=mode, exist_ok=True)
+ util.chownbyid(parent_folder, uid, gid)
+
+ permissions = check_permissions(
+ username, parent_folder, filename, False, strictmodes
+ )
+ if not permissions:
+ return False
+
+ if os.path.islink(filename) or os.path.isdir(filename):
+ LOG.debug("%s is not a file!", filename)
+ return False
+
+ # check the file
+ if not os.path.exists(filename):
+ # if file does not exist: we need to create it, since the
+ # folders at this point exist and have right permissions
+ util.write_file(filename, "", mode=0o600, ensure_dir_exists=True)
+ util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid)
+
+ permissions = check_permissions(
+ username, filename, filename, True, strictmodes
+ )
+ if not permissions:
+ return False
+ except (IOError, OSError) as e:
+ util.logexc(LOG, str(e))
+ return False
+
+ return True
+
+
def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
- default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ default_authorizedkeys_file = os.path.join(ssh_dir, "authorized_keys")
+ user_authorizedkeys_file = default_authorizedkeys_file
auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+ key_paths = ssh_cfg.get(
+ "authorizedkeysfile", "%h/.ssh/authorized_keys"
+ )
+ strictmodes = ssh_cfg.get("strictmodes", "yes")
auth_key_fns = render_authorizedkeysfile_paths(
- ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
- pw_ent.pw_dir, username)
+ key_paths, pw_ent.pw_dir, username
+ )
except (IOError, OSError):
# Give up and use a default key filename
- auth_key_fns.append(default_authorizedkeys_file)
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH "
- "config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+ auth_key_fns[0] = default_authorizedkeys_file
+ util.logexc(
+ LOG,
+ "Failed extracting 'AuthorizedKeysFile' in SSH "
+ "config from %r, using 'AuthorizedKeysFile' file "
+ "%r instead",
+ DEF_SSHD_CFG,
+ auth_key_fns[0],
+ )
+
+ # check if one of the keys is the user's one and has the right permissions
+ for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
+ if any(
+ [
+ "%u" in key_path,
+ "%h" in key_path,
+ auth_key_fn.startswith("{}/".format(pw_ent.pw_dir)),
+ ]
+ ):
+ permissions_ok = check_create_path(
+ username, auth_key_fn, strictmodes == "yes"
+ )
+ if permissions_ok:
+ user_authorizedkeys_file = auth_key_fn
+ break
- # always store all the keys in the first file configured on sshd_config
- return (auth_key_fns[0], parse_authorized_keys(auth_key_fns))
+ if user_authorizedkeys_file != default_authorizedkeys_file:
+ LOG.debug(
+ "AuthorizedKeysFile has an user-specific authorized_keys, "
+ "using %s",
+ user_authorizedkeys_file,
+ )
+ return (
+ user_authorizedkeys_file,
+ parse_authorized_keys([user_authorizedkeys_file]),
+ )
-def setup_user_keys(keys, username, options=None):
- # Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username)
- if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0o700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+def setup_user_keys(keys, username, options=None):
# Turn the 'update' keys given into actual entries
parser = AuthKeyLineParser()
key_entries = []
@@ -286,11 +470,10 @@ def setup_user_keys(keys, username, options=None):
# Extract the old and make the new
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
+ ssh_dir = os.path.dirname(auth_key_fn)
with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
- util.write_file(auth_key_fn, content, mode=0o600)
- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
+ util.write_file(auth_key_fn, content, preserve_mode=True)
class SshdConfigLine(object):
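
For illustration, a minimal standalone sketch of the rules enforced by check_permissions() above: the minimal-access mask starts at 0o711 for directories (0o644 for files), is narrowed to the owner, group or other bits that apply to the caller, and StrictModes additionally rejects ownership by anyone but root or the user as well as any group/world write bit (0o022). The helper name and the plain-integer interface are hypothetical; the real code works through cloud-init's util wrappers and logs a reason instead of returning silently.

def sketch_check_permissions(mode, is_file, owner, group, username,
                             user_groups, strictmodes=True):
    """Illustrative restatement of the checks, using plain stat-style bits."""
    minimal = 0o644 if is_file else 0o711

    # 1. With StrictModes, only root or the user may own the path.
    if strictmodes and owner not in (username, "root"):
        return False

    # 2. Keep only the permission bits that apply to this user.
    if owner == username:
        minimal &= 0o700
    elif group in user_groups:
        minimal &= 0o070
    else:
        minimal &= 0o007
    if mode & minimal == 0:
        return False  # the user cannot traverse/read the path at all

    # 3. With StrictModes, group or world write access disqualifies the path.
    if strictmodes and mode & 0o022:
        return False
    return True

# A group/world-writable ~/.ssh (0o777) is rejected; a private one (0o700) passes.
assert sketch_check_permissions(0o777, False, "alice", "alice", "alice", {"alice"}) is False
assert sketch_check_permissions(0o700, False, "alice", "alice", "alice", {"alice"}) is True
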
@@ -336,7 +519,15 @@ def parse_ssh_config_lines(lines):
try:
key, val = line.split(None, 1)
except ValueError:
- key, val = line.split('=', 1)
+ try:
+ key, val = line.split("=", 1)
+ except ValueError:
+ LOG.debug(
+ 'sshd_config: option "%s" has no key/value pair,'
+ " skipping it",
+ line,
+ )
+ continue
ret.append(SshdConfigLine(line, key, val))
return ret
@@ -362,9 +553,10 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
changed = update_ssh_config_lines(lines=lines, updates=updates)
if changed:
util.write_file(
- fname, "\n".join(
- [str(line) for line in lines]
- ) + "\n", preserve_mode=True)
+ fname,
+ "\n".join([str(line) for line in lines]) + "\n",
+ preserve_mode=True,
+ )
return len(changed) != 0
@@ -388,12 +580,18 @@ def update_ssh_config_lines(lines, updates):
value = updates[key]
found.add(key)
if line.value == value:
- LOG.debug("line %d: option %s already set to %s",
- i, key, value)
+ LOG.debug(
+ "line %d: option %s already set to %s", i, key, value
+ )
else:
changed.append(key)
- LOG.debug("line %d: option %s updated %s -> %s", i,
- key, line.value, value)
+ LOG.debug(
+ "line %d: option %s updated %s -> %s",
+ i,
+ key,
+ line.value,
+ value,
+ )
line.value = value
if len(found) != len(updates):
@@ -401,9 +599,11 @@ def update_ssh_config_lines(lines, updates):
if key in found:
continue
changed.append(key)
- lines.append(SshdConfigLine('', key, value))
- LOG.debug("line %d: option %s added with %s",
- len(lines), key, value)
+ lines.append(SshdConfigLine("", key, value))
+ LOG.debug(
+ "line %d: option %s added with %s", len(lines), key, value
+ )
return changed
+
# vi: ts=4 expandtab
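
The new key/value fallback in parse_ssh_config_lines() can be summarised with a tiny standalone helper (hypothetical name): whitespace-separated options are split first, '=' is tried next, and lines with no value are skipped with a debug message instead of raising.

import logging

LOG = logging.getLogger(__name__)

def split_sshd_option(line):
    """Split one sshd_config option the way the parser above now does."""
    try:
        key, val = line.split(None, 1)
    except ValueError:
        try:
            key, val = line.split("=", 1)
        except ValueError:
            LOG.debug('option "%s" has no key/value pair, skipping it', line)
            return None
    return key, val

assert split_sshd_option("PasswordAuthentication no") == ("PasswordAuthentication", "no")
assert split_sshd_option("Port=2222") == ("Port", "2222")
assert split_sshd_option("UsePAM") is None  # no value: skipped, not an error
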
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 0cce6e80..3f17294b 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -8,34 +8,34 @@ import copy
import os
import pickle
import sys
+from collections import namedtuple
+from typing import Dict, Set # noqa: F401
-from cloudinit.settings import (
- FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
-
-from cloudinit import handlers
+from cloudinit import cloud, config, distros, handlers, helpers, importer
+from cloudinit import log as logging
+from cloudinit import net, sources, type_utils, util
+from cloudinit.event import EventScope, EventType, userdata_to_events
# Default handlers (used if not overridden)
from cloudinit.handlers.boot_hook import BootHookPartHandler
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.shell_script_by_frequency import (
+ ShellScriptByFreqPartHandler,
+)
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
-
-from cloudinit.event import EventType
-from cloudinit.sources import NetworkConfigSource
-
-from cloudinit import cloud
-from cloudinit import config
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
from cloudinit.net import cmdline
from cloudinit.reporting import events
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit import util
+from cloudinit.settings import (
+ CLOUD_CONFIG,
+ FREQUENCIES,
+ PER_ALWAYS,
+ PER_INSTANCE,
+ PER_ONCE,
+ RUN_CLOUD_CONFIG,
+)
+from cloudinit.sources import NetworkConfigSource
LOG = logging.getLogger(__name__)
@@ -43,6 +43,60 @@ NULL_DATA_SOURCE = None
NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
+def update_event_enabled(
+ datasource: sources.DataSource,
+ cfg: dict,
+ event_source_type: EventType,
+ scope: EventScope = None,
+) -> bool:
+ """Determine if a particular EventType is enabled.
+
+ For the `event_source_type` passed in, check whether this EventType
+ is enabled in the `updates` section of the userdata. If `updates`
+ is not enabled in userdata, check if defined as one of the
+ `default_events` on the datasource. `scope` may be used to
+ narrow the check to a particular `EventScope`.
+
+ Note that on first boot, userdata may NOT be available yet. In this
+ case, we only have the data source's `default_update_events`,
+ so an event that should be enabled in userdata may be denied.
+ """
+ default_events = (
+ datasource.default_update_events
+ ) # type: Dict[EventScope, Set[EventType]]
+ user_events = userdata_to_events(
+ cfg.get("updates", {})
+ ) # type: Dict[EventScope, Set[EventType]]
+ # A value in the first will override a value in the second
+ allowed = util.mergemanydict(
+ [
+ copy.deepcopy(user_events),
+ copy.deepcopy(default_events),
+ ]
+ )
+ LOG.debug("Allowed events: %s", allowed)
+
+ if not scope:
+ scopes = allowed.keys()
+ else:
+ scopes = [scope]
+ scope_values = [s.value for s in scopes]
+
+ for evt_scope in scopes:
+ if event_source_type in allowed.get(evt_scope, []):
+ LOG.debug(
+ "Event Allowed: scope=%s EventType=%s",
+ evt_scope.value,
+ event_source_type,
+ )
+ return True
+
+ LOG.debug(
+ "Event Denied: scopes=%s EventType=%s", scope_values, event_source_type
+ )
+ return False
+
+
class Init(object):
def __init__(self, ds_deps=None, reporter=None):
if ds_deps is not None:
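
A condensed sketch of the precedence that update_event_enabled() implements, using plain strings in place of the EventScope/EventType enums and a simple dict update in place of util.mergemanydict (both substitutions are assumptions made for illustration): user-supplied 'updates' scopes take precedence over the datasource defaults, and the event must appear in one of the allowed scopes.

import copy

def sketch_event_enabled(default_events, user_events, event, scope=None):
    """Return True when 'event' is allowed for 'scope' (or for any scope)."""
    allowed = copy.deepcopy(default_events)
    allowed.update(copy.deepcopy(user_events))  # user config wins per scope
    scopes = [scope] if scope else list(allowed)
    return any(event in allowed.get(s, set()) for s in scopes)

defaults = {"network": {"boot-new-instance"}}
user_cfg = {"network": {"boot-new-instance", "boot"}}  # e.g. from an 'updates:' section

assert sketch_event_enabled(defaults, {}, "boot", scope="network") is False
assert sketch_event_enabled(defaults, user_cfg, "boot", scope="network") is True
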
@@ -60,8 +114,10 @@ class Init(object):
if reporter is None:
reporter = events.ReportEventStack(
- name="init-reporter", description="init-desc",
- reporting_enabled=False)
+ name="init-reporter",
+ description="init-desc",
+ reporting_enabled=False,
+ )
self.reporter = reporter
def _reset(self, reset_ds=False):
@@ -77,8 +133,8 @@ class Init(object):
def distro(self):
if not self._distro:
# Try to find the right class to use
- system_config = self._extract_cfg('system')
- distro_name = system_config.pop('distro', 'ubuntu')
+ system_config = self._extract_cfg("system")
+ distro_name = system_config.pop("distro", "ubuntu")
distro_cls = distros.fetch(distro_name)
LOG.debug("Using distro class %s", distro_cls)
self._distro = distro_cls(distro_name, system_config, self.paths)
@@ -92,19 +148,19 @@ class Init(object):
@property
def cfg(self):
- return self._extract_cfg('restricted')
+ return self._extract_cfg("restricted")
def _extract_cfg(self, restriction):
# Ensure actually read
self.read_cfg()
# Nobody gets the real config
ocfg = copy.deepcopy(self._cfg)
- if restriction == 'restricted':
- ocfg.pop('system_info', None)
- elif restriction == 'system':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
- elif restriction == 'paths':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
+ if restriction == "restricted":
+ ocfg.pop("system_info", None)
+ elif restriction == "system":
+ ocfg = util.get_cfg_by_path(ocfg, ("system_info",), {})
+ elif restriction == "paths":
+ ocfg = util.get_cfg_by_path(ocfg, ("system_info", "paths"), {})
if not isinstance(ocfg, (dict)):
ocfg = {}
return ocfg
@@ -112,24 +168,26 @@ class Init(object):
@property
def paths(self):
if not self._paths:
- path_info = self._extract_cfg('paths')
+ path_info = self._extract_cfg("paths")
self._paths = helpers.Paths(path_info, self.datasource)
return self._paths
def _initial_subdirs(self):
c_dir = self.paths.cloud_dir
+ run_dir = self.paths.run_dir
initial_dirs = [
c_dir,
- os.path.join(c_dir, 'scripts'),
- os.path.join(c_dir, 'scripts', 'per-instance'),
- os.path.join(c_dir, 'scripts', 'per-once'),
- os.path.join(c_dir, 'scripts', 'per-boot'),
- os.path.join(c_dir, 'scripts', 'vendor'),
- os.path.join(c_dir, 'seed'),
- os.path.join(c_dir, 'instances'),
- os.path.join(c_dir, 'handlers'),
- os.path.join(c_dir, 'sem'),
- os.path.join(c_dir, 'data'),
+ os.path.join(c_dir, "scripts"),
+ os.path.join(c_dir, "scripts", "per-instance"),
+ os.path.join(c_dir, "scripts", "per-once"),
+ os.path.join(c_dir, "scripts", "per-boot"),
+ os.path.join(c_dir, "scripts", "vendor"),
+ os.path.join(c_dir, "seed"),
+ os.path.join(c_dir, "instances"),
+ os.path.join(c_dir, "handlers"),
+ os.path.join(c_dir, "sem"),
+ os.path.join(c_dir, "data"),
+ os.path.join(run_dir, "sem"),
]
return initial_dirs
@@ -146,10 +204,10 @@ class Init(object):
def _initialize_filesystem(self):
util.ensure_dirs(self._initial_subdirs())
- log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
+ log_file = util.get_cfg_option_str(self.cfg, "def_log_file")
if log_file:
- util.ensure_file(log_file, preserve_mode=True)
- perms = self.cfg.get('syslog_fix_perms')
+ util.ensure_file(log_file, mode=0o640, preserve_mode=True)
+ perms = self.cfg.get("syslog_fix_perms")
if not perms:
perms = {}
if not isinstance(perms, list):
@@ -164,8 +222,12 @@ class Init(object):
except OSError as e:
error = e
- LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
- log_file, ','.join(perms), error)
+ LOG.warning(
+ "Failed changing perms on '%s'. tried: %s. %s",
+ log_file,
+ ",".join(perms),
+ error,
+ )
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -175,37 +237,41 @@ class Init(object):
def _read_cfg(self, extra_fns):
no_cfg_paths = helpers.Paths({}, self.datasource)
- merger = helpers.ConfigMerger(paths=no_cfg_paths,
- datasource=self.datasource,
- additional_fns=extra_fns,
- base_cfg=fetch_base_config())
+ merger = helpers.ConfigMerger(
+ paths=no_cfg_paths,
+ datasource=self.datasource,
+ additional_fns=extra_fns,
+ base_cfg=fetch_base_config(),
+ )
return merger.cfg
def _restore_from_cache(self):
# We try to restore from a current link and static path
# by using the instance link, if purge_cache was called
        # the file won't exist.
- return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
+ return _pkl_load(self.paths.get_ipath_cur("obj_pkl"))
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
- if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
+ if util.get_cfg_option_bool(self.cfg, "manual_cache_clean", False):
# The empty file in instance/ dir indicates manual cleaning,
# and can be read by ds-identify.
util.write_file(
self.paths.get_ipath_cur("manual_clean_marker"),
- omode="w", content="")
+ omode="w",
+ content="",
+ )
return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
# Any config provided???
- pkg_list = self.cfg.get('datasource_pkg_list') or []
+ pkg_list = self.cfg.get("datasource_pkg_list") or []
# Add the defaults at the end
- for n in ['', type_utils.obj_name(sources)]:
+ for n in ["", type_utils.obj_name(sources)]:
if n not in pkg_list:
pkg_list.append(n)
- cfg_list = self.cfg.get('datasource_list') or []
+ cfg_list = self.cfg.get("datasource_list") or []
return (cfg_list, pkg_list)
def _restore_from_checked_cache(self, existing):
@@ -216,7 +282,7 @@ class Init(object):
if not ds:
return (None, "no cache found")
- run_iid_fn = self.paths.get_runpath('instance_id')
+ run_iid_fn = self.paths.get_runpath("instance_id")
if os.path.exists(run_iid_fn):
run_iid = util.load_file(run_iid_fn).strip()
else:
@@ -227,20 +293,22 @@ class Init(object):
elif existing == "trust":
return (ds, "restored from cache: %s" % ds)
else:
- if (hasattr(ds, 'check_instance_id') and
- ds.check_instance_id(self.cfg)):
+ if hasattr(ds, "check_instance_id") and ds.check_instance_id(
+ self.cfg
+ ):
return (ds, "restored from checked cache: %s" % ds)
else:
return (None, "cache invalid in datasource: %s" % ds)
- def _get_data_source(self, existing):
+ def _get_data_source(self, existing) -> sources.DataSource:
if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
with events.ReportEventStack(
- name="check-cache",
- description="attempting to read from cache [%s]" % existing,
- parent=self.reporter) as myrep:
+ name="check-cache",
+ description="attempting to read from cache [%s]" % existing,
+ parent=self.reporter,
+ ) as myrep:
ds, desc = self._restore_from_checked_cache(existing)
myrep.description = desc
@@ -252,21 +320,24 @@ class Init(object):
(cfg_list, pkg_list) = self._get_datasources()
# Deep copy so that user-data handlers can not modify
# (which will affect user-data handlers down the line...)
- (ds, dsname) = sources.find_source(self.cfg,
- self.distro,
- self.paths,
- copy.deepcopy(self.ds_deps),
- cfg_list,
- pkg_list, self.reporter)
+ (ds, dsname) = sources.find_source(
+ self.cfg,
+ self.distro,
+ self.paths,
+ copy.deepcopy(self.ds_deps),
+ cfg_list,
+ pkg_list,
+ self.reporter,
+ )
LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds
+ self.datasource = ds # type: sources.DataSource
# Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used)
self._reset()
return ds
def _get_instance_subdirs(self):
- return ['handlers', 'scripts', 'sem']
+ return ["handlers", "scripts", "sem"]
def _get_ipath(self, subname=None):
# Force a check to see if anything
@@ -274,8 +345,10 @@ class Init(object):
# then a datasource has not been assigned...
instance_dir = self.paths.get_ipath(subname)
if not instance_dir:
- raise RuntimeError(("No instance directory is available."
- " Has a datasource been fetched??"))
+ raise RuntimeError(
+ "No instance directory is available."
+ " Has a datasource been fetched??"
+ )
return instance_dir
def _reflect_cur_instance(self):
@@ -293,12 +366,12 @@ class Init(object):
# Write out information on what is being used for the current instance
# and what may have been used for a previous instance...
- dp = self.paths.get_cpath('data')
+ dp = self.paths.get_cpath("data")
# Write what the datasource was and is..
ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
previous_ds = None
- ds_fn = os.path.join(idir, 'datasource')
+ ds_fn = os.path.join(idir, "datasource")
try:
previous_ds = util.load_file(ds_fn).strip()
except Exception:
@@ -306,18 +379,20 @@ class Init(object):
if not previous_ds:
previous_ds = ds
util.write_file(ds_fn, "%s\n" % ds)
- util.write_file(os.path.join(dp, 'previous-datasource'),
- "%s\n" % (previous_ds))
+ util.write_file(
+ os.path.join(dp, "previous-datasource"), "%s\n" % (previous_ds)
+ )
# What the instance id was and is...
iid = self.datasource.get_instance_id()
- iid_fn = os.path.join(dp, 'instance-id')
+ iid_fn = os.path.join(dp, "instance-id")
previous_iid = self.previous_iid()
util.write_file(iid_fn, "%s\n" % iid)
- util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
- util.write_file(os.path.join(dp, 'previous-instance-id'),
- "%s\n" % (previous_iid))
+ util.write_file(self.paths.get_runpath("instance_id"), "%s\n" % iid)
+ util.write_file(
+ os.path.join(dp, "previous-instance-id"), "%s\n" % (previous_iid)
+ )
self._write_to_cache()
# Ensure needed components are regenerated
@@ -330,8 +405,8 @@ class Init(object):
if self._previous_iid is not None:
return self._previous_iid
- dp = self.paths.get_cpath('data')
- iid_fn = os.path.join(dp, 'instance-id')
+ dp = self.paths.get_cpath("data")
+ iid_fn = os.path.join(dp, "instance-id")
try:
self._previous_iid = util.load_file(iid_fn).strip()
except Exception:
@@ -341,9 +416,16 @@ class Init(object):
return self._previous_iid
def is_new_instance(self):
+ """Return true if this is a new instance.
+
+ If datasource has already been initialized, this will return False,
+ even on first boot.
+ """
previous = self.previous_iid()
- ret = (previous == NO_PREVIOUS_INSTANCE_ID or
- previous != self.datasource.get_instance_id())
+ ret = (
+ previous == NO_PREVIOUS_INSTANCE_ID
+ or previous != self.datasource.get_instance_id()
+ )
return ret
def fetch(self, existing="check"):
@@ -354,75 +436,102 @@ class Init(object):
def cloudify(self):
# Form the needed options to cloudify our members
- return cloud.Cloud(self.datasource,
- self.paths, self.cfg,
- self.distro, helpers.Runners(self.paths),
- reporter=self.reporter)
+ return cloud.Cloud(
+ self.datasource,
+ self.paths,
+ self.cfg,
+ self.distro,
+ helpers.Runners(self.paths),
+ reporter=self.reporter,
+ )
def update(self):
- self._store_userdata()
- self._store_vendordata()
+ self._store_rawdata(self.datasource.get_userdata_raw(), "userdata")
+ self._store_processeddata(self.datasource.get_userdata(), "userdata")
+ self._store_raw_vendordata(
+ self.datasource.get_vendordata_raw(), "vendordata"
+ )
+ self._store_processeddata(
+ self.datasource.get_vendordata(), "vendordata"
+ )
+ self._store_raw_vendordata(
+ self.datasource.get_vendordata2_raw(), "vendordata2"
+ )
+ self._store_processeddata(
+ self.datasource.get_vendordata2(), "vendordata2"
+ )
def setup_datasource(self):
- with events.ReportEventStack("setup-datasource",
- "setting up datasource",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "setup-datasource", "setting up datasource", parent=self.reporter
+ ):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot setup.")
self.datasource.setup(is_new_instance=self.is_new_instance())
def activate_datasource(self):
- with events.ReportEventStack("activate-datasource",
- "activating datasource",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "activate-datasource",
+ "activating datasource",
+ parent=self.reporter,
+ ):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot activate.")
- self.datasource.activate(cfg=self.cfg,
- is_new_instance=self.is_new_instance())
+ self.datasource.activate(
+ cfg=self.cfg, is_new_instance=self.is_new_instance()
+ )
self._write_to_cache()
- def _store_userdata(self):
- raw_ud = self.datasource.get_userdata_raw()
- if raw_ud is None:
- raw_ud = b''
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
- # processed userdata is a Mime message, so write it as string.
- processed_ud = self.datasource.get_userdata()
- if processed_ud is None:
- raw_ud = ''
- util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
-
- def _store_vendordata(self):
- raw_vd = self.datasource.get_vendordata_raw()
- if raw_vd is None:
- raw_vd = b''
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
- # processed vendor data is a Mime message, so write it as string.
- processed_vd = str(self.datasource.get_vendordata())
- if processed_vd is None:
- processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd),
- 0o600)
+ def _store_rawdata(self, data, datasource):
+ # Raw data is bytes, not a string
+ if data is None:
+ data = b""
+ util.write_file(self._get_ipath("%s_raw" % datasource), data, 0o600)
+
+ def _store_raw_vendordata(self, data, datasource):
+ # Only these data types
+ if data is not None and type(data) not in [bytes, str, list]:
+ raise TypeError(
+ "vendordata_raw is unsupported type '%s'" % str(type(data))
+ )
+ # This data may be a list, convert it to a string if so
+ if isinstance(data, list):
+ data = util.json_dumps(data)
+ self._store_rawdata(data, datasource)
+
+ def _store_processeddata(self, processed_data, datasource):
+ # processed is a Mime message, so write as string.
+ if processed_data is None:
+ processed_data = ""
+ util.write_file(
+ self._get_ipath(datasource), str(processed_data), 0o600
+ )
def _default_handlers(self, opts=None):
if opts is None:
opts = {}
- opts.update({
- 'paths': self.paths,
- 'datasource': self.datasource,
- })
+ opts.update(
+ {
+ "paths": self.paths,
+ "datasource": self.datasource,
+ }
+ )
# TODO(harlowja) Hmmm, should we dynamically import these??
cloudconfig_handler = CloudConfigPartHandler(**opts)
shellscript_handler = ShellScriptPartHandler(**opts)
def_handlers = [
cloudconfig_handler,
shellscript_handler,
+ ShellScriptByFreqPartHandler(PER_ALWAYS, **opts),
+ ShellScriptByFreqPartHandler(PER_INSTANCE, **opts),
+ ShellScriptByFreqPartHandler(PER_ONCE, **opts),
BootHookPartHandler(**opts),
UpstartJobPartHandler(**opts),
]
opts.update(
- {'sub_handlers': [cloudconfig_handler, shellscript_handler]})
+ {"sub_handlers": [cloudconfig_handler, shellscript_handler]}
+ )
def_handlers.append(JinjaTemplatePartHandler(**opts))
return def_handlers
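
The split of update() into the _store_* helpers above mostly affects vendor data, whose raw form may now also be a list. A minimal sketch of that type handling follows (hypothetical function name, with json.dumps standing in for util.json_dumps):

import json

def sketch_store_raw_vendordata(data):
    """bytes and str pass through, a list is serialized to JSON,
    anything else is rejected, and None becomes empty bytes."""
    if data is not None and type(data) not in (bytes, str, list):
        raise TypeError("vendordata_raw is unsupported type '%s'" % type(data))
    if isinstance(data, list):
        data = json.dumps(data)
    if data is None:
        data = b""
    return data

assert sketch_store_raw_vendordata(None) == b""
assert sketch_store_raw_vendordata([{"a": 1}]) == '[{"a": 1}]'
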
@@ -431,11 +540,23 @@ class Init(object):
def _default_vendordata_handlers(self):
return self._default_handlers(
- opts={'script_path': 'vendor_scripts',
- 'cloud_config_path': 'vendor_cloud_config'})
+ opts={
+ "script_path": "vendor_scripts",
+ "cloud_config_path": "vendor_cloud_config",
+ }
+ )
+
+ def _default_vendordata2_handlers(self):
+ return self._default_handlers(
+ opts={
+ "script_path": "vendor_scripts",
+ "cloud_config_path": "vendor2_cloud_config",
+ }
+ )
- def _do_handlers(self, data_msg, c_handlers_list, frequency,
- excluded=None):
+ def _do_handlers(
+ self, data_msg, c_handlers_list, frequency, excluded=None
+ ):
"""
Generalized handlers suitable for use with either vendordata
or userdata
@@ -462,21 +583,31 @@ class Init(object):
for (fname, mod_name) in potential_handlers.items():
try:
mod_locs, looked_locs = importer.find_module(
- mod_name, [''], ['list_types', 'handle_part'])
+ mod_name, [""], ["list_types", "handle_part"]
+ )
if not mod_locs:
- LOG.warning("Could not find a valid user-data handler"
- " named %s in file %s (searched %s)",
- mod_name, fname, looked_locs)
+ LOG.warning(
+ "Could not find a valid user-data handler"
+ " named %s in file %s (searched %s)",
+ mod_name,
+ fname,
+ looked_locs,
+ )
continue
mod = importer.import_module(mod_locs[0])
mod = handlers.fixup_handler(mod)
types = c_handlers.register(mod)
if types:
- LOG.debug("Added custom handler for %s [%s] from %s",
- types, mod, fname)
+ LOG.debug(
+ "Added custom handler for %s [%s] from %s",
+ types,
+ mod,
+ fname,
+ )
except Exception:
- util.logexc(LOG, "Failed to register handler from %s",
- fname)
+ util.logexc(
+ LOG, "Failed to register handler from %s", fname
+ )
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
@@ -508,17 +639,17 @@ class Init(object):
def walk_handlers(excluded):
# Walk the user data
part_data = {
- 'handlers': c_handlers,
+ "handlers": c_handlers,
            # Any new handlers that are encountered get written here
- 'handlerdir': idir,
- 'data': data,
+ "handlerdir": idir,
+ "data": data,
# The default frequency if handlers don't have one
- 'frequency': frequency,
+ "frequency": frequency,
# This will be used when new handlers are found
# to help write their contents to files with numbered
# names...
- 'handlercount': 0,
- 'excluded': excluded,
+ "handlercount": 0,
+ "excluded": excluded,
}
handlers.walk(data_msg, handlers.walker_callback, data=part_data)
@@ -544,18 +675,29 @@ class Init(object):
def consume_data(self, frequency=PER_INSTANCE):
        # Consume the userdata first, because we want to let the part
# handlers run first (for merging stuff)
- with events.ReportEventStack("consume-user-data",
- "reading and applying user-data",
- parent=self.reporter):
- if util.get_cfg_option_bool(self.cfg, 'allow_userdata', True):
+ with events.ReportEventStack(
+ "consume-user-data",
+ "reading and applying user-data",
+ parent=self.reporter,
+ ):
+ if util.get_cfg_option_bool(self.cfg, "allow_userdata", True):
self._consume_userdata(frequency)
else:
- LOG.debug('allow_userdata = False: discarding user-data')
+ LOG.debug("allow_userdata = False: discarding user-data")
+
+ with events.ReportEventStack(
+ "consume-vendor-data",
+ "reading and applying vendor-data",
+ parent=self.reporter,
+ ):
+ self._consume_vendordata("vendordata", frequency)
- with events.ReportEventStack("consume-vendor-data",
- "reading and applying vendor-data",
- parent=self.reporter):
- self._consume_vendordata(frequency)
+ with events.ReportEventStack(
+ "consume-vendor-data2",
+ "reading and applying vendor-data2",
+ parent=self.reporter,
+ ):
+ self._consume_vendordata("vendordata2", frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -568,50 +710,75 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
- def _consume_vendordata(self, frequency=PER_INSTANCE):
+ def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE):
"""
Consume the vendordata and run the part handlers on it
"""
+
# User-data should have been consumed first.
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives user or system a chance to override.
- if not self.datasource.get_vendordata_raw():
- LOG.debug("no vendordata from datasource")
- return
-
- _cc_merger = helpers.ConfigMerger(paths=self._paths,
- datasource=self.datasource,
- additional_fns=[],
- base_cfg=self.cfg,
- include_vendor=False)
- vdcfg = _cc_merger.cfg.get('vendor_data', {})
+ if vendor_source == "vendordata":
+ if not self.datasource.get_vendordata_raw():
+ LOG.debug("no vendordata from datasource")
+ return
+ cfg_name = "vendor_data"
+ elif vendor_source == "vendordata2":
+ if not self.datasource.get_vendordata2_raw():
+ LOG.debug("no vendordata2 from datasource")
+ return
+ cfg_name = "vendor_data2"
+ else:
+ raise RuntimeError(
+ "vendor_source arg must be either 'vendordata'"
+ " or 'vendordata2'"
+ )
+
+ _cc_merger = helpers.ConfigMerger(
+ paths=self._paths,
+ datasource=self.datasource,
+ additional_fns=[],
+ base_cfg=self.cfg,
+ include_vendor=False,
+ )
+ vdcfg = _cc_merger.cfg.get(cfg_name, {})
if not isinstance(vdcfg, dict):
- vdcfg = {'enabled': False}
- LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
- vdcfg)
+ vdcfg = {"enabled": False}
+ LOG.warning(
+ "invalid %s setting. resetting to: %s", cfg_name, vdcfg
+ )
- enabled = vdcfg.get('enabled')
- no_handlers = vdcfg.get('disabled_handlers', None)
+ enabled = vdcfg.get("enabled")
+ no_handlers = vdcfg.get("disabled_handlers", None)
if not util.is_true(enabled):
- LOG.debug("vendordata consumption is disabled.")
+ LOG.debug("%s consumption is disabled.", vendor_source)
return
- LOG.debug("vendor data will be consumed. disabled_handlers=%s",
- no_handlers)
+ LOG.debug(
+ "%s will be consumed. disabled_handlers=%s",
+ vendor_source,
+ no_handlers,
+ )
- # Ensure vendordata source fetched before activation (just incase)
- vendor_data_msg = self.datasource.get_vendordata()
+ # Ensure vendordata source fetched before activation (just in case.)
- # This keeps track of all the active handlers, while excluding what the
- # users doesn't want run, i.e. boot_hook, cloud_config, shell_script
- c_handlers_list = self._default_vendordata_handlers()
+ # c_handlers_list keeps track of all the active handlers, while
+ # excluding what the users doesn't want run, i.e. boot_hook,
+ # cloud_config, shell_script
+ if vendor_source == "vendordata":
+ vendor_data_msg = self.datasource.get_vendordata()
+ c_handlers_list = self._default_vendordata_handlers()
+ else:
+ vendor_data_msg = self.datasource.get_vendordata2()
+ c_handlers_list = self._default_vendordata2_handlers()
# Run the handlers
- self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
- excluded=no_handlers)
+ self._do_handlers(
+ vendor_data_msg, c_handlers_list, frequency, excluded=no_handlers
+ )
def _consume_userdata(self, frequency=PER_INSTANCE):
"""
@@ -629,7 +796,8 @@ class Init(object):
def _find_networking_config(self):
disable_file = os.path.join(
- self.paths.get_cpath('data'), 'upgraded-network')
+ self.paths.get_cpath("data"), "upgraded-network"
+ )
if os.path.exists(disable_file):
return (None, disable_file)
@@ -637,12 +805,13 @@ class Init(object):
NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(),
NetworkConfigSource.initramfs: cmdline.read_initramfs_config(),
NetworkConfigSource.ds: None,
- NetworkConfigSource.system_cfg: self.cfg.get('network'),
+ NetworkConfigSource.system_cfg: self.cfg.get("network"),
}
- if self.datasource and hasattr(self.datasource, 'network_config'):
- available_cfgs[NetworkConfigSource.ds] = (
- self.datasource.network_config)
+ if self.datasource and hasattr(self.datasource, "network_config"):
+ available_cfgs[
+ NetworkConfigSource.ds
+ ] = self.datasource.network_config
if self.datasource:
order = self.datasource.network_config_sources
@@ -650,12 +819,17 @@ class Init(object):
order = sources.DataSource.network_config_sources
for cfg_source in order:
if not hasattr(NetworkConfigSource, cfg_source):
- LOG.warning('data source specifies an invalid network'
- ' cfg_source: %s', cfg_source)
+ LOG.warning(
+ "data source specifies an invalid network cfg_source: %s",
+ cfg_source,
+ )
continue
if cfg_source not in available_cfgs:
- LOG.warning('data source specifies an unavailable network'
- ' cfg_source: %s', cfg_source)
+ LOG.warning(
+ "data source specifies an unavailable network"
+ " cfg_source: %s",
+ cfg_source,
+ )
continue
ncfg = available_cfgs[cfg_source]
if net.is_disabled_cfg(ncfg):
@@ -663,8 +837,10 @@ class Init(object):
return (None, cfg_source)
if ncfg:
return (ncfg, cfg_source)
- return (self.distro.generate_fallback_config(),
- NetworkConfigSource.fallback)
+ return (
+ self.distro.generate_fallback_config(),
+ NetworkConfigSource.fallback,
+ )
def _apply_netcfg_names(self, netcfg):
try:
@@ -673,27 +849,60 @@ class Init(object):
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
+ def _get_per_boot_network_semaphore(self):
+ return namedtuple("Semaphore", "semaphore args")(
+ helpers.FileSemaphores(self.paths.get_runpath("sem")),
+ ("apply_network_config", PER_ONCE),
+ )
+
+ def _network_already_configured(self) -> bool:
+ sem = self._get_per_boot_network_semaphore()
+ return sem.semaphore.has_run(*sem.args)
+
def apply_network_config(self, bring_up):
- # get a network config
+ """Apply the network config.
+
+ Find the config, determine whether to apply it, apply it via
+ the distro, and optionally bring it up
+ """
netcfg, src = self._find_networking_config()
if netcfg is None:
LOG.info("network config is disabled by %s", src)
return
- # request an update if needed/available
- if self.datasource is not NULL_DATA_SOURCE:
- if not self.is_new_instance():
- if not self.datasource.update_metadata([EventType.BOOT]):
- LOG.debug(
- "No network config applied. Neither a new instance"
- " nor datasource network update on '%s' event",
- EventType.BOOT)
- # nothing new, but ensure proper names
- self._apply_netcfg_names(netcfg)
- return
- else:
- # refresh netcfg after update
- netcfg, src = self._find_networking_config()
+ def event_enabled_and_metadata_updated(event_type):
+ return (
+ update_event_enabled(
+ datasource=self.datasource,
+ cfg=self.cfg,
+ event_source_type=event_type,
+ scope=EventScope.NETWORK,
+ )
+ and self.datasource.update_metadata_if_supported([event_type])
+ )
+
+ def should_run_on_boot_event():
+ return (
+ not self._network_already_configured()
+ and event_enabled_and_metadata_updated(EventType.BOOT)
+ )
+
+ if (
+ self.datasource is not NULL_DATA_SOURCE
+ and not self.is_new_instance()
+ and not should_run_on_boot_event()
+ and not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
+ ):
+ LOG.debug(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed"
+ )
+ # nothing new, but ensure proper names
+ self._apply_netcfg_names(netcfg)
+ return
+
+ # refresh netcfg after update
+ netcfg, src = self._find_networking_config()
# ensure all physical devices in config are present
self.distro.networking.wait_for_physdevs(netcfg)
@@ -702,18 +911,32 @@ class Init(object):
self._apply_netcfg_names(netcfg)
# rendering config
- LOG.info("Applying network configuration from %s bringup=%s: %s",
- src, bring_up, netcfg)
+ LOG.info(
+ "Applying network configuration from %s bringup=%s: %s",
+ src,
+ bring_up,
+ netcfg,
+ )
+
+ sem = self._get_per_boot_network_semaphore()
try:
- return self.distro.apply_network_config(netcfg, bring_up=bring_up)
+ with sem.semaphore.lock(*sem.args):
+ return self.distro.apply_network_config(
+ netcfg, bring_up=bring_up
+ )
except net.RendererNotFoundError as e:
- LOG.error("Unable to render networking. Network config is "
- "likely broken: %s", e)
+ LOG.error(
+ "Unable to render networking. Network config is "
+ "likely broken: %s",
+ e,
+ )
return
except NotImplementedError:
- LOG.warning("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly.",
- self.distro)
+ LOG.warning(
+ "distro '%s' does not implement apply_network_config. "
+ "networking may not be configured properly.",
+ self.distro,
+ )
return
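
The reworked apply_network_config() gate condenses to a few booleans; the sketch below (hypothetical names) restates it: configuration is applied on a new instance, on an allowed BOOT event that has not already run this boot, or on an allowed BOOT_LEGACY event, and otherwise only device naming is re-applied.

def sketch_should_apply_network_config(is_new_instance, already_configured,
                                       boot_event_allowed, boot_legacy_allowed):
    """Condensed form of the decision made above."""
    if is_new_instance:
        return True
    if not already_configured and boot_event_allowed:
        return True
    return boot_legacy_allowed

# A later boot with no update events enabled does not reconfigure networking.
assert sketch_should_apply_network_config(False, True, False, False) is False
assert sketch_should_apply_network_config(False, False, True, False) is True
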
@@ -725,18 +948,22 @@ class Modules(object):
self._cached_cfg = None
if reporter is None:
reporter = events.ReportEventStack(
- name="module-reporter", description="module-desc",
- reporting_enabled=False)
+ name="module-reporter",
+ description="module-desc",
+ reporting_enabled=False,
+ )
self.reporter = reporter
@property
def cfg(self):
# None check to avoid empty case causing re-reading
if self._cached_cfg is None:
- merger = helpers.ConfigMerger(paths=self.init.paths,
- datasource=self.init.datasource,
- additional_fns=self.cfg_files,
- base_cfg=self.init.cfg)
+ merger = helpers.ConfigMerger(
+ paths=self.init.paths,
+ datasource=self.init.datasource,
+ additional_fns=self.cfg_files,
+ base_cfg=self.init.cfg,
+ )
self._cached_cfg = merger.cfg
# LOG.debug("Loading 'module' config %s", self._cached_cfg)
# Only give out a copy so that others can't modify this...
@@ -757,57 +984,67 @@ class Modules(object):
if not item:
continue
if isinstance(item, str):
- module_list.append({
- 'mod': item.strip(),
- })
+ module_list.append(
+ {
+ "mod": item.strip(),
+ }
+ )
elif isinstance(item, (list)):
contents = {}
# Meant to fall through...
if len(item) >= 1:
- contents['mod'] = item[0].strip()
+ contents["mod"] = item[0].strip()
if len(item) >= 2:
- contents['freq'] = item[1].strip()
+ contents["freq"] = item[1].strip()
if len(item) >= 3:
- contents['args'] = item[2:]
+ contents["args"] = item[2:]
if contents:
module_list.append(contents)
elif isinstance(item, (dict)):
contents = {}
valid = False
- if 'name' in item:
- contents['mod'] = item['name'].strip()
+ if "name" in item:
+ contents["mod"] = item["name"].strip()
valid = True
- if 'frequency' in item:
- contents['freq'] = item['frequency'].strip()
- if 'args' in item:
- contents['args'] = item['args'] or []
+ if "frequency" in item:
+ contents["freq"] = item["frequency"].strip()
+ if "args" in item:
+ contents["args"] = item["args"] or []
if contents and valid:
module_list.append(contents)
else:
- raise TypeError(("Failed to read '%s' item in config,"
- " unknown type %s") %
- (item, type_utils.obj_name(item)))
+ raise TypeError(
+ "Failed to read '%s' item in config, unknown type %s"
+ % (item, type_utils.obj_name(item))
+ )
return module_list
def _fixup_modules(self, raw_mods):
mostly_mods = []
for raw_mod in raw_mods:
- raw_name = raw_mod['mod']
- freq = raw_mod.get('freq')
- run_args = raw_mod.get('args') or []
+ raw_name = raw_mod["mod"]
+ freq = raw_mod.get("freq")
+ run_args = raw_mod.get("args") or []
mod_name = config.form_module_name(raw_name)
if not mod_name:
continue
if freq and freq not in FREQUENCIES:
- LOG.warning(("Config specified module %s"
- " has an unknown frequency %s"), raw_name, freq)
+ LOG.warning(
+ "Config specified module %s has an unknown frequency %s",
+ raw_name,
+ freq,
+ )
# Reset it so when ran it will get set to a known value
freq = None
mod_locs, looked_locs = importer.find_module(
- mod_name, ['', type_utils.obj_name(config)], ['handle'])
+ mod_name, ["", type_utils.obj_name(config)], ["handle"]
+ )
if not mod_locs:
- LOG.warning("Could not find module named %s (searched %s)",
- mod_name, looked_locs)
+ LOG.warning(
+ "Could not find module named %s (searched %s)",
+ mod_name,
+ looked_locs,
+ )
continue
mod = config.fixup_module(importer.import_module(mod_locs[0]))
mostly_mods.append([mod, raw_name, freq, run_args])
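
The three module-list entry forms accepted above (a bare name, a [name, frequency, args...] list, and a {'name': ..., 'frequency': ..., 'args': ...} dict) normalise as in this condensed sketch (hypothetical helper with simplified error handling):

def sketch_read_modules(raw_items):
    """Normalize module-list entries into {'mod', 'freq', 'args'} dicts."""
    module_list = []
    for item in raw_items:
        if not item:
            continue
        if isinstance(item, str):
            module_list.append({"mod": item.strip()})
        elif isinstance(item, list):
            contents = {"mod": str(item[0]).strip()}
            if len(item) >= 2:
                contents["freq"] = str(item[1]).strip()
            if len(item) >= 3:
                contents["args"] = item[2:]
            module_list.append(contents)
        elif isinstance(item, dict) and "name" in item:
            contents = {"mod": item["name"].strip()}
            if "frequency" in item:
                contents["freq"] = item["frequency"].strip()
            if "args" in item:
                contents["args"] = item["args"] or []
            module_list.append(contents)
        else:
            raise TypeError("unknown module item: %r" % (item,))
    return module_list

assert sketch_read_modules(["ssh", ["bootcmd", "once"], {"name": "runcmd"}]) == [
    {"mod": "ssh"},
    {"mod": "bootcmd", "freq": "once"},
    {"mod": "runcmd"},
]
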
@@ -826,15 +1063,15 @@ class Modules(object):
freq = mod.frequency
if freq not in FREQUENCIES:
freq = PER_INSTANCE
- LOG.debug("Running module %s (%s) with frequency %s",
- name, mod, freq)
+ LOG.debug(
+ "Running module %s (%s) with frequency %s", name, mod, freq
+ )
# Use the configs logger and not our own
# TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
# its own logger?
- func_args = [name, self.cfg,
- cc, config.LOG, args]
+ func_args = [name, self.cfg, cc, config.LOG, args]
# Mark it as having started running
which_ran.append(name)
# This name will affect the semaphore name created
@@ -842,11 +1079,13 @@ class Modules(object):
desc = "running %s with frequency %s" % (run_name, freq)
myrep = events.ReportEventStack(
- name=run_name, description=desc, parent=self.reporter)
+ name=run_name, description=desc, parent=self.reporter
+ )
with myrep:
- ran, _r = cc.run(run_name, mod.handle, func_args,
- freq=freq)
+ ran, _r = cc.run(
+ run_name, mod.handle, func_args, freq=freq
+ )
if ran:
myrep.message = "%s ran successfully" % run_name
else:
@@ -860,9 +1099,9 @@ class Modules(object):
def run_single(self, mod_name, args=None, freq=None):
# Form the users module 'specs'
mod_to_be = {
- 'mod': mod_name,
- 'args': args,
- 'freq': freq,
+ "mod": mod_name,
+ "args": args,
+ "freq": freq,
}
# Now resume doing the normal fixups and running
raw_mods = [mod_to_be]
@@ -876,13 +1115,14 @@ class Modules(object):
skipped = []
forced = []
- overridden = self.cfg.get('unverified_modules', [])
+ overridden = self.cfg.get("unverified_modules", [])
active_mods = []
all_distros = set([distros.ALL_DISTROS])
for (mod, name, _freq, _args) in mostly_mods:
worked_distros = set(mod.distros) # Minimally [] per fixup_modules
worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies))
+ distros.Distro.expand_osfamily(mod.osfamilies)
+ )
# Skip only when the following conditions are all met:
# - distros are defined in the module != ALL_DISTROS
@@ -898,12 +1138,15 @@ class Modules(object):
active_mods.append([mod, name, _freq, _args])
if skipped:
- LOG.info("Skipping modules '%s' because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.",
- ','.join(skipped), d_name)
+ LOG.info(
+ "Skipping modules '%s' because they are not verified "
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.",
+ ",".join(skipped),
+ d_name,
+ )
if forced:
- LOG.info("running unverified_modules: '%s'", ', '.join(forced))
+ LOG.info("running unverified_modules: '%s'", ", ".join(forced))
return self._run_modules(active_mods)
@@ -923,7 +1166,9 @@ def fetch_base_config():
read_runtime_config(),
# Kernel/cmdline parameters override system config
util.read_conf_from_cmdline(),
- ], reverse=True)
+ ],
+ reverse=True,
+ )
def _pkl_store(obj, fname):
@@ -953,8 +1198,11 @@ def _pkl_load(fname):
return None
try:
return pickle.loads(pickle_contents)
+ except sources.DatasourceUnpickleUserDataError:
+ return None
except Exception:
util.logexc(LOG, "Failed loading pickled blob from %s", fname)
return None
+
# vi: ts=4 expandtab
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 024e1a98..7693601d 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -4,7 +4,6 @@
import logging
import os
import subprocess
-
from errno import ENOEXEC
LOG = logging.getLogger(__name__)
@@ -37,7 +36,7 @@ def prepend_base_command(base_command, commands):
elif command[0] != base_command: # Automatically prepend
command.insert(0, base_command)
elif isinstance(command, str):
- if not command.startswith('%s ' % base_command):
+ if not command.startswith("%s " % base_command):
warnings.append(command)
else:
errors.append(str(command))
@@ -46,30 +45,43 @@ def prepend_base_command(base_command, commands):
if warnings:
LOG.warning(
- 'Non-%s commands in %s config:\n%s',
- base_command, base_command, '\n'.join(warnings))
+ "Non-%s commands in %s config:\n%s",
+ base_command,
+ base_command,
+ "\n".join(warnings),
+ )
if errors:
raise TypeError(
- 'Invalid {name} config.'
- ' These commands are not a string or list:\n{errors}'.format(
- name=base_command, errors='\n'.join(errors)))
+ "Invalid {name} config."
+ " These commands are not a string or list:\n{errors}".format(
+ name=base_command, errors="\n".join(errors)
+ )
+ )
return fixed_commands
class ProcessExecutionError(IOError):
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)s\n'
- 'Stderr: %(stderr)s')
- empty_attr = '-'
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
+ MESSAGE_TMPL = (
+ "%(description)s\n"
+ "Command: %(cmd)s\n"
+ "Exit code: %(exit_code)s\n"
+ "Reason: %(reason)s\n"
+ "Stdout: %(stdout)s\n"
+ "Stderr: %(stderr)s"
+ )
+ empty_attr = "-"
+
+ def __init__(
+ self,
+ stdout=None,
+ stderr=None,
+ exit_code=None,
+ cmd=None,
+ description=None,
+ reason=None,
+ errno=None,
+ ):
if not cmd:
self.cmd = self.empty_attr
else:
@@ -77,9 +89,9 @@ class ProcessExecutionError(IOError):
if not description:
if not exit_code and errno == ENOEXEC:
- self.description = 'Exec format error. Missing #! in script?'
+ self.description = "Exec format error. Missing #! in script?"
else:
- self.description = 'Unexpected error while running command.'
+ self.description = "Unexpected error while running command."
else:
self.description = description
@@ -111,12 +123,12 @@ class ProcessExecutionError(IOError):
self.errno = errno
message = self.MESSAGE_TMPL % {
- 'description': self._ensure_string(self.description),
- 'cmd': self._ensure_string(self.cmd),
- 'exit_code': self._ensure_string(self.exit_code),
- 'stdout': self._ensure_string(self.stdout),
- 'stderr': self._ensure_string(self.stderr),
- 'reason': self._ensure_string(self.reason),
+ "description": self._ensure_string(self.description),
+ "cmd": self._ensure_string(self.cmd),
+ "exit_code": self._ensure_string(self.exit_code),
+ "stdout": self._ensure_string(self.stdout),
+ "stderr": self._ensure_string(self.stderr),
+ "reason": self._ensure_string(self.reason),
}
IOError.__init__(self, message)
@@ -130,8 +142,8 @@ class ProcessExecutionError(IOError):
"""
indent text on all but the first line, allowing for easy to read output
"""
- cr = '\n'
- indent = ' ' * indent_level
+ cr = "\n"
+ indent = " " * indent_level
# if input is bytes, return bytes
if isinstance(text, bytes):
cr = cr.encode()
@@ -141,10 +153,21 @@ class ProcessExecutionError(IOError):
return text.rstrip(cr).replace(cr, cr + indent)
-def subp(args, data=None, rcs=None, env=None, capture=True,
- combine_capture=False, shell=False,
- logstring=False, decode="replace", target=None, update_env=None,
- status_cb=None, cwd=None):
+def subp(
+ args,
+ data=None,
+ rcs=None,
+ env=None,
+ capture=True,
+ combine_capture=False,
+ shell=False,
+ logstring=False,
+ decode="replace",
+ target=None,
+ update_env=None,
+ status_cb=None,
+ cwd=None,
+):
"""Run a subprocess.
:param args: command to run in a list. [cmd, arg1, arg2...]
@@ -210,18 +233,26 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
env.update(update_env)
if target_path(target) != "/":
- args = ['chroot', target] + list(args)
+ args = ["chroot", target] + list(args)
if status_cb:
- command = ' '.join(args) if isinstance(args, list) else args
- status_cb('Begin run command: {command}\n'.format(command=command))
+ command = " ".join(args) if isinstance(args, list) else args
+ status_cb("Begin run command: {command}\n".format(command=command))
if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"),
- args, rcs, shell, 'combine' if combine_capture else capture)
+ LOG.debug(
+ "Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)",
+ args,
+ rcs,
+ shell,
+ "combine" if combine_capture else capture,
+ )
else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
+ LOG.debug(
+ "Running hidden command to protect sensitive "
+ "input/output logstring: %s",
+ logstring,
+ )
stdin = None
stdout = None
@@ -251,20 +282,28 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
bytes_args = args.encode("utf-8")
else:
bytes_args = [
- x if isinstance(x, bytes) else x.encode("utf-8")
- for x in args]
+ x if isinstance(x, bytes) else x.encode("utf-8") for x in args
+ ]
try:
- sp = subprocess.Popen(bytes_args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell, cwd=cwd)
+ sp = subprocess.Popen(
+ bytes_args,
+ stdout=stdout,
+ stderr=stderr,
+ stdin=stdin,
+ env=env,
+ shell=shell,
+ cwd=cwd,
+ )
(out, err) = sp.communicate(data)
except OSError as e:
if status_cb:
- status_cb('ERROR: End run command: invalid command provided\n')
+ status_cb("ERROR: End run command: invalid command provided\n")
raise ProcessExecutionError(
- cmd=args, reason=e, errno=e.errno,
+ cmd=args,
+ reason=e,
+ errno=e.errno,
stdout="-" if decode else b"-",
- stderr="-" if decode else b"-"
+ stderr="-" if decode else b"-",
) from e
finally:
if devnull_fp:
@@ -273,11 +312,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
# Just ensure blank instead of none.
if capture or combine_capture:
if not out:
- out = b''
+ out = b""
if not err:
- err = b''
+ err = b""
if decode:
- def ldecode(data, m='utf-8'):
+
+ def ldecode(data, m="utf-8"):
if not isinstance(data, bytes):
return data
return data.decode(m, decode)
@@ -288,13 +328,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
rc = sp.returncode
if rc not in rcs:
if status_cb:
- status_cb(
- 'ERROR: End run command: exit({code})\n'.format(code=rc))
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
+ status_cb("ERROR: End run command: exit({code})\n".format(code=rc))
+ raise ProcessExecutionError(
+ stdout=out, stderr=err, exit_code=rc, cmd=args
+ )
if status_cb:
- status_cb('End run command: exit({code})\n'.format(code=rc))
+ status_cb("End run command: exit({code})\n".format(code=rc))
return (out, err)
@@ -331,8 +370,9 @@ def which(program, search=None, target=None):
return program
if search is None:
- paths = [p.strip('"') for p in
- os.environ.get("PATH", "").split(os.pathsep)]
+ paths = [
+ p.strip('"') for p in os.environ.get("PATH", "").split(os.pathsep)
+ ]
if target == "/":
search = paths
else:
@@ -382,8 +422,9 @@ def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if failed and attempted:
raise RuntimeError(
- 'Runparts: %s failures (%s) in %s attempted commands' %
- (len(failed), ",".join(failed), len(attempted)))
+ "Runparts: %s failures (%s) in %s attempted commands"
+ % (len(failed), ",".join(failed), len(attempted))
+ )
# vi: ts=4 expandtab
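The reformatted subp() keeps its existing contract: run a command, capture stdout/stderr by default, and raise ProcessExecutionError (whose MESSAGE_TMPL fields are shown above) when the exit code is not in the allowed rcs list. A rough usage sketch follows, based only on the signature and exception attributes visible in the hunks above; it is illustration, not part of this patch:

    from cloudinit import subp

    # Capture stdout/stderr; by default only exit code 0 is accepted.
    out, err = subp.subp(["echo", "hello"])

    try:
        subp.subp(["false"])  # non-zero exit -> ProcessExecutionError
    except subp.ProcessExecutionError as e:
        # The exception exposes cmd, exit_code, stdout and stderr,
        # rendered through MESSAGE_TMPL when stringified.
        print(e.exit_code, e.stdout, e.stderr)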
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index 346276ec..e23b6599 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -42,7 +42,7 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
if os.getuid() == 0:
tdir = _ROOT_TMPDIR
else:
- tdir = os.environ.get('TMPDIR', '/tmp')
+ tdir = os.environ.get("TMPDIR", "/tmp")
if not os.path.isdir(tdir):
os.makedirs(tdir)
os.chmod(tdir, 0o1777)
@@ -52,8 +52,9 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
def ExtendedTemporaryFile(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
@@ -76,7 +77,7 @@ def ExtendedTemporaryFile(**kwargs):
def unlink_now():
fh.unlink(fh.name)
- setattr(fh, 'unlink_now', unlink_now)
+ setattr(fh, "unlink_now", unlink_now)
return fh
@@ -93,14 +94,17 @@ def tempdir(rmtree_ignore_errors=False, **kwargs):
def mkdtemp(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
return tempfile.mkdtemp(**kwargs)
def mkstemp(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
return tempfile.mkstemp(**kwargs)
+
# vi: ts=4 expandtab
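All of these helpers route their dir argument through _tempfile_dir_arg(): root gets the private run-time directory (_ROOT_TMPDIR), unprivileged users get $TMPDIR falling back to /tmp, and needs_exe=True selects a location where scripts may be executed. An illustrative sketch, assuming only the keyword arguments visible in the hunks above; not part of this patch:

    from cloudinit import temp_utils

    # Temporary directory removed when the context manager exits.
    with temp_utils.tempdir(prefix="cloud-init-example-") as tdir:
        print("working in", tdir)

    # Secure temp file whose parent directory allows execution.
    fd, path = temp_utils.mkstemp(needs_exe=True)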
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index a00ade20..1e147d4a 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -12,17 +12,19 @@
import collections
import re
-
+import sys
try:
from Cheetah.Template import Template as CTemplate
+
CHEETAH_AVAILABLE = True
except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- from jinja2 import Template as JTemplate
from jinja2 import DebugUndefined as JUndefined
+ from jinja2 import Template as JTemplate
+
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
JINJA_AVAILABLE = False
@@ -31,26 +33,28 @@ except (ImportError, AttributeError):
from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
-
+from cloudinit.atomic_helper import write_file
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
-BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
-MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
+BASIC_MATCHER = re.compile(r"\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)")
+MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/"
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
- return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
+ return "%s%s" % (MISSING_JINJA_PREFIX, self._undefined_name)
def __sub__(self, other):
- other = str(other).replace(MISSING_JINJA_PREFIX, '')
+ other = str(other).replace(MISSING_JINJA_PREFIX, "")
raise TypeError(
'Undefined jinja variable: "{this}-{other}". Jinja tried'
' subtraction. Perhaps you meant "{this}_{other}"?'.format(
- this=self._undefined_name, other=other))
+ this=self._undefined_name, other=other
+ )
+ )
def basic_render(content, params):
@@ -73,67 +77,75 @@ def basic_render(content, params):
while len(path) > 1:
key = path.popleft()
if not isinstance(selected_params, dict):
- raise TypeError("Can not traverse into"
- " non-dictionary '%s' of type %s while"
- " looking for subkey '%s'"
- % (selected_params,
- tu.obj_name(selected_params),
- key))
+ raise TypeError(
+ "Can not traverse into"
+ " non-dictionary '%s' of type %s while"
+ " looking for subkey '%s'"
+ % (selected_params, tu.obj_name(selected_params), key)
+ )
selected_params = selected_params[key]
key = path.popleft()
if not isinstance(selected_params, dict):
- raise TypeError("Can not extract key '%s' from non-dictionary"
- " '%s' of type %s"
- % (key, selected_params,
- tu.obj_name(selected_params)))
+ raise TypeError(
+ "Can not extract key '%s' from non-dictionary '%s' of type %s"
+ % (key, selected_params, tu.obj_name(selected_params))
+ )
return str(selected_params[key])
return BASIC_MATCHER.sub(replacer, content)
def detect_template(text):
-
def cheetah_render(content, params):
return CTemplate(content, searchList=[params]).respond()
def jinja_render(content, params):
# keep_trailing_newline is in jinja2 2.7+, not 2.6
add = "\n" if content.endswith("\n") else ""
- return JTemplate(content,
- undefined=UndefinedJinjaVariable,
- trim_blocks=True).render(**params) + add
+ return (
+ JTemplate(
+ content, undefined=UndefinedJinjaVariable, trim_blocks=True
+ ).render(**params)
+ + add
+ )
if text.find("\n") != -1:
ident, rest = text.split("\n", 1)
else:
ident = text
- rest = ''
+ rest = ""
type_match = TYPE_MATCHER.match(ident)
if not type_match:
if CHEETAH_AVAILABLE:
LOG.debug("Using Cheetah as the renderer for unknown template.")
- return ('cheetah', cheetah_render, text)
+ return ("cheetah", cheetah_render, text)
else:
- return ('basic', basic_render, text)
+ return ("basic", basic_render, text)
else:
template_type = type_match.group(1).lower().strip()
- if template_type not in ('jinja', 'cheetah', 'basic'):
- raise ValueError("Unknown template rendering type '%s' requested"
- % template_type)
- if template_type == 'jinja' and not JINJA_AVAILABLE:
- LOG.warning("Jinja not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'jinja' and JINJA_AVAILABLE:
- return ('jinja', jinja_render, rest)
- if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
- LOG.warning("Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
- return ('cheetah', cheetah_render, rest)
+ if template_type not in ("jinja", "cheetah", "basic"):
+ raise ValueError(
+ "Unknown template rendering type '%s' requested"
+ % template_type
+ )
+ if template_type == "jinja" and not JINJA_AVAILABLE:
+ LOG.warning(
+ "Jinja not available as the selected renderer for"
+ " desired template, reverting to the basic renderer."
+ )
+ return ("basic", basic_render, rest)
+ elif template_type == "jinja" and JINJA_AVAILABLE:
+ return ("jinja", jinja_render, rest)
+ if template_type == "cheetah" and not CHEETAH_AVAILABLE:
+ LOG.warning(
+ "Cheetah not available as the selected renderer for"
+ " desired template, reverting to the basic renderer."
+ )
+ return ("basic", basic_render, rest)
+ elif template_type == "cheetah" and CHEETAH_AVAILABLE:
+ return ("cheetah", cheetah_render, rest)
# Only thing left over is the basic renderer (it is always available).
- return ('basic', basic_render, rest)
+ return ("basic", basic_render, rest)
def render_from_file(fn, params):
@@ -143,7 +155,8 @@ def render_from_file(fn, params):
# If it is given a str that has non-ascii then it will raise a
# UnicodeDecodeError. So we explicitly convert to unicode type here.
template_type, renderer, content = detect_template(
- util.load_file(fn, decode=False).decode('utf-8'))
+ util.load_file(fn, decode=False).decode("utf-8")
+ )
LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
return renderer(content, params)
@@ -168,4 +181,18 @@ def render_string(content, params):
_template_type, renderer, content = detect_template(content)
return renderer(content, params)
+
+def render_cloudcfg(variant, template, output):
+
+ with open(template, "r") as fh:
+ contents = fh.read()
+ tpl_params = {"variant": variant}
+ contents = (render_string(contents, tpl_params)).rstrip() + "\n"
+ util.load_yaml(contents)
+ if output == "-":
+ sys.stdout.write(contents)
+ else:
+ write_file(output, contents, omode="w")
+
+
# vi: ts=4 expandtab
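detect_template() keys off an optional first-line header of the form "## template: jinja" (or cheetah/basic); without such a header it prefers Cheetah when available and otherwise falls back to the basic ${var} renderer, and undefined jinja variables are rendered with the MISSING_JINJA_PREFIX marker (CI_MISSING_JINJA_VAR/) instead of raising. A small illustration of render_string(), assuming jinja2 is installed; not part of this patch:

    from cloudinit import templater

    content = "## template: jinja\nhostname: {{ hostname }}\n"
    print(templater.render_string(content, {"hostname": "vyos-01"}))
    # -> hostname: vyos-01

    # Undefined variables stay visible rather than aborting the render:
    print(templater.render_string("## template: jinja\n{{ missing }}\n", {}))
    # -> CI_MISSING_JINJA_VAR/missing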
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
deleted file mode 100644
index 311dfad6..00000000
--- a/cloudinit/tests/test_gpg.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-"""Test gpg module."""
-
-from unittest import mock
-
-from cloudinit import gpg
-from cloudinit import subp
-from cloudinit.tests.helpers import CiTestCase
-
-
-@mock.patch("cloudinit.gpg.time.sleep")
-@mock.patch("cloudinit.gpg.subp.subp")
-class TestReceiveKeys(CiTestCase):
- """Test the recv_key method."""
-
- def test_retries_on_subp_exc(self, m_subp, m_sleep):
- """retry should be done on gpg receive keys failure."""
- retries = (1, 2, 4)
- my_exc = subp.ProcessExecutionError(
- stdout='', stderr='', exit_code=2, cmd=['mycmd'])
- m_subp.side_effect = (my_exc, my_exc, ('', ''))
- gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
- self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list)
-
- def test_raises_error_after_retries(self, m_subp, m_sleep):
- """If the final run fails, error should be raised."""
- naplen = 1
- keyid, keyserver = ("ABCD", "keyserver.example.com")
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout='', stderr='', exit_code=2, cmd=['mycmd'])
- with self.assertRaises(ValueError) as rcm:
- gpg.recv_key(keyid, keyserver, retries=(naplen,))
- self.assertIn(keyid, str(rcm.exception))
- self.assertIn(keyserver, str(rcm.exception))
- m_sleep.assert_called_with(naplen)
-
- def test_no_retries_on_none(self, m_subp, m_sleep):
- """retry should not be done if retries is None."""
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout='', stderr='', exit_code=2, cmd=['mycmd'])
- with self.assertRaises(ValueError):
- gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
- m_sleep.assert_not_called()
-
- def test_expected_gpg_command(self, m_subp, m_sleep):
- """Verify gpg is called with expected args."""
- key, keyserver = ("DEADBEEF", "keyserver.example.com")
- retries = (1, 2, 4)
- m_subp.return_value = ('', '')
- gpg.recv_key(key, keyserver, retries=retries)
- m_subp.assert_called_once_with(
- ['gpg', '--no-tty',
- '--keyserver=%s' % keyserver, '--recv-keys', key],
- capture=True)
- m_sleep.assert_not_called()
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
deleted file mode 100644
index e44b16d8..00000000
--- a/cloudinit/tests/test_netinfo.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests netinfo module functions and classes."""
-
-from copy import copy
-
-from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat
-from cloudinit.tests.helpers import CiTestCase, mock, readResource
-
-
-# Example ifconfig and route output
-SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
-SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
-SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
-SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
-SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
-SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
-SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
-SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
-NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
-ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
-FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output")
-
-
-class TestNetInfo(CiTestCase):
-
- maxDiff = None
- with_logs = True
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_old_nettools_pformat(self, m_subp, m_which):
- """netdev_pformat properly rendering old nettools info."""
- m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- content = netdev_pformat()
- self.assertEqual(NETDEV_FORMATTED_OUT, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_new_nettools_pformat(self, m_subp, m_which):
- """netdev_pformat properly rendering netdev new nettools info."""
- m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- content = netdev_pformat()
- self.assertEqual(NETDEV_FORMATTED_OUT, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
- """netdev_pformat properly rendering netdev new nettools info."""
- m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '')
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- content = netdev_pformat()
- print()
- print(content)
- print()
- self.assertEqual(FREEBSD_NETDEV_OUT, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_iproute_pformat(self, m_subp, m_which):
- """netdev_pformat properly rendering ip route info."""
- m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
- m_which.side_effect = lambda x: x if x == 'ip' else None
- content = netdev_pformat()
- new_output = copy(NETDEV_FORMATTED_OUT)
- # ip route show describes global scopes on ipv4 addresses
- # whereas ifconfig does not. Add proper global/host scope to output.
- new_output = new_output.replace('| . | 50:7b', '| global | 50:7b')
- new_output = new_output.replace(
- '255.0.0.0 | . |', '255.0.0.0 | host |')
- self.assertEqual(new_output, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
- """netdev_pformat warns when missing both ip and 'netstat'."""
- m_which.return_value = None # Neither ip nor netstat found
- content = netdev_pformat()
- self.assertEqual('\n', content)
- self.assertEqual(
- "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
- " commands\n",
- self.logs.getvalue())
- m_subp.assert_not_called()
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_info_nettools_down(self, m_subp, m_which):
- """test netdev_info using nettools and down interfaces."""
- m_subp.return_value = (
- readResource("netinfo/new-ifconfig-output-down"), "")
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- self.assertEqual(
- {'eth0': {'ipv4': [], 'ipv6': [],
- 'hwaddr': '00:16:3e:de:51:a6', 'up': False},
- 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
- 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
- 'hwaddr': '.', 'up': True}},
- netdev_info("."))
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_netdev_info_iproute_down(self, m_subp, m_which):
- """Test netdev_info with ip and down interfaces."""
- m_subp.return_value = (
- readResource("netinfo/sample-ipaddrshow-output-down"), "")
- m_which.side_effect = lambda x: x if x == 'ip' else None
- self.assertEqual(
- {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
- 'mask': '255.0.0.0', 'scope': 'host'}],
- 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
- 'hwaddr': '.', 'up': True},
- 'eth0': {'ipv4': [], 'ipv6': [],
- 'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
- netdev_info("."))
-
- @mock.patch('cloudinit.netinfo.netdev_info')
- def test_netdev_pformat_with_down(self, m_netdev_info):
- """test netdev_pformat when netdev_info returns 'down' interfaces."""
- m_netdev_info.return_value = (
- {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
- 'scope': 'host'}],
- 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
- 'hwaddr': '.', 'up': True},
- 'eth0': {'ipv4': [], 'ipv6': [],
- 'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
- self.assertEqual(
- readResource("netinfo/netdev-formatted-output-down"),
- netdev_pformat())
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_route_nettools_pformat(self, m_subp, m_which):
- """route_pformat properly rendering nettools route info."""
-
- def subp_netstat_route_selector(*args, **kwargs):
- if args[0] == ['netstat', '--route', '--numeric', '--extend']:
- return (SAMPLE_ROUTE_OUT_V4, '')
- if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
- return (SAMPLE_ROUTE_OUT_V6, '')
- raise Exception('Unexpected subp call %s' % args[0])
-
- m_subp.side_effect = subp_netstat_route_selector
- m_which.side_effect = lambda x: x if x == 'netstat' else None
- content = route_pformat()
- self.assertEqual(ROUTE_FORMATTED_OUT, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_route_iproute_pformat(self, m_subp, m_which):
- """route_pformat properly rendering ip route info."""
-
- def subp_iproute_selector(*args, **kwargs):
- if ['ip', '-o', 'route', 'list'] == args[0]:
- return (SAMPLE_IPROUTE_OUT_V4, '')
- v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
- if v6cmd == args[0]:
- return (SAMPLE_IPROUTE_OUT_V6, '')
- raise Exception('Unexpected subp call %s' % args[0])
-
- m_subp.side_effect = subp_iproute_selector
- m_which.side_effect = lambda x: x if x == 'ip' else None
- content = route_pformat()
- self.assertEqual(ROUTE_FORMATTED_OUT, content)
-
- @mock.patch('cloudinit.netinfo.subp.which')
- @mock.patch('cloudinit.netinfo.subp.subp')
- def test_route_warn_on_missing_commands(self, m_subp, m_which):
- """route_pformat warns when missing both ip and 'netstat'."""
- m_which.return_value = None # Neither ip nor netstat found
- content = route_pformat()
- self.assertEqual('\n', content)
- self.assertEqual(
- "WARNING: Could not print routes: missing 'ip' and 'netstat'"
- " commands\n",
- self.logs.getvalue())
- m_subp.assert_not_called()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
deleted file mode 100644
index d2d1b37f..00000000
--- a/cloudinit/tests/test_stages.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests related to cloudinit.stages module."""
-
-import os
-import stat
-
-import pytest
-
-from cloudinit import stages
-from cloudinit import sources
-from cloudinit.sources import NetworkConfigSource
-
-from cloudinit.event import EventType
-from cloudinit.util import write_file
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-TEST_INSTANCE_ID = 'i-testing'
-
-
-class FakeDataSource(sources.DataSource):
-
- def __init__(self, paths=None, userdata=None, vendordata=None,
- network_config=''):
- super(FakeDataSource, self).__init__({}, None, paths=paths)
- self.metadata = {'instance-id': TEST_INSTANCE_ID}
- self.userdata_raw = userdata
- self.vendordata_raw = vendordata
- self._network_config = None
- if network_config: # Permit for None value to setup attribute
- self._network_config = network_config
-
- @property
- def network_config(self):
- return self._network_config
-
- def _get_data(self):
- return True
-
-
-class TestInit(CiTestCase):
- with_logs = True
- allowed_subp = False
-
- def setUp(self):
- super(TestInit, self).setUp()
- self.tmpdir = self.tmp_dir()
- self.init = stages.Init()
- # Setup fake Paths for Init to reference
- self.init._cfg = {'system_info': {
- 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
- 'run_dir': self.tmpdir}}}
- self.init.datasource = FakeDataSource(paths=self.init.paths)
-
- def test_wb__find_networking_config_disabled(self):
- """find_networking_config returns no config when disabled."""
- disable_file = os.path.join(
- self.init.paths.get_cpath('data'), 'upgraded-network')
- write_file(disable_file, '')
- self.assertEqual(
- (None, disable_file),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_kernel(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns when disabled by kernel cmdline."""
- m_cmdline.return_value = {'config': 'disabled'}
- m_initramfs.return_value = {'config': ['fake_initrd']}
- self.assertEqual(
- (None, NetworkConfigSource.cmdline),
- self.init._find_networking_config())
- self.assertEqual('DEBUG: network config disabled by cmdline\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_initrd(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns when disabled by kernel cmdline."""
- m_cmdline.return_value = {}
- m_initramfs.return_value = {'config': 'disabled'}
- self.assertEqual(
- (None, NetworkConfigSource.initramfs),
- self.init._find_networking_config())
- self.assertEqual('DEBUG: network config disabled by initramfs\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_datasrc(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns when disabled by datasource cfg."""
- m_cmdline.return_value = {} # Kernel doesn't disable networking
- m_initramfs.return_value = {} # initramfs doesn't disable networking
- self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
- 'network': {}} # system config doesn't disable
-
- self.init.datasource = FakeDataSource(
- network_config={'config': 'disabled'})
- self.assertEqual(
- (None, NetworkConfigSource.ds),
- self.init._find_networking_config())
- self.assertEqual('DEBUG: network config disabled by ds\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_sysconfig(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns when disabled by system config."""
- m_cmdline.return_value = {} # Kernel doesn't disable networking
- m_initramfs.return_value = {} # initramfs doesn't disable networking
- self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
- 'network': {'config': 'disabled'}}
- self.assertEqual(
- (None, NetworkConfigSource.system_cfg),
- self.init._find_networking_config())
- self.assertEqual('DEBUG: network config disabled by system_cfg\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test__find_networking_config_uses_datasrc_order(
- self, m_cmdline, m_initramfs):
- """find_networking_config should check sources in DS defined order"""
- # cmdline and initramfs, which would normally be preferred over other
- # sources, disable networking; in this case, though, the DS moves them
- # later so its own config is preferred
- m_cmdline.return_value = {'config': 'disabled'}
- m_initramfs.return_value = {'config': 'disabled'}
-
- ds_net_cfg = {'config': {'needle': True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
- self.init.datasource.network_config_sources = [
- NetworkConfigSource.ds, NetworkConfigSource.system_cfg,
- NetworkConfigSource.cmdline, NetworkConfigSource.initramfs]
-
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
- self, m_cmdline, m_initramfs):
- """find_networking_config should check sources in DS defined order"""
- ds_net_cfg = {'config': {'needle': True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
- self.init.datasource.network_config_sources = [
- 'invalid_src', NetworkConfigSource.ds]
-
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config())
- self.assertIn('WARNING: data source specifies an invalid network'
- ' cfg_source: invalid_src',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
- self, m_cmdline, m_initramfs):
- """find_networking_config should check sources in DS defined order"""
- ds_net_cfg = {'config': {'needle': True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
- self.init.datasource.network_config_sources = [
- NetworkConfigSource.fallback, NetworkConfigSource.ds]
-
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config())
- self.assertIn('WARNING: data source specifies an unavailable network'
- ' cfg_source: fallback',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_kernel(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns kernel cmdline config if present."""
- expected_cfg = {'config': ['fakekernel']}
- m_cmdline.return_value = expected_cfg
- m_initramfs.return_value = {'config': ['fake_initrd']}
- self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
- 'network': {'config': ['fakesys_config']}}
- self.init.datasource = FakeDataSource(
- network_config={'config': ['fakedatasource']})
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.cmdline),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_initramfs(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns kernel cmdline config if present."""
- expected_cfg = {'config': ['fake_initrd']}
- m_cmdline.return_value = {}
- m_initramfs.return_value = expected_cfg
- self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
- 'network': {'config': ['fakesys_config']}}
- self.init.datasource = FakeDataSource(
- network_config={'config': ['fakedatasource']})
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.initramfs),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_system_cfg(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns system config when present."""
- m_cmdline.return_value = {} # No kernel network config
- m_initramfs.return_value = {} # no initramfs network config
- expected_cfg = {'config': ['fakesys_config']}
- self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
- 'network': expected_cfg}
- self.init.datasource = FakeDataSource(
- network_config={'config': ['fakedatasource']})
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.system_cfg),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_datasrc_cfg(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns datasource net config if present."""
- m_cmdline.return_value = {} # No kernel network config
- m_initramfs.return_value = {} # no initramfs network config
- # No system config for network in setUp
- expected_cfg = {'config': ['fakedatasource']}
- self.init.datasource = FakeDataSource(network_config=expected_cfg)
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config())
-
- @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
- @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_fallback(
- self, m_cmdline, m_initramfs):
- """find_networking_config returns fallback config if not defined."""
- m_cmdline.return_value = {} # Kernel doesn't disable networking
- m_initramfs.return_value = {} # no initramfs network config
- # Neither datasource nor system_info disable or provide network
-
- fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}],
- 'version': 1}
-
- def fake_generate_fallback():
- return fake_cfg
-
- # Monkey patch distro which gets cached on self.init
- distro = self.init.distro
- distro.generate_fallback_config = fake_generate_fallback
- self.assertEqual(
- (fake_cfg, NetworkConfigSource.fallback),
- self.init._find_networking_config())
- self.assertNotIn('network config disabled', self.logs.getvalue())
-
- def test_apply_network_config_disabled(self):
- """Log when network is disabled by upgraded-network."""
- disable_file = os.path.join(
- self.init.paths.get_cpath('data'), 'upgraded-network')
-
- def fake_network_config():
- return (None, disable_file)
-
- self.init._find_networking_config = fake_network_config
-
- self.init.apply_network_config(True)
- self.assertIn(
- 'INFO: network config is disabled by %s' % disable_file,
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- @mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_new_instance(self, m_ubuntu, m_macs):
- """Call distro apply_network_config methods on is_new_instance."""
- net_cfg = {
- 'version': 1, 'config': [
- {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
- 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
-
- def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
-
- m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
-
- self.init._find_networking_config = fake_network_config
- self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
- self.init.distro.apply_network_config.assert_called_with(
- net_cfg, bring_up=True)
-
- @mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_same_instance_id(self, m_ubuntu):
- """Only call distro.apply_network_config_names on same instance id."""
- old_instance_id = os.path.join(
- self.init.paths.get_cpath('data'), 'instance-id')
- write_file(old_instance_id, TEST_INSTANCE_ID)
- net_cfg = {
- 'version': 1, 'config': [
- {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
- 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
-
- def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
-
- self.init._find_networking_config = fake_network_config
- self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
- self.init.distro.apply_network_config.assert_not_called()
- self.assertIn(
- 'No network config applied. Neither a new instance'
- " nor datasource network update on '%s' event" % EventType.BOOT,
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- @mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs):
- """Apply network if datasource.update_metadata permits BOOT event."""
- old_instance_id = os.path.join(
- self.init.paths.get_cpath('data'), 'instance-id')
- write_file(old_instance_id, TEST_INSTANCE_ID)
- net_cfg = {
- 'version': 1, 'config': [
- {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
- 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
-
- def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
-
- m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
-
- self.init._find_networking_config = fake_network_config
- self.init.datasource = FakeDataSource(paths=self.init.paths)
- self.init.datasource.update_events = {'network': [EventType.BOOT]}
- self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
- self.init.distro.apply_network_config.assert_called_with(
- net_cfg, bring_up=True)
-
-
-class TestInit_InitializeFilesystem:
- """Tests for cloudinit.stages.Init._initialize_filesystem.
-
- TODO: Expand these tests to cover all of _initialize_filesystem's behavior.
- """
-
- @pytest.yield_fixture
- def init(self, paths):
- """A fixture which yields a stages.Init instance with paths and cfg set
-
- As it is replaced with a mock, consumers of this fixture can set
- `init.cfg` if the default empty dict configuration is not appropriate.
- """
- with mock.patch(
- "cloudinit.stages.Init.cfg", mock.PropertyMock(return_value={})
- ):
- with mock.patch("cloudinit.stages.util.ensure_dirs"):
- init = stages.Init()
- init._paths = paths
- yield init
-
- @mock.patch("cloudinit.stages.util.ensure_file")
- def test_ensure_file_not_called_if_no_log_file_configured(
- self, m_ensure_file, init
- ):
- """If no log file is configured, we should not ensure its existence."""
- init.cfg = {}
-
- init._initialize_filesystem()
-
- assert 0 == m_ensure_file.call_count
-
- def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir):
- """If a log file is configured, we should ensure its existence."""
- log_file = tmpdir.join("cloud-init.log")
- init.cfg = {"def_log_file": str(log_file)}
-
- init._initialize_filesystem()
-
- assert log_file.exists
-
- def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
- """If the log file already exists, we should not modify its permissions
-
- See https://bugs.launchpad.net/cloud-init/+bug/1900837.
- """
- # Use a mode that will never be made the default so this test will
- # always be valid
- mode = 0o606
- log_file = tmpdir.join("cloud-init.log")
- log_file.ensure()
- log_file.chmod(mode)
- init.cfg = {"def_log_file": str(log_file)}
-
- init._initialize_filesystem()
-
- assert mode == stat.S_IMODE(log_file.stat().mode)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
deleted file mode 100644
index 911c1f3d..00000000
--- a/cloudinit/tests/test_subp.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloudinit.subp utility functions"""
-
-import json
-import os
-import sys
-import stat
-
-from unittest import mock
-
-from cloudinit import subp, util
-from cloudinit.tests.helpers import CiTestCase
-
-
-BASH = subp.which('bash')
-BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
-
-
-class TestPrependBaseCommands(CiTestCase):
-
- with_logs = True
-
- def test_prepend_base_command_errors_on_neither_string_nor_list(self):
- """Raise an error for each command which is not a string or list."""
- orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']]
- with self.assertRaises(TypeError) as context_manager:
- subp.prepend_base_command(
- base_command='basecmd', commands=orig_commands)
- self.assertEqual(
- "Invalid basecmd config. These commands are not a string or"
- " list:\n1\n{'not': 'gonna work'}",
- str(context_manager.exception))
-
- def test_prepend_base_command_warns_on_non_base_string_commands(self):
- """Warn on each non-base for commands of type string."""
- orig_commands = [
- 'ls', 'basecmd list', 'touch /blah', 'basecmd install x']
- fixed_commands = subp.prepend_base_command(
- base_command='basecmd', commands=orig_commands)
- self.assertEqual(
- 'WARNING: Non-basecmd commands in basecmd config:\n'
- 'ls\ntouch /blah\n',
- self.logs.getvalue())
- self.assertEqual(orig_commands, fixed_commands)
-
- def test_prepend_base_command_prepends_on_non_base_list_commands(self):
- """Prepend 'basecmd' for each non-basecmd command of type list."""
- orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'],
- ['basecmd', 'install', 'x']]
- expected = [['basecmd', 'ls'], ['basecmd', 'list'],
- ['basecmd', 'basecmda', '/blah'],
- ['basecmd', 'install', 'x']]
- fixed_commands = subp.prepend_base_command(
- base_command='basecmd', commands=orig_commands)
- self.assertEqual('', self.logs.getvalue())
- self.assertEqual(expected, fixed_commands)
-
- def test_prepend_base_command_removes_first_item_when_none(self):
- """Remove the first element of a non-basecmd when it is None."""
- orig_commands = [[None, 'ls'], ['basecmd', 'list'],
- [None, 'touch', '/blah'],
- ['basecmd', 'install', 'x']]
- expected = [['ls'], ['basecmd', 'list'],
- ['touch', '/blah'],
- ['basecmd', 'install', 'x']]
- fixed_commands = subp.prepend_base_command(
- base_command='basecmd', commands=orig_commands)
- self.assertEqual('', self.logs.getvalue())
- self.assertEqual(expected, fixed_commands)
-
-
-class TestSubp(CiTestCase):
- allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE,
- BOGUS_COMMAND, sys.executable]
-
- stdin2err = [BASH, '-c', 'cat >&2']
- stdin2out = ['cat']
- utf8_invalid = b'ab\xaadef'
- utf8_valid = b'start \xc3\xa9 end'
- utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
- printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
-
- def printf_cmd(self, *args):
- # bash's printf supports \xaa. So does /usr/bin/printf
- # but by using bash, we remove dependency on another program.
- return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
-
- def test_subp_handles_bytestrings(self):
- """subp can run a bytestring command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_strings(self):
- """subp can run a string command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = subp.subp(cmd, shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_utf8(self):
- # The given bytes contain utf-8 accented characters as seen in e.g.
- # the "deja dup" package in Ubuntu.
- cmd = self.printf_cmd(self.utf8_valid_2)
- (out, _err) = subp.subp(cmd, capture=True)
- self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
-
- def test_subp_respects_decode_false(self):
- (out, err) = subp.subp(self.stdin2out, capture=True, decode=False,
- data=self.utf8_valid)
- self.assertTrue(isinstance(out, bytes))
- self.assertTrue(isinstance(err, bytes))
- self.assertEqual(out, self.utf8_valid)
-
- def test_subp_decode_ignore(self):
- # this executes a string that writes invalid utf-8 to stdout
- (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'),
- capture=True, decode='ignore')
- self.assertEqual(out, 'abcdef')
-
- def test_subp_decode_strict_valid_utf8(self):
- (out, _err) = subp.subp(self.stdin2out, capture=True,
- decode='strict', data=self.utf8_valid)
- self.assertEqual(out, self.utf8_valid.decode('utf-8'))
-
- def test_subp_decode_invalid_utf8_replaces(self):
- (out, _err) = subp.subp(self.stdin2out, capture=True,
- data=self.utf8_invalid)
- expected = self.utf8_invalid.decode('utf-8', 'replace')
- self.assertEqual(out, expected)
-
- def test_subp_decode_strict_raises(self):
- args = []
- kwargs = {'args': self.stdin2out, 'capture': True,
- 'decode': 'strict', 'data': self.utf8_invalid}
- self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
-
- def test_subp_capture_stderr(self):
- data = b'hello world'
- (out, err) = subp.subp(self.stdin2err, capture=True,
- decode=False, data=data,
- update_env={'LC_ALL': 'C'})
- self.assertEqual(err, data)
- self.assertEqual(out, b'')
-
- def test_subp_reads_env(self):
- with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
- out, _err = subp.subp(self.printenv + ['FOO'], capture=True)
- self.assertEqual('FOO=BAR', out.splitlines()[0])
-
- def test_subp_env_and_update_env(self):
- out, _err = subp.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- env={'FOO': 'BAR'},
- update_env={'HOME': '/myhome', 'K2': 'V2'})
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
-
- def test_subp_update_env(self):
- extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
- with mock.patch.dict("os.environ", values=extra):
- out, _err = subp.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- update_env={'HOME': '/myhome', 'K2': 'V2'})
-
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
-
- def test_subp_warn_missing_shebang(self):
- """Warn on no #! in script"""
- noshebang = self.tmp_path('noshebang')
- util.write_file(noshebang, 'true\n')
-
- print("os is %s" % os)
- os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
- with self.allow_subp([noshebang]):
- self.assertRaisesRegex(subp.ProcessExecutionError,
- r'Missing #! in script\?',
- subp.subp, (noshebang,))
-
- def test_subp_combined_stderr_stdout(self):
- """Providing combine_capture as True redirects stderr to stdout."""
- data = b'hello world'
- (out, err) = subp.subp(self.stdin2err, capture=True,
- combine_capture=True, decode=False, data=data)
- self.assertEqual(b'', err)
- self.assertEqual(data, out)
-
- def test_returns_none_if_no_capture(self):
- (out, err) = subp.subp(self.stdin2out, data=b'', capture=False)
- self.assertIsNone(err)
- self.assertIsNone(out)
-
- def test_exception_has_out_err_are_bytes_if_decode_false(self):
- """Raised exc should have stderr, stdout as bytes if no decode."""
- with self.assertRaises(subp.ProcessExecutionError) as cm:
- subp.subp([BOGUS_COMMAND], decode=False)
- self.assertTrue(isinstance(cm.exception.stdout, bytes))
- self.assertTrue(isinstance(cm.exception.stderr, bytes))
-
- def test_exception_has_out_err_are_bytes_if_decode_true(self):
- """Raised exc should have stderr, stdout as string if no decode."""
- with self.assertRaises(subp.ProcessExecutionError) as cm:
- subp.subp([BOGUS_COMMAND], decode=True)
- self.assertTrue(isinstance(cm.exception.stdout, str))
- self.assertTrue(isinstance(cm.exception.stderr, str))
-
- def test_bunch_of_slashes_in_path(self):
- self.assertEqual("/target/my/path/",
- subp.target_path("/target/", "//my/path/"))
- self.assertEqual("/target/my/path/",
- subp.target_path("/target/", "///my/path/"))
-
- def test_c_lang_can_take_utf8_args(self):
- """Independent of system LC_CTYPE, args can contain utf-8 strings.
-
- When python starts up, its default encoding gets set based on
- the value of LC_CTYPE. If no system locale is set, the default
- encoding for both python2 and python3 in some paths will end up
- being ascii.
-
- Attempts to use setlocale or patching (or changing) os.environ
- in the current environment seem to not be effective.
-
- This test starts up a python with LC_CTYPE set to C so that
- the default encoding will be set to ascii. In such an environment
- Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
- """
- python_prog = '\n'.join([
- 'import json, sys',
- 'from cloudinit.subp import subp',
- 'data = sys.stdin.read()',
- 'cmd = json.loads(data)',
- 'subp(cmd, capture=False)',
- ''])
- cmd = [BASH, '-c', 'echo -n "$@"', '--',
- self.utf8_valid.decode("utf-8")]
- python_subp = [sys.executable, '-c', python_prog]
-
- out, _err = subp.subp(
- python_subp, update_env={'LC_CTYPE': 'C'},
- data=json.dumps(cmd).encode("utf-8"),
- decode=False)
- self.assertEqual(self.utf8_valid, out)
-
- def test_bogus_command_logs_status_messages(self):
- """status_cb gets status messages logs on bogus commands provided."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(subp.ProcessExecutionError):
- subp.subp([BOGUS_COMMAND], status_cb=status_cb)
-
- expected = [
- 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
- 'ERROR: End run command: invalid command provided\n']
- self.assertEqual(expected, logs)
-
- def test_command_logs_exit_codes_to_status_cb(self):
- """status_cb gets status messages containing command exit code."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(subp.ProcessExecutionError):
- subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
- subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
-
- expected = [
- 'Begin run command: %s -c exit 2\n' % BASH,
- 'ERROR: End run command: exit(2)\n',
- 'Begin run command: %s -c exit 0\n' % BASH,
- 'End run command: exit(0)\n']
- self.assertEqual(expected, logs)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py
deleted file mode 100644
index 4a52ef89..00000000
--- a/cloudinit/tests/test_temp_utils.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloudinit.temp_utils"""
-
-from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
-from cloudinit.tests.helpers import CiTestCase, wrap_and_call
-import os
-
-
-class TestTempUtils(CiTestCase):
-
- def test_mkdtemp_default_non_root(self):
- """mkdtemp creates a dir under /tmp for the unprivileged."""
- calls = []
-
- def fake_mkdtemp(*args, **kwargs):
- calls.append(kwargs)
- return '/fake/return/path'
-
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/tmp'}], calls)
-
- def test_mkdtemp_default_non_root_needs_exe(self):
- """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
- calls = []
-
- def fake_mkdtemp(*args, **kwargs):
- calls.append(kwargs)
- return '/fake/return/path'
-
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp, needs_exe=True)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls)
-
- def test_mkdtemp_default_root(self):
- """mkdtemp creates a dir under /run/cloud-init for the privileged."""
- calls = []
-
- def fake_mkdtemp(*args, **kwargs):
- calls.append(kwargs)
- return '/fake/return/path'
-
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 0,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
-
- def test_mkstemp_default_non_root(self):
- """mkstemp creates secure tempfile under /tmp for the unprivileged."""
- calls = []
-
- def fake_mkstemp(*args, **kwargs):
- calls.append(kwargs)
- return '/fake/return/path'
-
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkstemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/tmp'}], calls)
-
- def test_mkstemp_default_root(self):
- """mkstemp creates a secure tempfile in /run/cloud-init for root."""
- calls = []
-
- def fake_mkstemp(*args, **kwargs):
- calls.append(kwargs)
- return '/fake/return/path'
-
- retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 0,
- 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkstemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
-
- def test_tempdir_error_suppression(self):
- """test tempdir suppresses errors during directory removal."""
-
- with self.assertRaises(OSError):
- with tempdir(prefix='cloud-init-dhcp-') as tdir:
- os.rmdir(tdir)
- # As a result, the directory is already gone,
- # so shutil.rmtree should raise OSError
-
- with tempdir(rmtree_ignore_errors=True,
- prefix='cloud-init-dhcp-') as tdir:
- os.rmdir(tdir)
- # Since the directory is already gone, shutil.rmtree would raise
- # OSError, but we suppress that
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
deleted file mode 100644
index b7a302f1..00000000
--- a/cloudinit/tests/test_util.py
+++ /dev/null
@@ -1,854 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for cloudinit.util"""
-
-import base64
-import logging
-import json
-import platform
-import pytest
-
-import cloudinit.util as util
-from cloudinit import subp
-
-from cloudinit.tests.helpers import CiTestCase, mock
-from textwrap import dedent
-
-LOG = logging.getLogger(__name__)
-
-MOUNT_INFO = [
- '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
- '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
-]
-
-OS_RELEASE_SLES = dedent("""\
- NAME="SLES"
- VERSION="12-SP3"
- VERSION_ID="12.3"
- PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
- ID="sles"
- ANSI_COLOR="0;32"
- CPE_NAME="cpe:/o:suse:sles:12:sp3"
-""")
-
-OS_RELEASE_OPENSUSE = dedent("""\
- NAME="openSUSE Leap"
- VERSION="42.3"
- ID=opensuse
- ID_LIKE="suse"
- VERSION_ID="42.3"
- PRETTY_NAME="openSUSE Leap 42.3"
- ANSI_COLOR="0;32"
- CPE_NAME="cpe:/o:opensuse:leap:42.3"
- BUG_REPORT_URL="https://bugs.opensuse.org"
- HOME_URL="https://www.opensuse.org/"
-""")
-
-OS_RELEASE_OPENSUSE_L15 = dedent("""\
- NAME="openSUSE Leap"
- VERSION="15.0"
- ID="opensuse-leap"
- ID_LIKE="suse opensuse"
- VERSION_ID="15.0"
- PRETTY_NAME="openSUSE Leap 15.0"
- ANSI_COLOR="0;32"
- CPE_NAME="cpe:/o:opensuse:leap:15.0"
- BUG_REPORT_URL="https://bugs.opensuse.org"
- HOME_URL="https://www.opensuse.org/"
-""")
-
-OS_RELEASE_OPENSUSE_TW = dedent("""\
- NAME="openSUSE Tumbleweed"
- ID="opensuse-tumbleweed"
- ID_LIKE="opensuse suse"
- VERSION_ID="20180920"
- PRETTY_NAME="openSUSE Tumbleweed"
- ANSI_COLOR="0;32"
- CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
- BUG_REPORT_URL="https://bugs.opensuse.org"
- HOME_URL="https://www.opensuse.org/"
-""")
-
-OS_RELEASE_CENTOS = dedent("""\
- NAME="CentOS Linux"
- VERSION="7 (Core)"
- ID="centos"
- ID_LIKE="rhel fedora"
- VERSION_ID="7"
- PRETTY_NAME="CentOS Linux 7 (Core)"
- ANSI_COLOR="0;31"
- CPE_NAME="cpe:/o:centos:centos:7"
- HOME_URL="https://www.centos.org/"
- BUG_REPORT_URL="https://bugs.centos.org/"
-
- CENTOS_MANTISBT_PROJECT="CentOS-7"
- CENTOS_MANTISBT_PROJECT_VERSION="7"
- REDHAT_SUPPORT_PRODUCT="centos"
- REDHAT_SUPPORT_PRODUCT_VERSION="7"
-""")
-
-OS_RELEASE_REDHAT_7 = dedent("""\
- NAME="Red Hat Enterprise Linux Server"
- VERSION="7.5 (Maipo)"
- ID="rhel"
- ID_LIKE="fedora"
- VARIANT="Server"
- VARIANT_ID="server"
- VERSION_ID="7.5"
- PRETTY_NAME="Red Hat"
- ANSI_COLOR="0;31"
- CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
- HOME_URL="https://www.redhat.com/"
- BUG_REPORT_URL="https://bugzilla.redhat.com/"
-
- REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
- REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
- REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
- REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
-""")
-
-REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
-REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
-REDHAT_RELEASE_REDHAT_6 = (
- "Red Hat Enterprise Linux Server release 6.10 (Santiago)")
-REDHAT_RELEASE_REDHAT_7 = (
- "Red Hat Enterprise Linux Server release 7.5 (Maipo)")
-
-
-OS_RELEASE_DEBIAN = dedent("""\
- PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
- NAME="Debian GNU/Linux"
- VERSION_ID="9"
- VERSION="9 (stretch)"
- ID=debian
- HOME_URL="https://www.debian.org/"
- SUPPORT_URL="https://www.debian.org/support"
- BUG_REPORT_URL="https://bugs.debian.org/"
-""")
-
-OS_RELEASE_UBUNTU = dedent("""\
- NAME="Ubuntu"\n
- # comment test
- VERSION="16.04.3 LTS (Xenial Xerus)"\n
- ID=ubuntu\n
- ID_LIKE=debian\n
- PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
- VERSION_ID="16.04"\n
- HOME_URL="http://www.ubuntu.com/"\n
- SUPPORT_URL="http://help.ubuntu.com/"\n
- BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
- VERSION_CODENAME=xenial\n
- UBUNTU_CODENAME=xenial\n
-""")
-
-
-class FakeCloud(object):
-
- def __init__(self, hostname, fqdn):
- self.hostname = hostname
- self.fqdn = fqdn
- self.calls = []
-
- def get_hostname(self, fqdn=None, metadata_only=None):
- myargs = {}
- if fqdn is not None:
- myargs['fqdn'] = fqdn
- if metadata_only is not None:
- myargs['metadata_only'] = metadata_only
- self.calls.append(myargs)
- if fqdn:
- return self.fqdn
- return self.hostname
-
-
-class TestUtil(CiTestCase):
-
- def test_parse_mount_info_no_opts_no_arg(self):
- result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
- self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
-
- def test_parse_mount_info_no_opts_arg(self):
- result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
- self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
-
- def test_parse_mount_info_with_opts(self):
- result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
- self.assertEqual(
- ('/dev/sda1', 'btrfs', '/', 'ro,relatime'),
- result
- )
-
- @mock.patch('cloudinit.util.get_mount_info')
- def test_mount_is_rw(self, m_mount_info):
- m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
- is_rw = util.mount_is_read_write('/')
- self.assertEqual(is_rw, True)
-
- @mock.patch('cloudinit.util.get_mount_info')
- def test_mount_is_ro(self, m_mount_info):
- m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
- is_rw = util.mount_is_read_write('/')
- self.assertEqual(is_rw, False)
-
-
-class TestUptime(CiTestCase):
-
- @mock.patch('cloudinit.util.boottime')
- @mock.patch('cloudinit.util.os.path.exists')
- @mock.patch('cloudinit.util.time.time')
- def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
- boottime = 1000.0
- uptime = 10.0
- m_boottime.return_value = boottime
- m_time.return_value = boottime + uptime
- m_exists.return_value = False
- result = util.uptime()
- self.assertEqual(str(uptime), result)
-
-
-class TestShellify(CiTestCase):
-
- def test_input_dict_raises_type_error(self):
- self.assertRaisesRegex(
- TypeError, 'Input.*was.*dict.*xpected',
- util.shellify, {'mykey': 'myval'})
-
- def test_input_str_raises_type_error(self):
- self.assertRaisesRegex(
- TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar")
-
- def test_value_with_int_raises_type_error(self):
- self.assertRaisesRegex(
- TypeError, 'shellify.*int', util.shellify, ["foo", 1])
-
- def test_supports_strings_and_lists(self):
- self.assertEqual(
- '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'",
- "'echo' 'hi' 'sis'", ""]),
- util.shellify(["echo hi mom", ["echo", "hi dad"],
- ('echo', 'hi', 'sis')]))
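
The removed tests above document shellify's contract: plain strings are emitted verbatim as shell lines, lists/tuples are emitted with each argument quoted, and any other value type raises TypeError. A minimal sketch of that behaviour, with values taken from the assertions above:

    from cloudinit import util

    # Strings pass through untouched; sequences are quoted per argument.
    script = util.shellify(["echo hi mom", ["echo", "hi dad"]])
    # script is a small shell script, roughly:
    #   #!/bin/sh
    #   echo hi mom
    #   'echo' 'hi dad'
    print(script)
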
-
-
-class TestGetHostnameFqdn(CiTestCase):
-
- def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
- """When cfg only has the fqdn key, derive hostname and fqdn from it."""
- hostname, fqdn = util.get_hostname_fqdn(
- cfg={'fqdn': 'myhost.domain.com'}, cloud=None)
- self.assertEqual('myhost', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
-
- def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
- """When cfg has both fqdn and hostname keys, return them."""
- hostname, fqdn = util.get_hostname_fqdn(
- cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None)
- self.assertEqual('other', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
-
- def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
- """When cfg has only hostname key which represents a fqdn, use that."""
- hostname, fqdn = util.get_hostname_fqdn(
- cfg={'hostname': 'myhost.domain.com'}, cloud=None)
- self.assertEqual('myhost', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
-
- def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
- """When cfg has a hostname without a '.' query cloud.get_hostname."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
- hostname, fqdn = util.get_hostname_fqdn(
- cfg={'hostname': 'myhost'}, cloud=mycloud)
- self.assertEqual('myhost', hostname)
- self.assertEqual('cloudhost.mycloud.com', fqdn)
- self.assertEqual(
- [{'fqdn': True, 'metadata_only': False}], mycloud.calls)
-
- def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
- """When cfg has neither hostname nor fqdn cloud.get_hostname."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
- hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
- self.assertEqual('cloudhost', hostname)
- self.assertEqual('cloudhost.mycloud.com', fqdn)
- self.assertEqual(
- [{'fqdn': True, 'metadata_only': False},
- {'metadata_only': False}], mycloud.calls)
-
- def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
- """Calls to cloud.get_hostname pass the metadata_only parameter."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
- _hn, _fqdn = util.get_hostname_fqdn(
- cfg={}, cloud=mycloud, metadata_only=True)
- self.assertEqual(
- [{'fqdn': True, 'metadata_only': True},
- {'metadata_only': True}], mycloud.calls)
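
The hostname/fqdn precedence exercised above: an explicit hostname key wins, an fqdn key supplies the domain, and whatever is missing is asked of the datasource via cloud.get_hostname(). A short sketch of the cfg-only case, mirroring the first test:

    from cloudinit import util

    # With only an fqdn configured, the short hostname is its first label.
    hostname, fqdn = util.get_hostname_fqdn(
        cfg={"fqdn": "myhost.domain.com"}, cloud=None
    )
    assert (hostname, fqdn) == ("myhost", "myhost.domain.com")
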
-
-
-class TestBlkid(CiTestCase):
- ids = {
- "id01": "1111-1111",
- "id02": "22222222-2222",
- "id03": "33333333-3333",
- "id04": "44444444-4444",
- "id05": "55555555-5555-5555-5555-555555555555",
- "id06": "66666666-6666-6666-6666-666666666666",
- "id07": "52894610484658920398",
- "id08": "86753098675309867530",
- "id09": "99999999-9999-9999-9999-999999999999",
- }
-
- blkid_out = dedent("""\
- /dev/loop0: TYPE="squashfs"
- /dev/loop1: TYPE="squashfs"
- /dev/loop2: TYPE="squashfs"
- /dev/loop3: TYPE="squashfs"
- /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}"
- /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}"
- /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}"
- /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """
- """TYPE="zfs_member" PARTUUID="{id09}"
- /dev/loop4: TYPE="squashfs"
- """)
-
- maxDiff = None
-
- def _get_expected(self):
- return ({
- "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"},
- "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"},
- "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"},
- "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"},
- "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"},
- "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat",
- "UUID": self.ids["id01"],
- "PARTUUID": self.ids["id02"]},
- "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4",
- "UUID": self.ids["id03"],
- "PARTUUID": self.ids["id04"]},
- "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4",
- "UUID": self.ids["id05"],
- "PARTUUID": self.ids["id06"]},
- "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member",
- "LABEL": "default",
- "UUID": self.ids["id07"],
- "UUID_SUB": self.ids["id08"],
- "PARTUUID": self.ids["id09"]},
- })
-
- @mock.patch("cloudinit.subp.subp")
- def test_functional_blkid(self, m_subp):
- m_subp.return_value = (
- self.blkid_out.format(**self.ids), "")
- self.assertEqual(self._get_expected(), util.blkid())
- m_subp.assert_called_with(["blkid", "-o", "full"], capture=True,
- decode="replace")
-
- @mock.patch("cloudinit.subp.subp")
- def test_blkid_no_cache_uses_no_cache(self, m_subp):
- """blkid should turn off cache if disable_cache is true."""
- m_subp.return_value = (
- self.blkid_out.format(**self.ids), "")
- self.assertEqual(self._get_expected(),
- util.blkid(disable_cache=True))
- m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"],
- capture=True, decode="replace")
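
As the fixture shows, util.blkid() wraps `blkid -o full` and returns a dict keyed by device path; disable_cache=True adds `-c /dev/null` so the on-disk cache is bypassed. A small usage sketch (the devices printed are whatever blkid reports on the host):

    from cloudinit import util

    devices = util.blkid(disable_cache=True)
    for dev, info in devices.items():
        # Every entry carries DEVNAME and TYPE; UUID/PARTUUID/LABEL appear
        # only when the device has them.
        print(dev, info.get("TYPE"), info.get("UUID"))
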
-
-
-@mock.patch('cloudinit.subp.subp')
-class TestUdevadmSettle(CiTestCase):
- def test_with_no_params(self, m_subp):
- """called with no parameters."""
- util.udevadm_settle()
- m_subp.called_once_with(mock.call(['udevadm', 'settle']))
-
- def test_with_exists_and_not_exists(self, m_subp):
- """with exists=file where file does not exist should invoke subp."""
- mydev = self.tmp_path("mydev")
- util.udevadm_settle(exists=mydev)
- m_subp.called_once_with(
- ['udevadm', 'settle', '--exit-if-exists=%s' % mydev])
-
- def test_with_exists_and_file_exists(self, m_subp):
- """with exists=file where file does exist should not invoke subp."""
- mydev = self.tmp_path("mydev")
- util.write_file(mydev, "foo\n")
- util.udevadm_settle(exists=mydev)
- self.assertIsNone(m_subp.call_args)
-
- def test_with_timeout_int(self, m_subp):
- """timeout can be an integer."""
- timeout = 9
- util.udevadm_settle(timeout=timeout)
- m_subp.called_once_with(
- ['udevadm', 'settle', '--timeout=%s' % timeout])
-
- def test_with_timeout_string(self, m_subp):
- """timeout can be a string."""
- timeout = "555"
- util.udevadm_settle(timeout=timeout)
- m_subp.assert_called_once_with(
- ['udevadm', 'settle', '--timeout=%s' % timeout])
-
- def test_with_exists_and_timeout(self, m_subp):
- """test call with both exists and timeout."""
- mydev = self.tmp_path("mydev")
- timeout = "3"
- util.udevadm_settle(exists=mydev)
- m_subp.called_once_with(
- ['udevadm', 'settle', '--exit-if-exists=%s' % mydev,
- '--timeout=%s' % timeout])
-
- def test_subp_exception_raises_to_caller(self, m_subp):
- m_subp.side_effect = subp.ProcessExecutionError("BOOM")
- self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
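
udevadm_settle is a thin wrapper around `udevadm settle`: exists maps to --exit-if-exists, timeout to --timeout, and the subprocess is skipped entirely when the target path already exists. A brief sketch; the device path is only illustrative:

    from cloudinit import util

    # Wait up to 10 seconds for udev to finish, returning early once the
    # named node shows up; nothing is run if it already exists.
    util.udevadm_settle(exists="/dev/disk/by-label/config-2", timeout=10)
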
-
-
-@mock.patch('os.path.exists')
-class TestGetLinuxDistro(CiTestCase):
-
- def setUp(self):
- # python2 has no lru_cache, and therefore, no cache_clear()
- if hasattr(util.get_linux_distro, "cache_clear"):
- util.get_linux_distro.cache_clear()
-
- @classmethod
- def os_release_exists(self, path):
- """Side effect function"""
- if path == '/etc/os-release':
- return 1
-
- @classmethod
- def redhat_release_exists(self, path):
- """Side effect function """
- if path == '/etc/redhat-release':
- return 1
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
- """Verify we get the correct name if the os-release file has
- the distro name in quotes"""
- m_os_release.return_value = OS_RELEASE_SLES
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('sles', '12.3', platform.machine()), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
- """Verify we get the correct name if the os-release file does not
- have the distro name in quotes"""
- m_os_release.return_value = OS_RELEASE_UBUNTU
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
-
- @mock.patch('platform.system')
- @mock.patch('platform.release')
- @mock.patch('cloudinit.util._parse_redhat_release')
- def test_get_linux_freebsd(self, m_parse_redhat_release,
- m_platform_release,
- m_platform_system, m_path_exists):
- """Verify we get the correct name and release name on FreeBSD."""
- m_path_exists.return_value = False
- m_platform_release.return_value = '12.0-RELEASE-p10'
- m_platform_system.return_value = 'FreeBSD'
- m_parse_redhat_release.return_value = {}
- util.is_BSD.cache_clear()
- dist = util.get_linux_distro()
- self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_centos6(self, m_os_release, m_path_exists):
- """Verify we get the correct name and release name on CentOS 6."""
- m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
- m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('centos', '6.10', 'Final'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
- """Verify the correct release info on CentOS 7 without os-release."""
- m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
- m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
- """Verify redhat 7 read from os-release."""
- m_os_release.return_value = OS_RELEASE_REDHAT_7
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
- """Verify redhat 7 read from redhat-release."""
- m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
- m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
- """Verify redhat 6 read from redhat-release."""
- m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
- m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
- """Verify we get the correct name and release name on COPR CentOS."""
- m_os_release.return_value = OS_RELEASE_CENTOS
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('centos', '7', 'Core'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_debian(self, m_os_release, m_path_exists):
- """Verify we get the correct name and release name on Debian."""
- m_os_release.return_value = OS_RELEASE_DEBIAN
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('debian', '9', 'stretch'), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_opensuse(self, m_os_release, m_path_exists):
- """Verify we get the correct name and machine arch on openSUSE
- prior to openSUSE Leap 15.
- """
- m_os_release.return_value = OS_RELEASE_OPENSUSE
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
- """Verify we get the correct name and machine arch on openSUSE
- for openSUSE Leap 15.0 and later.
- """
- m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist)
-
- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
- """Verify we get the correct name and machine arch on openSUSE
- for openSUSE Tumbleweed
- """
- m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
- m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro()
- self.assertEqual(
- ('opensuse-tumbleweed', '20180920', platform.machine()), dist)
-
- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_data(self, m_platform_dist,
- m_platform_system, m_path_exists):
- """Verify we get no information if os-release does not exist"""
- m_platform_dist.return_value = ('', '', '')
- m_platform_system.return_value = "Linux"
- m_path_exists.return_value = 0
- dist = util.get_linux_distro()
- self.assertEqual(('', '', ''), dist)
-
- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_impl(self, m_platform_dist,
- m_platform_system, m_path_exists):
- """Verify we get an empty tuple when no information exists and
- Exceptions are not propagated"""
- m_platform_dist.side_effect = Exception()
- m_platform_system.return_value = "Linux"
- m_path_exists.return_value = 0
- dist = util.get_linux_distro()
- self.assertEqual(('', '', ''), dist)
-
- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
- def test_get_linux_distro_plat_data(self, m_platform_dist,
- m_platform_system, m_path_exists):
- """Verify we get the correct platform information"""
- m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
- m_platform_system.return_value = "Linux"
- m_path_exists.return_value = 0
- dist = util.get_linux_distro()
- self.assertEqual(('foo', '1.1', 'aarch64'), dist)
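
Taken together, these tests pin down get_linux_distro's lookup order: /etc/os-release first, /etc/redhat-release as a fallback, BSD platform data when neither applies, and finally the deprecated platform.dist(). The result is always a three-tuple, so callers can rely on:

    from cloudinit import util

    # e.g. ('ubuntu', '16.04', 'xenial'), ('centos', '7.5.1804', 'Core'),
    # or ('', '', '') when nothing can be determined.
    name, version, flavor = util.get_linux_distro()
    print(name, version, flavor)
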
-
-
-class TestJsonDumps(CiTestCase):
- def test_is_str(self):
- """json_dumps should return a string."""
- self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str))
-
- def test_utf8(self):
- smiley = '\\ud83d\\ude03'
- self.assertEqual(
- {'smiley': smiley},
- json.loads(util.json_dumps({'smiley': smiley})))
-
- def test_non_utf8(self):
- blob = b'\xba\x03Qx-#y\xea'
- self.assertEqual(
- {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')},
- json.loads(util.json_dumps({'blob': blob})))
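
json_dumps stays usable for arbitrary instance metadata because byte values that are not valid UTF-8 are serialised as a "ci-b64:"-prefixed base64 string rather than raising. A sketch using the same blob as the test:

    import json

    from cloudinit import util

    blob = b"\xba\x03Qx-#y\xea"  # not valid UTF-8
    out = json.loads(util.json_dumps({"blob": blob}))
    assert out == {"blob": "ci-b64:ugNReC0jeeo="}
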
-
-
-@mock.patch('os.path.exists')
-class TestIsLXD(CiTestCase):
-
- def test_is_lxd_true_on_sock_device(self, m_exists):
- """When lxd's /dev/lxd/sock exists, is_lxd returns true."""
- m_exists.return_value = True
- self.assertTrue(util.is_lxd())
- m_exists.assert_called_once_with('/dev/lxd/sock')
-
- def test_is_lxd_false_when_sock_device_absent(self, m_exists):
- """When lxd's /dev/lxd/sock is absent, is_lxd returns false."""
- m_exists.return_value = False
- self.assertFalse(util.is_lxd())
- m_exists.assert_called_once_with('/dev/lxd/sock')
-
-
-class TestReadCcFromCmdline:
-
- @pytest.mark.parametrize(
- "cmdline,expected_cfg",
- [
- # Return None if cmdline has no cc:<YAML>end_cc content.
- (CiTestCase.random_string(), None),
- # Return None if YAML content is empty string.
- ('foo cc: end_cc bar', None),
- # Return expected dictionary without trailing end_cc marker.
- ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
- # Return expected dictionary w escaped newline and no end_cc.
- ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
- # Return expected dictionary of yaml between cc: and end_cc.
- ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
- # Return dict with list value w escaped newline, no end_cc.
- (
- 'cc: ssh_import_id: [smoser, kirkland]\\n',
- {'ssh_import_id': ['smoser', 'kirkland']}
- ),
- # Parse urlencoded brackets in yaml content.
- (
- 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc',
- {'ssh_import_id': ['smoser', 'kirkland']}
- ),
- # Parse complete urlencoded yaml content.
- (
- 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc',
- {'ssh_import_id': ['user1', 'user2']}
- ),
- # Parse nested dictionary in yaml content.
- (
- 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc',
- {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}
- ),
- # Parse single mapping value in yaml content.
- ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}),
- # Parse multiline content with multiple mapping and nested lists.
- (
- ('cc: ssh_import_id: [smoser, bob]\\n'
- 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
- {'ssh_import_id': ['smoser', 'bob'],
- 'runcmd': [['ls', '-l'], 'echo hi']}
- ),
- # Parse multiline encoded content w/ mappings and nested lists.
- (
- ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n'
- 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
- {'ssh_import_id': ['smoser', 'bob'],
- 'runcmd': [['ls', '-l'], 'echo hi']}
- ),
- # test encoded escaped newlines work.
- #
- # unquote(encoded_content)
- # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
- (
- ('cc: ' +
- ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn'
- 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
- '%20echo%20hi%20%5D') + ' end_cc'),
- {'ssh_import_id': ['smoser', 'bob'],
- 'runcmd': [['ls', '-l'], 'echo hi']}
- ),
- # test encoded newlines work.
- #
- # unquote(encoded_content)
- # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
- (
- ("cc: " +
- ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
- 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
- '%20echo%20hi%20%5D') + ' end_cc'),
- {'ssh_import_id': ['smoser', 'bob'],
- 'runcmd': [['ls', '-l'], 'echo hi']}
- ),
- # Parse and merge multiple yaml content sections.
- (
- ('cc:ssh_import_id: [smoser, bob] end_cc '
- 'cc: runcmd: [ [ ls, -l ] ] end_cc'),
- {'ssh_import_id': ['smoser', 'bob'],
- 'runcmd': [['ls', '-l']]}
- ),
- # Parse and merge multiple encoded yaml content sections.
- (
- ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc '
- 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'),
- {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}
- ),
- ]
- )
- def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
- assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
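
The parametrised cases above describe the kernel command line cloud-config syntax: YAML placed between a `cc:` token and an optional `end_cc` marker, with URL-encoding (%5B, %0A, ...) and literal "\n" accepted, and multiple sections merged into one dict. A sketch built from the same cases:

    from cloudinit import util

    cmdline = (
        "root=/dev/vda1 "
        "cc: ssh_import_id: [smoser, bob] end_cc "
        "cc: runcmd: [ [ ls, -l ] ] end_cc"
    )
    cfg = util.read_conf_from_cmdline(cmdline=cmdline)
    assert cfg == {
        "ssh_import_id": ["smoser", "bob"],
        "runcmd": [["ls", "-l"]],
    }
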
-
-
-class TestMountCb:
- """Tests for ``util.mount_cb``.
-
- These tests consider the "unit" under test to be ``util.mount_cb`` and
- ``util.unmounter``, which is only used by ``mount_cb``.
-
- TODO: Test default mtype determination
- TODO: Test the if/else branch that actually performs the mounting operation
- """
-
- @pytest.yield_fixture
- def already_mounted_device_and_mountdict(self):
- """Mock an already-mounted device, and yield (device, mount dict)"""
- device = "/dev/fake0"
- mountpoint = "/mnt/fake"
- with mock.patch("cloudinit.util.subp.subp"):
- with mock.patch("cloudinit.util.mounts") as m_mounts:
- mounts = {device: {"mountpoint": mountpoint}}
- m_mounts.return_value = mounts
- yield device, mounts[device]
-
- @pytest.fixture
- def already_mounted_device(self, already_mounted_device_and_mountdict):
- """already_mounted_device_and_mountdict, but return only the device"""
- return already_mounted_device_and_mountdict[0]
-
- @pytest.mark.parametrize(
- "mtype,expected",
- [
- # While the filesystem is called iso9660, the mount type is cd9660
- ("iso9660", "cd9660"),
- # vfat is generally called "msdos" on BSD
- ("vfat", "msdos"),
- # judging from man pages, only FreeBSD has this alias
- ("msdosfs", "msdos"),
- # Test happy path
- ("ufs", "ufs")
- ],
- )
- @mock.patch("cloudinit.util.is_Linux", autospec=True)
- @mock.patch("cloudinit.util.is_BSD", autospec=True)
- @mock.patch("cloudinit.util.subp.subp")
- @mock.patch("cloudinit.temp_utils.tempdir", autospec=True)
- def test_normalize_mtype_on_bsd(
- self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected
- ):
- m_is_BSD.return_value = True
- m_is_Linux.return_value = False
- m_tmpdir.return_value.__enter__ = mock.Mock(
- autospec=True, return_value="/tmp/fake"
- )
- m_tmpdir.return_value.__exit__ = mock.Mock(
- autospec=True, return_value=True
- )
- callback = mock.Mock(autospec=True)
-
- util.mount_cb('/dev/fake0', callback, mtype=mtype)
- assert mock.call(
- ["mount", "-o", "ro", "-t", expected, "/dev/fake0", "/tmp/fake"],
- update_env=None) in m_subp.call_args_list
-
- @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
- def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
- with pytest.raises(TypeError):
- util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
-
- @mock.patch("cloudinit.util.subp.subp")
- def test_already_mounted_does_not_mount_or_umount_anything(
- self, m_subp, already_mounted_device
- ):
- util.mount_cb(already_mounted_device, mock.Mock())
-
- assert 0 == m_subp.call_count
-
- @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
- def test_already_mounted_calls_callback(
- self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
- ):
- device, mount_dict = already_mounted_device_and_mountdict
- mountpoint = mount_dict["mountpoint"]
- mount_dict["mountpoint"] += trailing_slash_in_mounts
-
- callback = mock.Mock()
- util.mount_cb(device, callback)
-
- # The mountpoint passed to callback should always have a trailing
- # slash, regardless of the input
- assert [mock.call(mountpoint + "/")] == callback.call_args_list
-
- def test_already_mounted_calls_callback_with_data(
- self, already_mounted_device
- ):
- callback = mock.Mock()
- util.mount_cb(
- already_mounted_device, callback, data=mock.sentinel.data
- )
-
- assert [
- mock.call(mock.ANY, mock.sentinel.data)
- ] == callback.call_args_list
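
mount_cb mounts the device read-only, hands the callback a mountpoint that always ends in "/", passes data through when given, skips mounting if the device is already mounted, and on BSD translates Linux filesystem names (iso9660 -> cd9660, vfat/msdosfs -> msdos). A hedged sketch; the device and file name are only illustrative:

    from cloudinit import util

    def read_seed(mountpoint):
        # The trailing '/' is guaranteed, so plain concatenation is safe.
        return util.load_file(mountpoint + "meta-data")

    # Mount, run the callback against the mountpoint, then unmount;
    # mount_cb returns whatever the callback returned.
    seed = util.mount_cb("/dev/sr0", read_seed, mtype="iso9660")
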
-
-
-@mock.patch("cloudinit.util.write_file")
-class TestEnsureFile:
- """Tests for ``cloudinit.util.ensure_file``."""
-
- def test_parameters_passed_through(self, m_write_file):
- """Test the parameters in the signature are passed to write_file."""
- util.ensure_file(
- mock.sentinel.path,
- mode=mock.sentinel.mode,
- preserve_mode=mock.sentinel.preserve_mode,
- )
-
- assert 1 == m_write_file.call_count
- args, kwargs = m_write_file.call_args
- assert (mock.sentinel.path,) == args
- assert mock.sentinel.mode == kwargs["mode"]
- assert mock.sentinel.preserve_mode == kwargs["preserve_mode"]
-
- @pytest.mark.parametrize(
- "kwarg,expected",
- [
- # Files should be world-readable by default
- ("mode", 0o644),
- # The previous behaviour of not preserving mode should be retained
- ("preserve_mode", False),
- ],
- )
- def test_defaults(self, m_write_file, kwarg, expected):
- """Test that ensure_file defaults appropriately."""
- util.ensure_file(mock.sentinel.path)
-
- assert 1 == m_write_file.call_count
- _args, kwargs = m_write_file.call_args
- assert expected == kwargs[kwarg]
-
- def test_static_parameters_are_passed(self, m_write_file):
- """Test that the static write_files parameters are passed correctly."""
- util.ensure_file(mock.sentinel.path)
-
- assert 1 == m_write_file.call_count
- _args, kwargs = m_write_file.call_args
- assert "" == kwargs["content"]
- assert "ab" == kwargs["omode"]
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 2c1ae368..d971b278 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -10,7 +10,6 @@
import types
-
_NAME_TYPES = (
types.ModuleType,
types.FunctionType,
@@ -23,9 +22,10 @@ def obj_name(obj):
if isinstance(obj, _NAME_TYPES):
return str(obj.__name__)
else:
- if not hasattr(obj, '__class__'):
+ if not hasattr(obj, "__class__"):
return repr(obj)
else:
return obj_name(obj.__class__)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index caa88435..5b2f2ef9 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -17,7 +17,7 @@ from errno import ENOENT
from functools import partial
from http.client import NOT_FOUND
from itertools import count
-from urllib.parse import urlparse, urlunparse, quote
+from urllib.parse import quote, urlparse, urlunparse
import requests
from requests import exceptions
@@ -27,37 +27,20 @@ from cloudinit import version
LOG = logging.getLogger(__name__)
-
-# Check if requests has ssl support (added in requests >= 0.8.8)
-SSL_ENABLED = False
-CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
-_REQ_VER = None
-REDACTED = 'REDACTED'
-try:
- from distutils.version import LooseVersion
- import pkg_resources
- _REQ = pkg_resources.get_distribution('requests')
- _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member
- if _REQ_VER >= LooseVersion('0.8.8'):
- SSL_ENABLED = True
- if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'):
- CONFIG_ENABLED = True
-except ImportError:
- pass
+REDACTED = "REDACTED"
def _cleanurl(url):
- parsed_url = list(urlparse(url, scheme='http'))
+ parsed_url = list(urlparse(url, scheme="http"))
if not parsed_url[1] and parsed_url[2]:
# Swap these since this seems to be a common
# occurrence when given urls like 'www.google.com'
parsed_url[1] = parsed_url[2]
- parsed_url[2] = ''
+ parsed_url[2] = ""
return urlunparse(parsed_url)
def combine_url(base, *add_ons):
-
def combine_single(url, add_on):
url_parsed = list(urlparse(url))
path = url_parsed[2]
@@ -87,7 +70,7 @@ def read_file_or_url(url, **kwargs):
if url.lower().startswith("file://"):
if kwargs.get("data"):
LOG.warning("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
+ file_path = url[len("file://") :]
try:
with open(file_path, "rb") as fp:
contents = fp.read()
@@ -117,7 +100,7 @@ class StringResponse(object):
return True
def __str__(self):
- return self.contents.decode('utf-8')
+ return self.contents.decode("utf-8")
class FileResponse(StringResponse):
@@ -173,28 +156,39 @@ class UrlError(IOError):
def _get_ssl_args(url, ssl_details):
ssl_args = {}
scheme = urlparse(url).scheme
- if scheme == 'https' and ssl_details:
- if not SSL_ENABLED:
- LOG.warning("SSL is not supported in requests v%s, "
- "cert. verification can not occur!", _REQ_VER)
+ if scheme == "https" and ssl_details:
+ if "ca_certs" in ssl_details and ssl_details["ca_certs"]:
+ ssl_args["verify"] = ssl_details["ca_certs"]
else:
- if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
- ssl_args['verify'] = ssl_details['ca_certs']
- else:
- ssl_args['verify'] = True
- if 'cert_file' in ssl_details and 'key_file' in ssl_details:
- ssl_args['cert'] = [ssl_details['cert_file'],
- ssl_details['key_file']]
- elif 'cert_file' in ssl_details:
- ssl_args['cert'] = str(ssl_details['cert_file'])
+ ssl_args["verify"] = True
+ if "cert_file" in ssl_details and "key_file" in ssl_details:
+ ssl_args["cert"] = [
+ ssl_details["cert_file"],
+ ssl_details["key_file"],
+ ]
+ elif "cert_file" in ssl_details:
+ ssl_args["cert"] = str(ssl_details["cert_file"])
return ssl_args
-def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, headers_redact=None,
- ssl_details=None, check_status=True, allow_redirects=True,
- exception_cb=None, session=None, infinite=False, log_req_resp=True,
- request_method=None):
+def readurl(
+ url,
+ data=None,
+ timeout=None,
+ retries=0,
+ sec_between=1,
+ headers=None,
+ headers_cb=None,
+ headers_redact=None,
+ ssl_details=None,
+ check_status=True,
+ allow_redirects=True,
+ exception_cb=None,
+ session=None,
+ infinite=False,
+ log_req_resp=True,
+ request_method=None,
+) -> UrlResponse:
"""Wrapper around requests.Session to read the url and retry if necessary
:param url: Mandatory url to request.
@@ -227,47 +221,36 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
"""
url = _cleanurl(url)
req_args = {
- 'url': url,
+ "url": url,
}
req_args.update(_get_ssl_args(url, ssl_details))
- req_args['allow_redirects'] = allow_redirects
+ req_args["allow_redirects"] = allow_redirects
if not request_method:
- request_method = 'POST' if data else 'GET'
- req_args['method'] = request_method
+ request_method = "POST" if data else "GET"
+ req_args["method"] = request_method
if timeout is not None:
- req_args['timeout'] = max(float(timeout), 0)
+ req_args["timeout"] = max(float(timeout), 0)
if headers_redact is None:
headers_redact = []
- # It doesn't seem like config
- # was added in older library versions (or newer ones either), thus we
- # need to manually do the retries if it wasn't...
- if CONFIG_ENABLED:
- req_config = {
- 'store_cookies': False,
- }
- # Don't use the retry support built-in
- # since it doesn't allow for 'sleep_times'
- # in between tries....
- # if retries:
- # req_config['max_retries'] = max(int(retries), 0)
- req_args['config'] = req_config
manual_tries = 1
if retries:
manual_tries = max(int(retries) + 1, 1)
def_headers = {
- 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+ "User-Agent": "Cloud-Init/%s" % (version.version_string()),
}
if headers:
def_headers.update(headers)
headers = def_headers
if not headers_cb:
+
def _cb(url):
return headers
+
headers_cb = _cb
if data:
- req_args['data'] = data
+ req_args["data"] = data
if sec_between is None:
sec_between = -1
@@ -276,12 +259,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# doesn't handle sleeping between tries...
# Infinitely retry if infinite is True
for i in count() if infinite else range(0, manual_tries):
- req_args['headers'] = headers_cb(url)
+ req_args["headers"] = headers_cb(url)
filtered_req_args = {}
for (k, v) in req_args.items():
- if k == 'data':
+ if k == "data":
continue
- if k == 'headers' and headers_redact:
+ if k == "headers" and headers_redact:
matched_headers = [k for k in headers_redact if v.get(k)]
if matched_headers:
filtered_req_args[k] = copy.deepcopy(v)
@@ -292,9 +275,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
try:
if log_req_resp:
- LOG.debug("[%s/%s] open '%s' with %s configuration", i,
- "infinite" if infinite else manual_tries, url,
- filtered_req_args)
+ LOG.debug(
+ "[%s/%s] open '%s' with %s configuration",
+ i,
+ "infinite" if infinite else manual_tries,
+ url,
+ filtered_req_args,
+ )
if session is None:
session = requests.Session()
@@ -304,22 +291,36 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
if check_status:
r.raise_for_status()
- LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
- r.status_code, len(r.content), (i + 1))
+ LOG.debug(
+ "Read from %s (%s, %sb) after %s attempts",
+ url,
+ r.status_code,
+ len(r.content),
+ (i + 1),
+ )
# Doesn't seem like we can make it use a different
# subclass for responses, so add our own backward-compat
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError)) and
- hasattr(e, 'response') and # This appeared in v 0.10.8
- hasattr(e.response, 'status_code')):
- excps.append(UrlError(e, code=e.response.status_code,
- headers=e.response.headers,
- url=url))
+ if (
+ isinstance(e, (exceptions.HTTPError))
+ and hasattr(e, "response")
+ and hasattr( # This appeared in v 0.10.8
+ e.response, "status_code"
+ )
+ ):
+ excps.append(
+ UrlError(
+ e,
+ code=e.response.status_code,
+ headers=e.response.headers,
+ url=url,
+ )
+ )
else:
excps.append(UrlError(e, url=url))
- if SSL_ENABLED and isinstance(e, exceptions.SSLError):
+ if isinstance(e, exceptions.SSLError):
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
@@ -328,22 +329,32 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# to continue retrying and False to break and re-raise the
# exception
break
- if (infinite and sec_between > 0) or \
- (i + 1 < manual_tries and sec_between > 0):
+ if (infinite and sec_between > 0) or (
+ i + 1 < manual_tries and sec_between > 0
+ ):
if log_req_resp:
LOG.debug(
"Please wait %s seconds while we wait to try again",
- sec_between)
+ sec_between,
+ )
time.sleep(sec_between)
- if excps:
- raise excps[-1]
- return None # Should throw before this...
-
-def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
- headers_cb=None, headers_redact=None, sleep_time=1,
- exception_cb=None, sleep_time_cb=None, request_method=None):
+ raise excps[-1]
+
+
+def wait_for_url(
+ urls,
+ max_wait=None,
+ timeout=None,
+ status_cb=None,
+ headers_cb=None,
+ headers_redact=None,
+ sleep_time=1,
+ exception_cb=None,
+ sleep_time_cb=None,
+ request_method=None,
+):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -388,9 +399,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
status_cb = log_status_cb
def timeup(max_wait, start_time):
- if (max_wait is None):
+ if max_wait is None:
return False
- return ((max_wait <= 0) or (time.time() - start_time > max_wait))
+ return (max_wait <= 0) or (time.time() - start_time > max_wait)
loop_n = 0
response = None
@@ -404,8 +415,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
if loop_n != 0:
if timeup(max_wait, start_time):
break
- if (max_wait is not None and
- timeout and (now + timeout > (start_time + max_wait))):
+ if (
+ max_wait is not None
+ and timeout
+ and (now + timeout > (start_time + max_wait))
+ ):
# shorten timeout to not run way over max_time
timeout = int((start_time + max_wait) - now)
@@ -418,17 +432,29 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
headers = {}
response = readurl(
- url, headers=headers, headers_redact=headers_redact,
- timeout=timeout, check_status=False,
- request_method=request_method)
+ url,
+ headers=headers,
+ headers_redact=headers_redact,
+ timeout=timeout,
+ check_status=False,
+ request_method=request_method,
+ )
if not response.contents:
reason = "empty response [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
elif not response.ok():
reason = "bad status code [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
else:
return url, response.contents
except UrlError as e:
@@ -440,10 +466,12 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
time_taken = int(time.time() - start_time)
max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
- status_msg = "Calling '%s' failed [%s/%s]: %s" % (url,
- time_taken,
- max_wait_str,
- reason)
+ status_msg = "Calling '%s' failed [%s/%s]: %s" % (
+ url,
+ time_taken,
+ max_wait_str,
+ reason,
+ )
status_cb(status_msg)
if exception_cb:
# This can be used to alter the headers that will be sent
@@ -455,17 +483,23 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
break
loop_n = loop_n + 1
- LOG.debug("Please wait %s seconds while we wait to try again",
- sleep_time)
+ LOG.debug(
+ "Please wait %s seconds while we wait to try again", sleep_time
+ )
time.sleep(sleep_time)
return False, None
class OauthUrlHelper(object):
- def __init__(self, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None,
- skew_data_file="/run/oauth_skew.json"):
+ def __init__(
+ self,
+ consumer_key=None,
+ token_key=None,
+ token_secret=None,
+ consumer_secret=None,
+ skew_data_file="/run/oauth_skew.json",
+ ):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret or ""
self.token_key = token_key
@@ -477,8 +511,10 @@ class OauthUrlHelper(object):
if not any(required):
self._do_oauth = False
elif not all(required):
- raise ValueError("all or none of token_key, token_secret, or "
- "consumer_key can be set")
+ raise ValueError(
+ "all or none of token_key, token_secret, or "
+ "consumer_key can be set"
+ )
old = self.read_skew_file()
self.skew_data = old or {}
@@ -501,16 +537,17 @@ class OauthUrlHelper(object):
fp.write(json.dumps(cur))
def exception_cb(self, msg, exception):
- if not (isinstance(exception, UrlError) and
- (exception.code == 403 or exception.code == 401)):
+ if not (
+ isinstance(exception, UrlError)
+ and (exception.code == 403 or exception.code == 401)
+ ):
return
- if 'date' not in exception.headers:
- LOG.warning("Missing header 'date' in %s response",
- exception.code)
+ if "date" not in exception.headers:
+ LOG.warning("Missing header 'date' in %s response", exception.code)
return
- date = exception.headers['date']
+ date = exception.headers["date"]
try:
remote_time = time.mktime(parsedate(date))
except Exception as e:
@@ -537,15 +574,21 @@ class OauthUrlHelper(object):
timestamp = int(time.time()) + self.skew_data[host]
return oauth_headers(
- url=url, consumer_key=self.consumer_key,
- token_key=self.token_key, token_secret=self.token_secret,
- consumer_secret=self.consumer_secret, timestamp=timestamp)
+ url=url,
+ consumer_key=self.consumer_key,
+ token_key=self.token_key,
+ token_secret=self.token_secret,
+ consumer_secret=self.consumer_secret,
+ timestamp=timestamp,
+ )
def _wrapped(self, wrapped_func, args, kwargs):
- kwargs['headers_cb'] = partial(
- self._headers_cb, kwargs.get('headers_cb'))
- kwargs['exception_cb'] = partial(
- self._exception_cb, kwargs.get('exception_cb'))
+ kwargs["headers_cb"] = partial(
+ self._headers_cb, kwargs.get("headers_cb")
+ )
+ kwargs["exception_cb"] = partial(
+ self._exception_cb, kwargs.get("exception_cb")
+ )
return wrapped_func(*args, **kwargs)
def wait_for_url(self, *args, **kwargs):
@@ -571,12 +614,13 @@ class OauthUrlHelper(object):
return headers
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
+def oauth_headers(
+ url, consumer_key, token_key, token_secret, consumer_secret, timestamp=None
+):
try:
import oauthlib.oauth1 as oauth1
except ImportError as e:
- raise NotImplementedError('oauth support is not available') from e
+ raise NotImplementedError("oauth support is not available") from e
if timestamp:
timestamp = str(timestamp)
@@ -589,7 +633,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
resource_owner_key=token_key,
resource_owner_secret=token_secret,
signature_method=oauth1.SIGNATURE_PLAINTEXT,
- timestamp=timestamp)
+ timestamp=timestamp,
+ )
_uri, signed_headers, _body = client.sign(url)
return signed_headers
@@ -607,4 +652,5 @@ def retry_on_url_exc(msg, exc):
return True
return False
+
# vi: ts=4 expandtab
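
Beyond the black-style reformatting, two behavioural changes stand out in the url_helper hunks above: the requests version probing (SSL_ENABLED/CONFIG_ENABLED) is gone, so ssl_details are always honoured, and readurl now always raises the last recorded UrlError once its attempts are exhausted instead of falling back to `return None`. A usage sketch (the metadata URL is only an example):

    from cloudinit import url_helper

    try:
        # Up to 4 attempts (retries=3), sleeping 2 seconds between them.
        resp = url_helper.readurl(
            "http://169.254.169.254/latest/meta-data/",
            retries=3,
            sec_between=2,
        )
        print(resp.contents)
    except url_helper.UrlError as e:
        print("metadata fetch failed:", e)
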
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 1317e063..05c66741 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -14,11 +14,10 @@ from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
-from cloudinit import handlers
+from cloudinit import features, handlers
from cloudinit import log as logging
-from cloudinit import features
-from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
+from cloudinit.url_helper import UrlError, read_file_or_url
LOG = logging.getLogger(__name__)
@@ -28,11 +27,11 @@ PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
# Saves typing errors
-CONTENT_TYPE = 'Content-Type'
+CONTENT_TYPE = "Content-Type"
# Various special content types that cause special actions
TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
-INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
+INCLUDE_TYPES = ["text/x-include-url", "text/x-include-once-url"]
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
@@ -40,18 +39,18 @@ ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
- 'application/gzip',
- 'application/gzip-compressed',
- 'application/gzipped',
- 'application/x-compress',
- 'application/x-compressed',
- 'application/x-gunzip',
- 'application/x-gzip',
- 'application/x-gzip-compressed',
+ "application/gzip",
+ "application/gzip-compressed",
+ "application/gzipped",
+ "application/x-compress",
+ "application/x-compressed",
+ "application/x-gunzip",
+ "application/x-gzip",
+ "application/x-gzip-compressed",
]
# Msg header used to track attachments
-ATTACHMENT_FIELD = 'Number-Attachments'
+ATTACHMENT_FIELD = "Number-Attachments"
 # Only the following content types can have their launch index examined
 # in their payload, every other content type can still provide a header
@@ -64,9 +63,8 @@ def _replace_header(msg, key, value):
def _set_filename(msg, filename):
- del msg['Content-Disposition']
- msg.add_header('Content-Disposition',
- 'attachment', filename=str(filename))
+ del msg["Content-Disposition"]
+ msg.add_header("Content-Disposition", "attachment", filename=str(filename))
def _handle_error(error_message, source_exception=None):
@@ -91,7 +89,6 @@ class UserDataProcessor(object):
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
-
def find_ctype(payload):
return handlers.type_from_starts_with(payload)
@@ -118,7 +115,9 @@ class UserDataProcessor(object):
error_message = (
"Failed decompressing payload from {} of"
" length {} due to: {}".format(
- ctype_orig, len(payload), e))
+ ctype_orig, len(payload), e
+ )
+ )
_handle_error(error_message, e)
continue
@@ -130,7 +129,7 @@ class UserDataProcessor(object):
# to check the true MIME type for x-shellscript type since all
# shellscript payloads must have a #! header. The other MIME types
# that cloud-init supports do not have the same guarantee.
- if ctype_orig in TYPE_NEEDED + ['text/x-shellscript']:
+ if ctype_orig in TYPE_NEEDED + ["text/x-shellscript"]:
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -148,7 +147,7 @@ class UserDataProcessor(object):
# after decoding and decompression.
if part.get_filename():
_set_filename(n_part, part.get_filename())
- for h in ('Launch-Index',):
+ for h in ("Launch-Index",):
if h in part:
_replace_header(n_part, h, str(part[h]))
part = n_part
@@ -171,7 +170,7 @@ class UserDataProcessor(object):
self._attach_part(append_msg, part)
def _attach_launch_index(self, msg):
- header_idx = msg.get('Launch-Index', None)
+ header_idx = msg.get("Launch-Index", None)
payload_idx = None
if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
try:
@@ -179,7 +178,7 @@ class UserDataProcessor(object):
# that might affect the final header
payload = util.load_yaml(msg.get_payload(decode=True))
if payload:
- payload_idx = payload.get('launch-index')
+ payload_idx = payload.get("launch-index")
except Exception:
pass
# Header overrides contents, for now (?) or the other way around?
@@ -190,14 +189,15 @@ class UserDataProcessor(object):
payload_idx = header_idx
if payload_idx is not None:
try:
- msg.add_header('Launch-Index', str(int(payload_idx)))
+ msg.add_header("Launch-Index", str(int(payload_idx)))
except (ValueError, TypeError):
pass
def _get_include_once_filename(self, entry):
- entry_fn = util.hash_blob(entry, 'md5', 64)
- return os.path.join(self.paths.get_ipath_cur('data'),
- 'urlcache', entry_fn)
+ entry_fn = util.hash_blob(entry, "md5", 64)
+ return os.path.join(
+ self.paths.get_ipath_cur("data"), "urlcache", entry_fn
+ )
def _process_before_attach(self, msg, attached_id):
if not msg.get_filename():
@@ -212,13 +212,13 @@ class UserDataProcessor(object):
for line in content.splitlines():
lc_line = line.lower()
if lc_line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
+ line = line[len("#include-once") :].lstrip()
# Every following include will now
# not be refetched.... but will be
# re-read from a local urlcache (if it worked)
include_once_on = True
elif lc_line.startswith("#include"):
- line = line[len("#include"):].lstrip()
+ line = line[len("#include") :].lstrip()
# Disable the include once if it was on
# if it wasn't, then this has no effect.
include_once_on = False
@@ -236,29 +236,37 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = read_file_or_url(include_url, timeout=5, retries=10,
- ssl_details=self.ssl_details)
+ resp = read_file_or_url(
+ include_url,
+ timeout=5,
+ retries=10,
+ ssl_details=self.ssl_details,
+ )
if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp.contents,
- mode=0o600)
+ util.write_file(
+ include_once_fn, resp.contents, mode=0o600
+ )
if resp.ok():
content = resp.contents
else:
error_message = (
"Fetching from {} resulted in"
" a invalid http code of {}".format(
- include_url, resp.code))
+ include_url, resp.code
+ )
+ )
_handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
- message += ' for url: {0}'.format(include_url)
+ message += " for url: {0}".format(include_url)
_handle_error(message, urle)
except IOError as ioe:
error_message = "Fetching from {} resulted in {}".format(
- include_url, ioe)
+ include_url, ioe
+ )
_handle_error(error_message, ioe)
if content is not None:
@@ -275,20 +283,20 @@ class UserDataProcessor(object):
# or
# scalar(payload)
if isinstance(ent, str):
- ent = {'content': ent}
+ ent = {"content": ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
continue
- content = ent.get('content', '')
- mtype = ent.get('type')
+ content = ent.get("content", "")
+ mtype = ent.get("type")
if not mtype:
default = ARCHIVE_UNDEF_TYPE
if isinstance(content, bytes):
default = ARCHIVE_UNDEF_BINARY_TYPE
mtype = handlers.type_from_starts_with(content, default)
- maintype, subtype = mtype.split('/', 1)
+ maintype, subtype = mtype.split("/", 1)
if maintype == "text":
if isinstance(content, bytes):
content = content.decode()
@@ -297,16 +305,21 @@ class UserDataProcessor(object):
msg = MIMEBase(maintype, subtype)
msg.set_payload(content)
- if 'filename' in ent:
- _set_filename(msg, ent['filename'])
- if 'launch-index' in ent:
- msg.add_header('Launch-Index', str(ent['launch-index']))
+ if "filename" in ent:
+ _set_filename(msg, ent["filename"])
+ if "launch-index" in ent:
+ msg.add_header("Launch-Index", str(ent["launch-index"]))
for header in list(ent.keys()):
- if header.lower() in ('content', 'filename', 'type',
- 'launch-index', 'content-disposition',
- ATTACHMENT_FIELD.lower(),
- CONTENT_TYPE.lower()):
+ if header.lower() in (
+ "content",
+ "filename",
+ "type",
+ "launch-index",
+ "content-disposition",
+ ATTACHMENT_FIELD.lower(),
+ CONTENT_TYPE.lower(),
+ ):
continue
msg.add_header(header, ent[header])
@@ -318,7 +331,7 @@ class UserDataProcessor(object):
at its 'Number-Attachments' header.
"""
if ATTACHMENT_FIELD not in outer_msg:
- outer_msg[ATTACHMENT_FIELD] = '0'
+ outer_msg[ATTACHMENT_FIELD] = "0"
if new_count is not None:
_replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
@@ -344,8 +357,8 @@ class UserDataProcessor(object):
def is_skippable(part):
# multipart/* are just containers
- part_maintype = part.get_content_maintype() or ''
- if part_maintype.lower() == 'multipart':
+ part_maintype = part.get_content_maintype() or ""
+ if part_maintype.lower() == "multipart":
return True
return False
@@ -355,7 +368,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
"""convert a string (more likely bytes) or a message into
a mime message."""
if not raw_data:
- raw_data = b''
+ raw_data = b""
def create_binmsg(data, content_type):
maintype, subtype = content_type.split("/", 1)
@@ -364,12 +377,12 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
return msg
if isinstance(raw_data, str):
- bdata = raw_data.encode('utf-8')
+ bdata = raw_data.encode("utf-8")
else:
bdata = raw_data
bdata = util.decomp_gzip(bdata, decode=False)
if b"mime-version:" in bdata[0:4096].lower():
- msg = util.message_from_string(bdata.decode('utf-8'))
+ msg = util.message_from_string(bdata.decode("utf-8"))
else:
msg = create_binmsg(bdata, content_type)
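
The user_data changes are formatting-only; the include handling shown above still works the same way. Each non-directive line of a text/x-include-url part is fetched with a 5-second timeout and up to 10 retries, and under #include-once the fetched body is written (mode 0600) to the instance's data/urlcache directory and re-read from there on later boots. A hypothetical include part (the URLs are placeholders):

    #include-once
    https://example.com/common-cloud-config.yaml
    https://example.com/host-specific.yaml
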
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 769f3425..569fc215 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -34,14 +34,15 @@ import time
from base64 import b64decode, b64encode
from errno import ENOENT
from functools import lru_cache
+from typing import List
from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import subp
from cloudinit import (
mergers,
safeyaml,
+ subp,
temp_utils,
type_utils,
url_helper,
@@ -54,16 +55,16 @@ LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
- os.sep: '_',
+ os.sep: "_",
}
-FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
+FN_ALLOWED = "_-.()" + string.digits + string.ascii_letters
-TRUE_STRINGS = ('true', '1', 'on', 'yes')
-FALSE_STRINGS = ('off', '0', 'no', 'false')
+TRUE_STRINGS = ("true", "1", "on", "yes")
+FALSE_STRINGS = ("off", "0", "no", "false")
def kernel_version():
- return tuple(map(int, os.uname().release.split('.')[:2]))
+ return tuple(map(int, os.uname().release.split(".")[:2]))
@lru_cache()
@@ -73,28 +74,36 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp.subp(['dpkg', '--print-architecture'], capture=True,
- target=target)
+ out, _ = subp.subp(
+ ["dpkg", "--print-architecture"], capture=True, target=target
+ )
return out.strip()
@lru_cache()
def lsb_release(target=None):
- fmap = {'Codename': 'codename', 'Description': 'description',
- 'Distributor ID': 'id', 'Release': 'release'}
+ fmap = {
+ "Codename": "codename",
+ "Description": "description",
+ "Distributor ID": "id",
+ "Release": "release",
+ }
data = {}
try:
- out, _ = subp.subp(['lsb_release', '--all'], capture=True,
- target=target)
+ out, _ = subp.subp(
+ ["lsb_release", "--all"], capture=True, target=target
+ )
for line in out.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
data[fmap[fname]] = val.strip()
missing = [k for k in fmap.values() if k not in data]
if len(missing):
- LOG.warning("Missing fields in lsb_release --all output: %s",
- ','.join(missing))
+ LOG.warning(
+ "Missing fields in lsb_release --all output: %s",
+ ",".join(missing),
+ )
except subp.ProcessExecutionError as err:
LOG.warning("Unable to get lsb_release --all: %s", err)
@@ -103,14 +112,14 @@ def lsb_release(target=None):
return data
-def decode_binary(blob, encoding='utf-8'):
+def decode_binary(blob, encoding="utf-8"):
# Converts a binary type into a text type using given encoding.
if isinstance(blob, str):
return blob
return blob.decode(encoding)
-def encode_text(text, encoding='utf-8'):
+def encode_text(text, encoding="utf-8"):
# Converts a text string into a binary type using given encoding.
if isinstance(text, bytes):
return text
@@ -122,7 +131,7 @@ def b64d(source):
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
decoded = b64decode(source)
try:
- return decoded.decode('utf-8')
+ return decoded.decode("utf-8")
except UnicodeDecodeError:
return decoded
@@ -131,8 +140,8 @@ def b64e(source):
# Base64 encode some data, accepting bytes or unicode/str, and returning
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
if not isinstance(source, bytes):
- source = source.encode('utf-8')
- return b64encode(source).decode('utf-8')
+ source = source.encode("utf-8")
+ return b64encode(source).decode("utf-8")
def fully_decoded_payload(part):
@@ -142,14 +151,15 @@ def fully_decoded_payload(part):
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
- if (part.get_content_maintype() == 'text' and
- isinstance(cte_payload, bytes)):
+ if part.get_content_maintype() == "text" and isinstance(
+ cte_payload, bytes
+ ):
charset = part.get_charset()
if charset and charset.input_codec:
encoding = charset.input_codec
else:
- encoding = 'utf-8'
- return cte_payload.decode(encoding, 'surrogateescape')
+ encoding = "utf-8"
+ return cte_payload.decode(encoding, "surrogateescape")
return cte_payload
@@ -158,7 +168,7 @@ class SeLinuxGuard(object):
# Late import since it might not always
# be possible to use this
try:
- self.selinux = importer.import_module('selinux')
+ self.selinux = importer.import_module("selinux")
except ImportError:
self.selinux = None
self.path = path
@@ -183,13 +193,20 @@ class SeLinuxGuard(object):
except OSError:
return
- LOG.debug("Restoring selinux mode for %s (recursive=%s)",
- path, self.recursive)
+ LOG.debug(
+ "Restoring selinux mode for %s (recursive=%s)",
+ path,
+ self.recursive,
+ )
try:
self.selinux.restorecon(path, recursive=self.recursive)
except OSError as e:
- LOG.warning('restorecon failed on %s,%s maybe badness? %s',
- path, self.recursive, e)
+ LOG.warning(
+ "restorecon failed on %s,%s maybe badness? %s",
+ path,
+ self.recursive,
+ e,
+ )
class MountFailedError(Exception):
@@ -207,12 +224,18 @@ def fork_cb(child_cb, *args, **kwargs):
child_cb(*args, **kwargs)
os._exit(0)
except Exception:
- logexc(LOG, "Failed forking and calling callback %s",
- type_utils.obj_name(child_cb))
+ logexc(
+ LOG,
+ "Failed forking and calling callback %s",
+ type_utils.obj_name(child_cb),
+ )
os._exit(1)
else:
- LOG.debug("Forked child %s who will run callback %s",
- fid, type_utils.obj_name(child_cb))
+ LOG.debug(
+ "Forked child %s who will run callback %s",
+ fid,
+ type_utils.obj_name(child_cb),
+ )
def is_true(val, addons=None):
@@ -296,7 +319,7 @@ def uniq_merge(*lists):
if isinstance(a_list, str):
a_list = a_list.strip().split(",")
# Kickout the empty ones
- a_list = [a for a in a_list if len(a)]
+ a_list = [a for a in a_list if a]
combined_list.extend(a_list)
return uniq_list(combined_list)
@@ -309,7 +332,7 @@ def clean_filename(fn):
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
- fn = fn.replace(k, '')
+ fn = fn.replace(k, "")
fn = fn.strip()
return fn
@@ -333,7 +356,7 @@ def decomp_gzip(data, quiet=True, decode=True):
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
- ug_parted = ug_pair.split(':', 1)
+ ug_parted = ug_pair.split(":", 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
@@ -346,7 +369,7 @@ def extract_usergroup(ug_pair):
return (u, g)
-def find_modules(root_dir):
+def find_modules(root_dir) -> dict:
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
@@ -358,17 +381,23 @@ def find_modules(root_dir):
return entries
-def multi_log(text, console=True, stderr=True,
- log=None, log_level=logging.DEBUG):
+def multi_log(
+ text,
+ console=True,
+ stderr=True,
+ log=None,
+ log_level=logging.DEBUG,
+ fallback_to_stdout=True,
+):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
- with open(conpath, 'w') as wfh:
+ with open(conpath, "w") as wfh:
wfh.write(text)
wfh.flush()
- else:
+ elif fallback_to_stdout:
# A container may lack /dev/console (arguably a container bug). If
# it does not exist, then write output to stdout. this will result
# in duplicate stderr and stdout messages if stderr was True.
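
The only functional change in this hunk is the new fallback_to_stdout flag: when /dev/console is missing (typically in a container), multi_log used to fall back to stdout unconditionally; callers can now turn that duplication off. A minimal sketch:

    from cloudinit import util

    # Write to stderr and, if present, /dev/console, but never duplicate
    # the message onto stdout when the console device is absent.
    util.multi_log(
        "cloud-init: provisioning finished\n", fallback_to_stdout=False
    )
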
@@ -387,27 +416,36 @@ def multi_log(text, console=True, stderr=True,
@lru_cache()
def is_Linux():
- return 'Linux' in platform.system()
+ return "Linux" in platform.system()
@lru_cache()
def is_BSD():
- return 'BSD' in platform.system()
+ if "BSD" in platform.system():
+ return True
+ if platform.system() == "DragonFly":
+ return True
+ return False
@lru_cache()
def is_FreeBSD():
- return system_info()['variant'] == "freebsd"
+ return system_info()["variant"] == "freebsd"
+
+
+@lru_cache()
+def is_DragonFlyBSD():
+ return system_info()["variant"] == "dragonfly"
@lru_cache()
def is_NetBSD():
- return system_info()['variant'] == "netbsd"
+ return system_info()["variant"] == "netbsd"
@lru_cache()
def is_OpenBSD():
- return system_info()['variant'] == "openbsd"
+ return system_info()["variant"] == "openbsd"
def get_cfg_option_bool(yobj, key, default=False):
@@ -437,57 +475,80 @@ def _parse_redhat_release(release_file=None):
"""
if not release_file:
- release_file = '/etc/redhat-release'
+ release_file = "/etc/redhat-release"
if not os.path.exists(release_file):
return {}
redhat_release = load_file(release_file)
redhat_regex = (
- r'(?P<name>.+) release (?P<version>[\d\.]+) '
- r'\((?P<codename>[^)]+)\)')
+ r"(?P<name>.+) release (?P<version>[\d\.]+) "
+ r"\((?P<codename>[^)]+)\)"
+ )
+
+ # Virtuozzo deviates here
+ if "Virtuozzo" in redhat_release:
+ redhat_regex = r"(?P<name>.+) release (?P<version>[\d\.]+)"
+
match = re.match(redhat_regex, redhat_release)
if match:
group = match.groupdict()
- group['name'] = group['name'].lower().partition(' linux')[0]
- if group['name'] == 'red hat enterprise':
- group['name'] = 'redhat'
- return {'ID': group['name'], 'VERSION_ID': group['version'],
- 'VERSION_CODENAME': group['codename']}
+
+ # Virtuozzo has no codename in this file
+ if "Virtuozzo" in group["name"]:
+ group["codename"] = group["name"]
+
+ group["name"] = group["name"].lower().partition(" linux")[0]
+ if group["name"] == "red hat enterprise":
+ group["name"] = "redhat"
+ return {
+ "ID": group["name"],
+ "VERSION_ID": group["version"],
+ "VERSION_CODENAME": group["codename"],
+ }
return {}
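
As a rough illustration of the Virtuozzo special case added above — a minimal, self-contained sketch; the sample release strings are invented and only the regex handling mirrors the hunk:

    import re

    # Hypothetical /etc/redhat-release contents; real systems may differ.
    samples = [
        "Red Hat Enterprise Linux release 8.5 (Ootpa)",
        "Virtuozzo Linux release 8",
    ]

    for line in samples:
        pattern = (
            r"(?P<name>.+) release (?P<version>[\d\.]+) \((?P<codename>[^)]+)\)"
        )
        if "Virtuozzo" in line:  # Virtuozzo carries no "(codename)" suffix
            pattern = r"(?P<name>.+) release (?P<version>[\d\.]+)"
        print(re.match(pattern, line).groupdict())
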
@lru_cache()
def get_linux_distro():
- distro_name = ''
- distro_version = ''
- flavor = ''
+ distro_name = ""
+ distro_version = ""
+ flavor = ""
os_release = {}
- if os.path.exists('/etc/os-release'):
- os_release = load_shell_content(load_file('/etc/os-release'))
+ os_release_rhel = False
+ if os.path.exists("/etc/os-release"):
+ os_release = load_shell_content(load_file("/etc/os-release"))
if not os_release:
+ os_release_rhel = True
os_release = _parse_redhat_release()
if os_release:
- distro_name = os_release.get('ID', '')
- distro_version = os_release.get('VERSION_ID', '')
- if 'sles' in distro_name or 'suse' in distro_name:
+ distro_name = os_release.get("ID", "")
+ distro_version = os_release.get("VERSION_ID", "")
+ if "sles" in distro_name or "suse" in distro_name:
# RELEASE_BLOCKER: We will drop this sles divergent behavior in
# the future so that get_linux_distro returns a named tuple
# which will include both version codename and architecture
# on all distributions.
flavor = platform.machine()
+ elif distro_name == "photon":
+ flavor = os_release.get("PRETTY_NAME", "")
+ elif distro_name == "virtuozzo" and not os_release_rhel:
+ # Only use this if the redhat file is not parsed
+ flavor = os_release.get("PRETTY_NAME", "")
else:
- flavor = os_release.get('VERSION_CODENAME', '')
+ flavor = os_release.get("VERSION_CODENAME", "")
if not flavor:
- match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
- os_release.get('VERSION', ''))
+ match = re.match(
+ r"[^ ]+ \((?P<codename>[^)]+)\)",
+ os_release.get("VERSION", ""),
+ )
if match:
- flavor = match.groupdict()['codename']
- if distro_name == 'rhel':
- distro_name = 'redhat'
+ flavor = match.groupdict()["codename"]
+ if distro_name == "rhel":
+ distro_name = "redhat"
elif is_BSD():
distro_name = platform.system().lower()
distro_version = platform.release()
else:
- dist = ('', '', '')
+ dist = ("", "", "")
try:
# Was removed in 3.8
dist = platform.dist() # pylint: disable=W1505,E1101
@@ -499,46 +560,76 @@ def get_linux_distro():
if entry:
found = 1
if not found:
- LOG.warning('Unable to determine distribution, template '
- 'expansion may have unexpected results')
+ LOG.warning(
+ "Unable to determine distribution, template "
+ "expansion may have unexpected results"
+ )
return dist
return (distro_name, distro_version, flavor)
-@lru_cache()
-def system_info():
- info = {
- 'platform': platform.platform(),
- 'system': platform.system(),
- 'release': platform.release(),
- 'python': platform.python_version(),
- 'uname': list(platform.uname()),
- 'dist': get_linux_distro()
- }
- system = info['system'].lower()
- var = 'unknown'
+def _get_variant(info):
+ system = info["system"].lower()
+ variant = "unknown"
if system == "linux":
- linux_dist = info['dist'][0].lower()
+ linux_dist = info["dist"][0].lower()
if linux_dist in (
- 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel',
- 'suse'):
- var = linux_dist
- elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
- var = 'ubuntu'
- elif linux_dist == 'redhat':
- var = 'rhel'
+ "almalinux",
+ "alpine",
+ "arch",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openeuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "suse",
+ "virtuozzo",
+ ):
+ variant = linux_dist
+ elif linux_dist in ("ubuntu", "linuxmint", "mint"):
+ variant = "ubuntu"
+ elif linux_dist == "redhat":
+ variant = "rhel"
elif linux_dist in (
- 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap',
- 'sles', 'sle_hpc'):
- var = 'suse'
+ "opensuse",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
+ "sles",
+ "sle_hpc",
+ ):
+ variant = "suse"
else:
- var = 'linux'
- elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
- var = system
+ variant = "linux"
+ elif system in (
+ "windows",
+ "darwin",
+ "freebsd",
+ "netbsd",
+ "openbsd",
+ "dragonfly",
+ ):
+ variant = system
+
+ return variant
- info['variant'] = var
+@lru_cache()
+def system_info():
+ info = {
+ "platform": platform.platform(),
+ "system": platform.system(),
+ "release": platform.release(),
+ "python": platform.python_version(),
+ "uname": list(platform.uname()),
+ "dist": get_linux_distro(),
+ }
+ info["variant"] = _get_variant(info)
return info
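
The refactor above pulls the variant lookup out of system_info() into _get_variant(), which reads only the "system" and "dist" keys. A small sketch of the mapping, calling the private helper purely for illustration (the info dicts are made up):

    from cloudinit.util import _get_variant

    print(_get_variant({"system": "Linux", "dist": ("linuxmint", "21", "vanessa")}))
    # -> 'ubuntu'    (Mint is treated as an Ubuntu variant)
    print(_get_variant({"system": "Linux", "dist": ("rocky", "8.5", "")}))
    # -> 'rocky'     (one of the distros newly whitelisted here)
    print(_get_variant({"system": "DragonFly", "dist": ("", "", "")}))
    # -> 'dragonfly'
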
@@ -623,6 +714,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if not o_err:
o_err = sys.stderr
+ # pylint: disable=subprocess-popen-preexec-fn
+ def set_subprocess_umask_and_gid():
+ """Reconfigure umask and group ID to create output files securely.
+
+ This is passed to subprocess.Popen as preexec_fn, so it is executed in
+ the context of the newly-created process. It:
+
+ * sets the umask of the process so created files aren't world-readable
+ * if an adm group exists in the system, sets that as the process' GID
+ (so that the created file(s) are owned by root:adm)
+ """
+ os.umask(0o037)
+ try:
+ group_id = grp.getgrnam("adm").gr_gid
+ except KeyError:
+ # No adm group, don't set a group
+ pass
+ else:
+ os.setgid(group_id)
+
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
@@ -632,7 +743,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ proc = subprocess.Popen(
+ arg,
+ shell=True,
+ stdin=subprocess.PIPE,
+ preexec_fn=set_subprocess_umask_and_gid,
+ )
new_fp = proc.stdin
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
@@ -654,7 +770,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ proc = subprocess.Popen(
+ arg,
+ shell=True,
+ stdin=subprocess.PIPE,
+ preexec_fn=set_subprocess_umask_and_gid,
+ )
new_fp = proc.stdin
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
@@ -663,23 +784,24 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
os.dup2(new_fp.fileno(), o_err.fileno())
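
The preexec_fn wired into both pipes above runs in the child between fork and exec, so the spawned logger process creates its files with a restrictive umask and, when an adm group exists, with group adm. A standalone sketch of the same pattern; the tee target path is an arbitrary example, and the euid check is added only so the sketch also runs unprivileged:

    import grp
    import os
    import subprocess

    def drop_world_readability():
        # Runs in the child before exec (see subprocess preexec_fn docs).
        os.umask(0o037)  # new files get no world access
        if os.geteuid() == 0:  # switching group needs root; cloud-init has it
            try:
                os.setgid(grp.getgrnam("adm").gr_gid)  # files end up root:adm
            except KeyError:
                pass  # no adm group on this system, keep the default gid

    proc = subprocess.Popen(
        "tee -a /tmp/example-output.log",  # example pipe target
        shell=True,
        stdin=subprocess.PIPE,
        preexec_fn=drop_world_readability,
    )
    proc.stdin.write(b"hello\n")
    proc.stdin.close()
    proc.wait()
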
-def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
+def make_url(
+ scheme, host, port=None, path="", params="", query="", fragment=""
+):
- pieces = [scheme or '']
+ pieces = [scheme or ""]
- netloc = ''
+ netloc = ""
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
- pieces.append(netloc or '')
- pieces.append(path or '')
- pieces.append(params or '')
- pieces.append(query or '')
- pieces.append(fragment or '')
+ pieces.append(netloc or "")
+ pieces.append(path or "")
+ pieces.append(params or "")
+ pieces.append(query or "")
+ pieces.append(fragment or "")
return parse.urlunparse(pieces)
@@ -719,8 +841,9 @@ def umask(n_msk):
def center(text, fill, max_len):
- return '{0:{fill}{align}{size}}'.format(text, fill=fill,
- align="^", size=max_len)
+ return "{0:{fill}{align}{size}}".format(
+ text, fill=fill, align="^", size=max_len
+ )
def del_dir(path):
@@ -735,9 +858,9 @@ def del_dir(path):
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud, vd) = read_seeded(base, ext, timeout)
- fill['user-data'] = ud
- fill['vendor-data'] = vd
- fill['meta-data'] = md
+ fill["user-data"] = ud
+ fill["vendor-data"] = vd
+ fill["meta-data"] = md
return True
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
@@ -749,31 +872,33 @@ def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
- '/var/lib/cloud/data/ssl',
- '/var/lib/cloud/instance/data/ssl',
+ "/var/lib/cloud/data/ssl",
+ "/var/lib/cloud/instance/data/ssl",
]
if paths:
- ssl_cert_paths.extend([
- os.path.join(paths.get_ipath_cur('data'), 'ssl'),
- os.path.join(paths.get_cpath('data'), 'ssl'),
- ])
+ ssl_cert_paths.extend(
+ [
+ os.path.join(paths.get_ipath_cur("data"), "ssl"),
+ os.path.join(paths.get_cpath("data"), "ssl"),
+ ]
+ )
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'cert.pem')):
- cert_file = os.path.join(d, 'cert.pem')
+ if os.path.isfile(os.path.join(d, "cert.pem")):
+ cert_file = os.path.join(d, "cert.pem")
break
key_file = None
for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'key.pem')):
- key_file = os.path.join(d, 'key.pem')
+ if os.path.isfile(os.path.join(d, "key.pem")):
+ key_file = os.path.join(d, "key.pem")
break
if cert_file and key_file:
- ssl_details['cert_file'] = cert_file
- ssl_details['key_file'] = key_file
+ ssl_details["cert_file"] = cert_file
+ ssl_details["key_file"] = key_file
elif cert_file:
- ssl_details['cert_file'] = cert_file
+ ssl_details["cert_file"] = cert_file
return ssl_details
@@ -781,32 +906,38 @@ def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
blob = decode_binary(blob)
try:
- LOG.debug("Attempting to load yaml from string "
- "of length %s with allowed root types %s",
- len(blob), allowed)
+ LOG.debug(
+ "Attempting to load yaml from string "
+ "of length %s with allowed root types %s",
+ len(blob),
+ allowed,
+ )
converted = safeyaml.load(blob)
if converted is None:
LOG.debug("loaded blob returned None, returning default.")
converted = default
elif not isinstance(converted, allowed):
            # Yes, this will just be caught, but that's ok for now...
- raise TypeError(("Yaml load allows %s root types,"
- " but got %s instead") %
- (allowed, type_utils.obj_name(converted)))
+ raise TypeError(
+ "Yaml load allows %s root types, but got %s instead"
+ % (allowed, type_utils.obj_name(converted))
+ )
loaded = converted
except (safeyaml.YAMLError, TypeError, ValueError) as e:
- msg = 'Failed loading yaml blob'
+ msg = "Failed loading yaml blob"
mark = None
- if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
- mark = getattr(e, 'context_mark')
- elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
- mark = getattr(e, 'problem_mark')
+ if hasattr(e, "context_mark") and getattr(e, "context_mark"):
+ mark = getattr(e, "context_mark")
+ elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
+ mark = getattr(e, "problem_mark")
if mark:
msg += (
'. Invalid format at line {line} column {col}: "{err}"'.format(
- line=mark.line + 1, col=mark.column + 1, err=e))
+ line=mark.line + 1, col=mark.column + 1, err=e
+ )
+ )
else:
- msg += '. {err}'.format(err=e)
+ msg += ". {err}".format(err=e)
LOG.warning(msg)
return loaded
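
A quick sketch of the allowed-root-types behaviour enforced by the error path reformatted above; the YAML snippets are invented:

    from cloudinit.util import load_yaml

    # A mapping is accepted by the default allowed=(dict,).
    print(load_yaml("a: 1\nb: 2"))                       # {'a': 1, 'b': 2}

    # A list root is rejected, the warning above is logged, and the
    # default (None here) is returned instead.
    print(load_yaml("- one\n- two"))                     # None

    # Other root types can be opted in explicitly.
    print(load_yaml("- one\n- two", allowed=(list,)))    # ['one', 'two']
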
@@ -821,22 +952,25 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
vd_url = "%s%s%s" % (base, "vendor-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = url_helper.read_file_or_url(md_url, timeout=timeout,
- retries=retries)
+ md_resp = url_helper.read_file_or_url(
+ md_url, timeout=timeout, retries=retries
+ )
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
- ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout,
- retries=retries)
+ ud_resp = url_helper.read_file_or_url(
+ ud_url, timeout=timeout, retries=retries
+ )
ud = None
if ud_resp.ok():
ud = ud_resp.contents
vd = None
try:
- vd_resp = url_helper.read_file_or_url(vd_url, timeout=timeout,
- retries=retries)
+ vd_resp = url_helper.read_file_or_url(
+ vd_url, timeout=timeout, retries=retries
+ )
except url_helper.UrlError as e:
LOG.debug("Error in vendor-data response: %s", e)
else:
@@ -856,8 +990,7 @@ def read_conf_d(confd):
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
- confs = [f for f in confs
- if os.path.isfile(os.path.join(confd, f))]
+ confs = [f for f in confs if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
@@ -872,12 +1005,13 @@ def read_conf_with_confd(cfgfile):
confd = False
if "conf_d" in cfg:
- confd = cfg['conf_d']
+ confd = cfg["conf_d"]
if confd:
if not isinstance(confd, str):
- raise TypeError(("Config file %s contains 'conf_d' "
- "with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
+ raise TypeError(
+ "Config file %s contains 'conf_d' with non-string type %s"
+ % (cfgfile, type_utils.obj_name(confd))
+ )
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -921,19 +1055,21 @@ def read_cc_from_cmdline(cmdline=None):
if end < 0:
end = clen
tokens.append(
- parse.unquote(
- cmdline[begin + begin_l:end].lstrip()).replace("\\n", "\n"))
+ parse.unquote(cmdline[begin + begin_l : end].lstrip()).replace(
+ "\\n", "\n"
+ )
+ )
begin = cmdline.find(tag_begin, end + end_l)
- return '\n'.join(tokens)
+ return "\n".join(tokens)
def dos2unix(contents):
# find first end of line
- pos = contents.find('\n')
- if pos <= 0 or contents[pos - 1] != '\r':
+ pos = contents.find("\n")
+ if pos <= 0 or contents[pos - 1] != "\r":
return contents
- return contents.replace('\r\n', '\n')
+ return contents.replace("\r\n", "\n")
def get_hostname_fqdn(cfg, cloud, metadata_only=False):
@@ -948,20 +1084,20 @@ def get_hostname_fqdn(cfg, cloud, metadata_only=False):
"""
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
- fqdn = cfg['fqdn']
- hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
+ fqdn = cfg["fqdn"]
+ hostname = get_cfg_option_str(cfg, "hostname", fqdn.split(".")[0])
else:
- if "hostname" in cfg and cfg['hostname'].find('.') > 0:
+ if "hostname" in cfg and cfg["hostname"].find(".") > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
- fqdn = cfg['hostname']
- hostname = cfg['hostname'][:fqdn.find('.')]
+ fqdn = cfg["hostname"]
+ hostname = cfg["hostname"][: fqdn.find(".")]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only)
if "hostname" in cfg:
- hostname = cfg['hostname']
+ hostname = cfg["hostname"]
else:
hostname = cloud.get_hostname(metadata_only=metadata_only)
return (hostname, fqdn)
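
A sketch of the precedence get_hostname_fqdn() implements above, using a stub in place of the real cloud object; all hostnames are made up:

    from cloudinit.util import get_hostname_fqdn

    class FakeCloud:
        # Minimal stand-in for the datasource-backed cloud argument.
        def get_hostname(self, fqdn=False, metadata_only=False):
            return "meta.example.internal" if fqdn else "meta"

    cloud = FakeCloud()
    # An explicit fqdn wins; hostname defaults to its first label.
    print(get_hostname_fqdn({"fqdn": "node1.example.com"}, cloud))
    # -> ('node1', 'node1.example.com')
    # A dotted hostname is split into (hostname, fqdn).
    print(get_hostname_fqdn({"hostname": "db2.example.com"}, cloud))
    # -> ('db2', 'db2.example.com')
    # Otherwise both values come from the cloud metadata.
    print(get_hostname_fqdn({}, cloud))
    # -> ('meta', 'meta.example.internal')
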
@@ -1022,14 +1158,17 @@ def is_resolvable(name):
global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
- badnames = ("does-not-exist.example.com.", "example.invalid.",
- "__cloud_init_expected_not_found__")
+ badnames = (
+ "does-not-exist.example.com.",
+ "example.invalid.",
+ "__cloud_init_expected_not_found__",
+ )
badresults = {}
for iname in badnames:
try:
- result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM,
- socket.AI_CANONNAME)
+ result = socket.getaddrinfo(
+ iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME
+ )
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1065,9 +1204,12 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
- return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
- func=is_resolvable,
- args=(parse.urlparse(url).hostname,))
+ return log_time(
+ logfunc=LOG.debug,
+ msg="Resolving URL: " + url,
+ func=is_resolvable,
+ args=(parse.urlparse(url).hostname,),
+ )
def search_for_mirror(candidates):
@@ -1103,16 +1245,19 @@ def close_stdin():
os.dup2(fp.fileno(), sys.stdin.fileno())
-def find_devs_with_freebsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with_freebsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
devlist = []
if not criteria:
return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*")
if criteria.startswith("LABEL="):
label = criteria.lstrip("LABEL=")
devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
+ p
+ for p in ["/dev/msdosfs/" + label, "/dev/iso9660/" + label]
+ if os.path.exists(p)
+ ]
elif criteria == "TYPE=vfat":
devlist = glob.glob("/dev/msdosfs/*")
elif criteria == "TYPE=iso9660":
@@ -1120,8 +1265,9 @@ def find_devs_with_freebsd(criteria=None, oformat='device',
return devlist
-def find_devs_with_netbsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with_netbsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
devlist = []
label = None
_type = None
@@ -1130,43 +1276,65 @@ def find_devs_with_netbsd(criteria=None, oformat='device',
label = criteria.lstrip("LABEL=")
if criteria.startswith("TYPE="):
_type = criteria.lstrip("TYPE=")
- out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
for dev in out.split():
if label or _type:
- mscdlabel_out, _ = subp.subp(['mscdlabel', dev], rcs=[0, 1])
+ mscdlabel_out, _ = subp.subp(["mscdlabel", dev], rcs=[0, 1])
if label and not ('label "%s"' % label) in mscdlabel_out:
continue
if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
continue
if _type == "vfat" and "ISO filesystem" in mscdlabel_out:
continue
- devlist.append('/dev/' + dev)
+ devlist.append("/dev/" + dev)
return devlist
-def find_devs_with_openbsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
- out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+def find_devs_with_openbsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
+ out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
devlist = []
- for entry in out.split(','):
- if not entry.endswith(':'):
+ for entry in out.rstrip().split(","):
+ if not entry.endswith(":"):
# ffs partition with a serial, not a config-drive
continue
- if entry == 'fd0:':
+ if entry == "fd0:":
continue
- part_id = 'a' if entry.startswith('cd') else 'i'
- devlist.append(entry[:-1] + part_id)
+ devlist.append(entry[:-1] + "a")
+ if not entry.startswith("cd"):
+ devlist.append(entry[:-1] + "i")
+ return ["/dev/" + i for i in devlist]
+
+
+def find_devs_with_dragonflybsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
+ out, _err = subp.subp(["sysctl", "-n", "kern.disks"], rcs=[0])
+ devlist = [
+ i
+ for i in sorted(out.split(), reverse=True)
+ if not i.startswith("md") and not i.startswith("vn")
+ ]
+
if criteria == "TYPE=iso9660":
- devlist = [i for i in devlist if i.startswith('cd')]
+ devlist = [
+ i for i in devlist if i.startswith("cd") or i.startswith("acd")
+ ]
elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
- devlist = [i for i in devlist if not i.startswith('cd')]
+ devlist = [
+ i
+ for i in devlist
+ if not (i.startswith("cd") or i.startswith("acd"))
+ ]
elif criteria:
LOG.debug("Unexpected criteria: %s", criteria)
- return ['/dev/' + i for i in devlist]
+ return ["/dev/" + i for i in devlist]
-def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
@@ -1175,16 +1343,17 @@ def find_devs_with(criteria=None, oformat='device',
UUID=<uuid>
"""
if is_FreeBSD():
- return find_devs_with_freebsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_freebsd(criteria, oformat, tag, no_cache, path)
elif is_NetBSD():
- return find_devs_with_netbsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_netbsd(criteria, oformat, tag, no_cache, path)
elif is_OpenBSD():
- return find_devs_with_openbsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_openbsd(criteria, oformat, tag, no_cache, path)
+ elif is_DragonFlyBSD():
+ return find_devs_with_dragonflybsd(
+ criteria, oformat, tag, no_cache, path
+ )
- blk_id_cmd = ['blkid']
+ blk_id_cmd = ["blkid"]
options = []
if criteria:
# Search for block devices with tokens named NAME that
@@ -1206,7 +1375,7 @@ def find_devs_with(criteria=None, oformat='device',
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
- options.append('-o%s' % (oformat))
+ options.append("-o%s" % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
@@ -1240,9 +1409,9 @@ def blkid(devs=None, disable_cache=False):
else:
devs = list(devs)
- cmd = ['blkid', '-o', 'full']
+ cmd = ["blkid", "-o", "full"]
if disable_cache:
- cmd.extend(['-c', '/dev/null'])
+ cmd.extend(["-c", "/dev/null"])
cmd.extend(devs)
# we have to decode with 'replace' as shelx.split (called by
@@ -1260,7 +1429,7 @@ def blkid(devs=None, disable_cache=False):
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, 'rb') as ifh:
+ with open(fname, "rb") as ifh:
return ifh.read(max_bytes)
@@ -1278,7 +1447,7 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = io.BytesIO()
try:
- with open(fname, 'rb') as ifh:
+ with open(fname, "rb") as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
@@ -1313,7 +1482,7 @@ def _get_cmdline():
def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
+ if "DEBUG_PROC_CMDLINE" in os.environ:
return os.environ["DEBUG_PROC_CMDLINE"]
return _get_cmdline()
@@ -1366,18 +1535,18 @@ def chownbyname(fname, user=None, group=None):
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
- if not cfg or 'output' not in cfg:
+ if not cfg or "output" not in cfg:
return ret
- outcfg = cfg['output']
+ outcfg = cfg["output"]
if mode in outcfg:
modecfg = outcfg[mode]
else:
- if 'all' not in outcfg:
+ if "all" not in outcfg:
return ret
# if there is a 'all' item in the output list
# then it applies to all users of this (init, config, final)
- modecfg = outcfg['all']
+ modecfg = outcfg["all"]
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
@@ -1393,10 +1562,10 @@ def get_output_cfg(cfg, mode):
# if it is a dictionary, expect 'out' and 'error'
# items, which indicate out and error
if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
+ if "output" in modecfg:
+ ret[0] = modecfg["output"]
+ if "error" in modecfg:
+ ret[1] = modecfg["error"]
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
@@ -1411,7 +1580,7 @@ def get_output_cfg(cfg, mode):
found = False
for s in swlist:
if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
+ val = "%s %s" % (s, val[len(s) :].strip())
found = True
break
if not found:
@@ -1430,20 +1599,20 @@ def get_config_logfiles(cfg):
logs = []
if not cfg or not isinstance(cfg, dict):
return logs
- default_log = cfg.get('def_log_file')
+ default_log = cfg.get("def_log_file")
if default_log:
logs.append(default_log)
for fmt in get_output_cfg(cfg, None):
if not fmt:
continue
- match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+ match = re.match(r"(?P<type>\||>+)\s*(?P<target>.*)", fmt)
if not match:
continue
- target = match.group('target')
+ target = match.group("target")
parts = target.split()
if len(parts) == 1:
logs.append(target)
- elif ['tee', '-a'] == parts[:2]:
+ elif ["tee", "-a"] == parts[:2]:
logs.append(parts[2])
return list(set(logs))
@@ -1508,17 +1677,19 @@ def load_json(text, root_types=(dict,)):
decoded = json.loads(decode_binary(text))
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
- raise TypeError("(%s) root types expected, got %s instead"
- % (expected_types, type(decoded)))
+ raise TypeError(
+ "(%s) root types expected, got %s instead"
+ % (expected_types, type(decoded))
+ )
return decoded
def json_serialize_default(_obj):
"""Handler for types which aren't json serializable."""
try:
- return 'ci-b64:{0}'.format(b64e(_obj))
+ return "ci-b64:{0}".format(b64e(_obj))
except AttributeError:
- return 'Warning: redacted unserializable type {0}'.format(type(_obj))
+ return "Warning: redacted unserializable type {0}".format(type(_obj))
def json_preserialize_binary(data):
@@ -1533,7 +1704,7 @@ def json_preserialize_binary(data):
if isinstance(value, (dict)):
data[key] = json_preserialize_binary(value)
if isinstance(value, bytes):
- data[key] = 'ci-b64:{0}'.format(b64e(value))
+ data[key] = "ci-b64:{0}".format(b64e(value))
return data
@@ -1541,8 +1712,12 @@ def json_dumps(data):
"""Return data in nicely formatted json."""
try:
return json.dumps(
- data, indent=1, sort_keys=True, separators=(',', ': '),
- default=json_serialize_default)
+ data,
+ indent=1,
+ sort_keys=True,
+ separators=(",", ": "),
+ default=json_serialize_default,
+ )
except UnicodeDecodeError:
if sys.version_info[:2] == (2, 7):
data = json_preserialize_binary(data)
@@ -1577,17 +1752,17 @@ def mounts():
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
- method = 'proc'
+ method = "proc"
else:
(mountoutput, _err) = subp.subp("mount")
mount_locs = mountoutput.splitlines()
- method = 'mount'
- mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
+ method = "mount"
+ mountre = r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$"
for mpline in mount_locs:
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
- if method == 'proc':
+ if method == "proc":
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
@@ -1601,9 +1776,9 @@ def mounts():
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
- 'fstype': fstype,
- 'mountpoint': mp,
- 'opts': opts,
+ "fstype": fstype,
+ "mountpoint": mp,
+ "opts": opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
@@ -1611,8 +1786,9 @@ def mounts():
return mounted
-def mount_cb(device, callback, data=None, mtype=None,
- update_env_for_mount=None):
+def mount_cb(
+ device, callback, data=None, mtype=None, update_env_for_mount=None
+):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
@@ -1630,8 +1806,10 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes = None
else:
raise TypeError(
- 'Unsupported type provided for mtype parameter: {_type}'.format(
- _type=type(mtype)))
+ "Unsupported type provided for mtype parameter: {_type}".format(
+ _type=type(mtype)
+ )
+ )
# clean up 'mtype' input a bit based on platform.
if is_Linux():
@@ -1639,7 +1817,7 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes = ["auto"]
elif is_BSD():
if mtypes is None:
- mtypes = ['ufs', 'cd9660', 'msdos']
+ mtypes = ["ufs", "cd9660", "msdos"]
for index, mtype in enumerate(mtypes):
if mtype == "iso9660":
mtypes[index] = "cd9660"
@@ -1647,21 +1825,21 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes[index] = "msdos"
else:
# we cannot do a smart "auto", so just call 'mount' once with no -t
- mtypes = ['']
+ mtypes = [""]
mounted = mounts()
with temp_utils.tempdir() as tmpd:
umount = False
if os.path.realpath(device) in mounted:
- mountpoint = mounted[os.path.realpath(device)]['mountpoint']
+ mountpoint = mounted[os.path.realpath(device)]["mountpoint"]
else:
failure_reason = None
for mtype in mtypes:
mountpoint = None
try:
- mountcmd = ['mount', '-o', 'ro']
+ mountcmd = ["mount", "-o", "ro"]
if mtype:
- mountcmd.extend(['-t', mtype])
+ mountcmd.extend(["-t", mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp.subp(mountcmd, update_env=update_env_for_mount)
@@ -1669,12 +1847,21 @@ def mount_cb(device, callback, data=None, mtype=None,
mountpoint = tmpd
break
except (IOError, OSError) as exc:
- LOG.debug("Failed mount of '%s' as '%s': %s",
- device, mtype, exc)
+ LOG.debug(
+ "Failed to mount device: '%s' with type: '%s' "
+ "using mount command: '%s', "
+ "which caused exception: %s",
+ device,
+ mtype,
+ " ".join(mountcmd),
+ exc,
+ )
failure_reason = exc
if not mountpoint:
- raise MountFailedError("Failed mounting %s to %s due to: %s" %
- (device, tmpd, failure_reason))
+ raise MountFailedError(
+ "Failed mounting %s to %s due to: %s"
+ % (device, tmpd, failure_reason)
+ )
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
@@ -1741,31 +1928,37 @@ def boottime():
NULL_BYTES = b"\x00"
class timeval(ctypes.Structure):
- _fields_ = [
- ("tv_sec", ctypes.c_int64),
- ("tv_usec", ctypes.c_int64)
- ]
- libc = ctypes.CDLL(ctypes.util.find_library('c'))
+ _fields_ = [("tv_sec", ctypes.c_int64), ("tv_usec", ctypes.c_int64)]
+
+ libc = ctypes.CDLL(ctypes.util.find_library("c"))
size = ctypes.c_size_t()
size.value = ctypes.sizeof(timeval)
buf = timeval()
- if libc.sysctlbyname(b"kern.boottime" + NULL_BYTES, ctypes.byref(buf),
- ctypes.byref(size), None, 0) != -1:
+ if (
+ libc.sysctlbyname(
+ b"kern.boottime" + NULL_BYTES,
+ ctypes.byref(buf),
+ ctypes.byref(size),
+ None,
+ 0,
+ )
+ != -1
+ ):
return buf.tv_sec + buf.tv_usec / 1000000.0
raise RuntimeError("Unable to retrieve kern.boottime on this system")
def uptime():
- uptime_str = '??'
- method = 'unknown'
+ uptime_str = "??"
+ method = "unknown"
try:
if os.path.exists("/proc/uptime"):
- method = '/proc/uptime'
+ method = "/proc/uptime"
contents = load_file("/proc/uptime")
if contents:
uptime_str = contents.split()[0]
else:
- method = 'ctypes'
+ method = "ctypes"
# This is the *BSD codepath
uptime_str = str(time.time() - boottime())
@@ -1782,7 +1975,7 @@ def ensure_file(
path, mode: int = 0o644, *, preserve_mode: bool = False
) -> None:
write_file(
- path, content='', omode="ab", mode=mode, preserve_mode=preserve_mode
+ path, content="", omode="ab", mode=mode, preserve_mode=preserve_mode
)
@@ -1800,6 +1993,67 @@ def chmod(path, mode):
os.chmod(path, real_mode)
+def get_group_id(grp_name: str) -> int:
+ """
+ Returns the group id of a group name, or -1 if no group exists
+
+ @param grp_name: the name of the group
+ """
+ gid = -1
+ try:
+ gid = grp.getgrnam(grp_name).gr_gid
+ except KeyError:
+ LOG.debug("Group %s is not a valid group name", grp_name)
+ return gid
+
+
+def get_permissions(path: str) -> int:
+ """
+ Returns the octal permissions of the file/folder pointed by the path,
+ encoded as an int.
+
+ @param path: The full path of the file/folder.
+ """
+
+ return stat.S_IMODE(os.stat(path).st_mode)
+
+
+def get_owner(path: str) -> str:
+ """
+ Returns the owner of the file/folder pointed by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return pwd.getpwuid(st.st_uid).pw_name
+
+
+def get_group(path: str) -> str:
+ """
+ Returns the group of the file/folder pointed by the path.
+
+ @param path: The full path of the file/folder.
+ """
+ st = os.stat(path)
+ return grp.getgrgid(st.st_gid).gr_name
+
+
+def get_user_groups(username: str) -> List[str]:
+ """
+ Returns a list of all groups to which the user belongs
+
+ @param username: the user we want to check
+ """
+ groups = []
+ for group in grp.getgrall():
+ if username in group.gr_mem:
+ groups.append(group.gr_name)
+
+ gid = pwd.getpwnam(username).pw_gid
+ groups.append(grp.getgrgid(gid).gr_name)
+ return groups
+
+
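
A short sketch combining the new ownership and permission helpers above; the temporary file and the adm group name are just placeholders:

    import getpass
    import os
    import tempfile

    from cloudinit.util import (
        get_group,
        get_group_id,
        get_owner,
        get_permissions,
        get_user_groups,
    )

    fd, path = tempfile.mkstemp()  # throwaway file for the example
    os.close(fd)

    print(get_owner(path), get_group(path))    # e.g. 'root root' when run as root
    print(oct(get_permissions(path)))          # e.g. '0o600' for mkstemp files
    print(get_group_id("adm"))                 # gid of adm, or -1 if it is absent
    print(get_user_groups(getpass.getuser()))  # every group the current user is in

    os.unlink(path)
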
def write_file(
filename,
content,
@@ -1826,25 +2080,30 @@ def write_file(
if preserve_mode:
try:
- file_stat = os.stat(filename)
- mode = stat.S_IMODE(file_stat.st_mode)
+ mode = get_permissions(filename)
except OSError:
pass
if ensure_dir_exists:
ensure_dir(os.path.dirname(filename))
- if 'b' in omode.lower():
+ if "b" in omode.lower():
content = encode_text(content)
- write_type = 'bytes'
+ write_type = "bytes"
else:
content = decode_binary(content)
- write_type = 'characters'
+ write_type = "characters"
try:
mode_r = "%o" % mode
except TypeError:
mode_r = "%r" % mode
- LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode_r, len(content), write_type)
+ LOG.debug(
+ "Writing to %s - %s: [%s] %s %s",
+ filename,
+ omode,
+ mode_r,
+ len(content),
+ write_type,
+ )
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
@@ -1866,7 +2125,7 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def make_header(comment_char="#", base='created'):
+def make_header(comment_char="#", base="created"):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
@@ -1885,13 +2144,14 @@ def abs_join(base, *paths):
def shellify(cmdlist, add_header=True):
if not isinstance(cmdlist, (tuple, list)):
raise TypeError(
- "Input to shellify was type '%s'. Expected list or tuple." %
- (type_utils.obj_name(cmdlist)))
+ "Input to shellify was type '%s'. Expected list or tuple."
+ % (type_utils.obj_name(cmdlist))
+ )
- content = ''
+ content = ""
if add_header:
content += "#!/bin/sh\n"
- escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
+ escaped = "%s%s%s%s" % ("'", "\\", "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single tick.
@@ -1900,15 +2160,19 @@ def shellify(cmdlist, add_header=True):
fixed = []
for f in args:
fixed.append("'%s'" % (str(f).replace("'", escaped)))
- content = "%s%s\n" % (content, ' '.join(fixed))
+ content = "%s%s\n" % (content, " ".join(fixed))
cmds_made += 1
elif isinstance(args, str):
content = "%s%s\n" % (content, args)
cmds_made += 1
+ # Yaml parsing of a comment results in None
+ elif args is None:
+ pass
else:
raise TypeError(
"Unable to shellify type '%s'. Expected list, string, tuple. "
- "Got: %s" % (type_utils.obj_name(args), args))
+ "Got: %s" % (type_utils.obj_name(args), args)
+ )
LOG.debug("Shellified %s commands.", cmds_made)
return content
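
The shellify() change above turns a bare YAML comment (which the parser yields as None) into a no-op instead of a TypeError. A small sketch with an invented runcmd list:

    from cloudinit.util import shellify

    runcmd = [
        ["mkdir", "-p", "/var/tmp/example dir"],   # list items get single-quoted
        "echo hello >> /var/tmp/example.log",      # strings pass through verbatim
        None,                                      # a comment-only entry is skipped
    ]
    print(shellify(runcmd))
    # #!/bin/sh
    # 'mkdir' '-p' '/var/tmp/example dir'
    # echo hello >> /var/tmp/example.log
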
@@ -1916,9 +2180,9 @@ def shellify(cmdlist, add_header=True):
def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
- line = line[len(prefix):]
+ line = line[len(prefix) :]
if suffix and line.endswith(suffix):
- line = line[:-len(suffix)]
+ line = line[: -len(suffix)]
return line
@@ -1963,7 +2227,8 @@ def is_container():
_is_container_systemd,
_is_container_freebsd,
_is_container_upstart,
- _is_container_old_lxc)
+ _is_container_old_lxc,
+ )
for helper in checks:
if helper():
@@ -2002,10 +2267,10 @@ def is_container():
def is_lxd():
"""Check to see if we are running in a lxd container."""
- return os.path.exists('/dev/lxd/sock')
+ return os.path.exists("/dev/lxd/sock")
-def get_proc_env(pid, encoding='utf-8', errors='replace'):
+def get_proc_env(pid, encoding="utf-8", errors="replace"):
"""
Return the environment in a dict that a given process id was started with.
@@ -2085,7 +2350,7 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
- path_elements = [e for e in path.split('/') if e]
+ path_elements = [e for e in path.split("/") if e]
devpth = None
fs_type = None
match_mount_point = None
@@ -2100,12 +2365,13 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
- log.debug("Line %d has two few columns (%d): %s",
- i + 1, len(parts), line)
+ log.debug(
+            "Line %d has too few columns (%d): %s", i + 1, len(parts), line
+ )
return None
mount_point = parts[4]
- mount_point_elements = [e for e in mount_point.split('/') if e]
+ mount_point_elements = [e for e in mount_point.split("/") if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
@@ -2118,18 +2384,20 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
# Ignore mount points higher than an already seen mount
# point.
- if (match_mount_point_elements is not None and
- len(match_mount_point_elements) > len(mount_point_elements)):
+ if match_mount_point_elements is not None and len(
+ match_mount_point_elements
+ ) > len(mount_point_elements):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
- i = parts.index('-')
+ i = parts.index("-")
except ValueError:
- log.debug("Did not find column named '-' in line %d: %s",
- i + 1, line)
+ log.debug(
+ "Did not find column named '-' in line %d: %s", i + 1, line
+ )
return None
# Get the path to the device.
@@ -2137,8 +2405,9 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
- log.debug("Too few columns after '-' column in line %d: %s",
- i + 1, line)
+ log.debug(
+ "Too few columns after '-' column in line %d: %s", i + 1, line
+ )
return None
match_mount_point = mount_point
@@ -2165,12 +2434,12 @@ def parse_mtab(path):
def find_freebsd_part(fs):
- splitted = fs.split('/')
+ splitted = fs.split("/")
if len(splitted) == 3:
return splitted[2]
- elif splitted[2] in ['label', 'gpt', 'ufs']:
+ elif splitted[2] in ["label", "gpt", "ufs"]:
target_label = fs[5:]
- (part, _err) = subp.subp(['glabel', 'status', '-s'])
+ (part, _err) = subp.subp(["glabel", "status", "-s"])
for labels in part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0] == target_label:
@@ -2181,23 +2450,31 @@ def find_freebsd_part(fs):
LOG.warning("Unexpected input in find_freebsd_part: %s", fs)
+def find_dragonflybsd_part(fs):
+ splitted = fs.split("/")
+ if len(splitted) == 3 and splitted[1] == "dev":
+ return splitted[2]
+ else:
+ LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs)
+
+
def get_path_dev_freebsd(path, mnt_list):
path_found = None
for line in mnt_list.split("\n"):
items = line.split()
- if (len(items) > 2 and os.path.exists(items[1] + path)):
+ if len(items) > 2 and os.path.exists(items[1] + path):
path_found = line
break
return path_found
def get_mount_info_freebsd(path):
- (result, err) = subp.subp(['mount', '-p', path], rcs=[0, 1])
+ (result, err) = subp.subp(["mount", "-p", path], rcs=[0, 1])
if len(err):
# find a path if the input is not a mounting point
- (mnt_list, err) = subp.subp(['mount', '-p'])
+ (mnt_list, err) = subp.subp(["mount", "-p"])
path_found = get_path_dev_freebsd(path, mnt_list)
- if (path_found is None):
+ if path_found is None:
return None
result = path_found
ret = result.split()
@@ -2207,17 +2484,17 @@ def get_mount_info_freebsd(path):
def get_device_info_from_zpool(zpool):
# zpool has 10 second timeout waiting for /dev/zfs LP: #1760173
- if not os.path.exists('/dev/zfs'):
- LOG.debug('Cannot get zpool info, no /dev/zfs')
+ if not os.path.exists("/dev/zfs"):
+ LOG.debug("Cannot get zpool info, no /dev/zfs")
return None
try:
- (zpoolstatus, err) = subp.subp(['zpool', 'status', zpool])
+ (zpoolstatus, err) = subp.subp(["zpool", "status", zpool])
except subp.ProcessExecutionError as err:
LOG.warning("Unable to get zpool status of %s: %s", zpool, err)
return None
if len(err):
return None
- r = r'.*(ONLINE).*'
+ r = r".*(ONLINE).*"
for line in zpoolstatus.split("\n"):
if re.search(r, line) and zpool not in line and "state" not in line:
disk = line.split()[0]
@@ -2226,14 +2503,21 @@ def get_device_info_from_zpool(zpool):
def parse_mount(path):
- (mountoutput, _err) = subp.subp(['mount'])
+ (mountoutput, _err) = subp.subp(["mount"])
mount_locs = mountoutput.splitlines()
# there are 2 types of mount outputs we have to parse therefore
# the regex is a bit complex. to better understand this regex see:
# https://regex101.com/r/2F6c1k/1
# https://regex101.com/r/T2en7a/1
- regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
- r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
+ regex = (
+ r"^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) "
+ r"(?=(?:type)[\s]+([\S]+)|\(([^,]*))"
+ )
+ if is_DragonFlyBSD():
+ regex = (
+ r"^(/dev/[\S]+|\S*?) on (/[\S]*) "
+ r"(?=(?:type)[\s]+([\S]+)|\(([^,]*))"
+ )
for line in mount_locs:
m = re.search(regex, line)
if not m:
@@ -2245,15 +2529,19 @@ def parse_mount(path):
fs_type = m.group(3)
if fs_type is None:
fs_type = m.group(4)
- LOG.debug('found line in mount -> devpth: %s, mount_point: %s, '
- 'fs_type: %s', devpth, mount_point, fs_type)
+ LOG.debug(
+ "found line in mount -> devpth: %s, mount_point: %s, fs_type: %s",
+ devpth,
+ mount_point,
+ fs_type,
+ )
# check whether the dev refers to a label on FreeBSD
# for example, if dev is '/dev/label/rootfs', we should
# continue finding the real device like '/dev/da0'.
# this is only valid for non zfs file systems as a zpool
# can have gpt labels as disk.
- devm = re.search('^(/dev/.+)p([0-9])$', devpth)
- if not devm and is_FreeBSD() and fs_type != 'zfs':
+ devm = re.search("^(/dev/.+)p([0-9])$", devpth)
+ if not devm and is_FreeBSD() and fs_type != "zfs":
return get_mount_info_freebsd(path)
elif mount_point == path:
return devpth, fs_type, mount_point
@@ -2289,7 +2577,7 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
- mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
+ mountinfo_path = "/proc/%s/mountinfo" % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log, get_mnt_opts)
@@ -2367,7 +2655,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
if len(missing):
raise ValueError(
- 'Missing required files: {files}'.format(files=','.join(missing)))
+ "Missing required files: {files}".format(files=",".join(missing))
+ )
return ret
@@ -2375,16 +2664,19 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
def read_meminfo(meminfo="/proc/meminfo", raw=False):
# read a /proc/meminfo style file and return
# a dict with 'total', 'free', and 'available'
- mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
- kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
- 'MemAvailable:': 'available'}
+ mpliers = {"kB": 2 ** 10, "mB": 2 ** 20, "B": 1, "gB": 2 ** 30}
+ kmap = {
+ "MemTotal:": "total",
+ "MemFree:": "free",
+ "MemAvailable:": "available",
+ }
ret = {}
for line in load_file(meminfo).splitlines():
try:
key, value, unit = line.split()
except ValueError:
key, value = line.split()
- unit = 'B'
+ unit = "B"
if raw:
ret[key] = int(value) * mpliers[unit]
elif key in kmap:
@@ -2395,21 +2687,21 @@ def read_meminfo(meminfo="/proc/meminfo", raw=False):
def human2bytes(size):
"""Convert human string or integer to size in bytes
- 10M => 10485760
- .5G => 536870912
+ 10M => 10485760
+ .5G => 536870912
"""
size_in = size
if size.endswith("B"):
size = size[:-1]
- mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
+ mpliers = {"B": 1, "K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30, "T": 2 ** 40}
num = size
- mplier = 'B'
+ mplier = "B"
for m in mpliers:
if size.endswith(m):
mplier = m
- num = size[0:-len(m)]
+ num = size[0 : -len(m)]
try:
num = float(num)
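
The multiplier table above drives human2bytes(); a few worked conversions, matching the values quoted in its docstring:

    from cloudinit.util import human2bytes

    print(human2bytes("10M"))   # 10 * 2**20  == 10485760
    print(human2bytes(".5G"))   # 0.5 * 2**30 == 536870912
    print(human2bytes("1024"))  # bare numbers are taken as bytes: 1024
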
@@ -2426,9 +2718,9 @@ def is_x86(uname_arch=None):
"""Return True if platform is x86-based"""
if uname_arch is None:
uname_arch = os.uname()[4]
- x86_arch_match = (
- uname_arch == 'x86_64' or
- (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
+ x86_arch_match = uname_arch == "x86_64" or (
+ uname_arch[0] == "i" and uname_arch[2:] == "86"
+ )
return x86_arch_match
@@ -2439,7 +2731,7 @@ def message_from_string(string):
def get_installed_packages(target=None):
- (out, _) = subp.subp(['dpkg-query', '--list'], target=target, capture=True)
+ (out, _) = subp.subp(["dpkg-query", "--list"], target=target, capture=True)
pkgs_inst = set()
for line in out.splitlines():
@@ -2460,17 +2752,17 @@ def system_is_snappy():
orpath = "/etc/os-release"
try:
orinfo = load_shell_content(load_file(orpath, quiet=True))
- if orinfo.get('ID', '').lower() == "ubuntu-core":
+ if orinfo.get("ID", "").lower() == "ubuntu-core":
return True
except ValueError as e:
LOG.warning("Unexpected error loading '%s': %s", orpath, e)
cmdline = get_cmdline()
- if 'snap_core=' in cmdline:
+ if "snap_core=" in cmdline:
return True
content = load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
+ if "ubuntu-core" in content.lower():
return True
if os.path.isdir("/etc/system-image/config.d/"):
return True
@@ -2482,7 +2774,7 @@ def indent(text, prefix):
lines = []
for line in text.splitlines(True):
lines.append(prefix + line)
- return ''.join(lines)
+ return "".join(lines)
def rootdev_from_cmdline(cmdline):
@@ -2497,12 +2789,13 @@ def rootdev_from_cmdline(cmdline):
if found.startswith("/dev/"):
return found
if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
+ return "/dev/disk/by-label/" + found[len("LABEL=") :]
if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):].lower()
+ return "/dev/disk/by-uuid/" + found[len("UUID=") :].lower()
if found.startswith("PARTUUID="):
- disks_path = ("/dev/disk/by-partuuid/" +
- found[len("PARTUUID="):].lower())
+ disks_path = (
+ "/dev/disk/by-partuuid/" + found[len("PARTUUID=") :].lower()
+ )
if os.path.exists(disks_path):
return disks_path
results = find_devs_with(found)
@@ -2517,9 +2810,9 @@ def rootdev_from_cmdline(cmdline):
def load_shell_content(content, add_empty=False, empty_val=None):
"""Given shell like syntax (key=value\nkey2=value2\n) in content
- return the data in dictionary form. If 'add_empty' is True
- then add entries in to the returned dictionary for 'VAR='
- variables. Set their value to empty_val."""
+ return the data in dictionary form. If 'add_empty' is True
+    then add entries into the returned dictionary for 'VAR='
+ variables. Set their value to empty_val."""
def _shlex_split(blob):
return shlex.split(blob, comments=True)
@@ -2535,33 +2828,42 @@ def load_shell_content(content, add_empty=False, empty_val=None):
return data
-def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
+def wait_for_files(flist, maxwait, naplen=0.5, log_pre=""):
need = set(flist)
waited = 0
while True:
need -= set([f for f in need if os.path.exists(f)])
if len(need) == 0:
- LOG.debug("%sAll files appeared after %s seconds: %s",
- log_pre, waited, flist)
+ LOG.debug(
+ "%sAll files appeared after %s seconds: %s",
+ log_pre,
+ waited,
+ flist,
+ )
return []
if waited == 0:
- LOG.debug("%sWaiting up to %s seconds for the following files: %s",
- log_pre, maxwait, flist)
+ LOG.debug(
+ "%sWaiting up to %s seconds for the following files: %s",
+ log_pre,
+ maxwait,
+ flist,
+ )
if waited + naplen > maxwait:
break
time.sleep(naplen)
waited += naplen
- LOG.debug("%sStill missing files after %s seconds: %s",
- log_pre, maxwait, need)
+ LOG.debug(
+ "%sStill missing files after %s seconds: %s", log_pre, maxwait, need
+ )
return need
def mount_is_read_write(mount_point):
"""Check whether the given mount point is mounted rw"""
result = get_mount_info(mount_point, get_mnt_opts=True)
- mount_opts = result[-1].split(',')
- return mount_opts[0] == 'rw'
+ mount_opts = result[-1].split(",")
+ return mount_opts[0] == "rw"
def udevadm_settle(exists=None, timeout=None):
@@ -2571,9 +2873,9 @@ def udevadm_settle(exists=None, timeout=None):
# skip the settle if the requested path already exists
if os.path.exists(exists):
return
- settle_cmd.extend(['--exit-if-exists=%s' % exists])
+ settle_cmd.extend(["--exit-if-exists=%s" % exists])
if timeout:
- settle_cmd.extend(['--timeout=%s' % timeout])
+ settle_cmd.extend(["--timeout=%s" % timeout])
return subp.subp(settle_cmd)
@@ -2586,7 +2888,7 @@ def get_proc_ppid(pid):
try:
contents = load_file("/proc/%s/stat" % pid, quiet=True)
except IOError as e:
- LOG.warning('Failed to load /proc/%s/stat. %s', pid, e)
+ LOG.warning("Failed to load /proc/%s/stat. %s", pid, e)
if contents:
parts = contents.split(" ", 4)
# man proc says
@@ -2594,4 +2896,20 @@ def get_proc_ppid(pid):
ppid = int(parts[3])
return ppid
+
+def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False):
+ """
+ Print error to stderr and return or exit
+
+ @param msg: message to print
+ @param rc: return code (default: 1)
+    @param fmt: format string to wrap the message in (default: 'Error:\n{}')
+ @param sys_exit: exit when called (default: false)
+ """
+ print(fmt.format(msg), file=sys.stderr)
+ if sys_exit:
+ sys.exit(rc)
+ return rc
+
+
# vi: ts=4 expandtab
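
A usage sketch for the error() helper added at the end of util.py; the message text and exit codes are arbitrary:

    import sys

    from cloudinit.util import error

    # Print "Error:\nsomething went wrong" to stderr and keep going,
    # remembering the return code for a later exit.
    rc = error("something went wrong")   # rc == 1

    # Or bail out immediately with a custom format and code.
    if len(sys.argv) > 99:               # contrived condition for the example
        error("too many arguments", rc=2, fmt="Fatal: {}", sys_exit=True)
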
diff --git a/cloudinit/version.py b/cloudinit/version.py
index f25e9145..fa51cb9e 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,21 +4,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "20.4"
-_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
+__VERSION__ = "22.1"
+_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
# supports network config version 1
- 'NETWORK_CONFIG_V1',
+ "NETWORK_CONFIG_V1",
# supports network config version 2 (netplan)
- 'NETWORK_CONFIG_V2',
+ "NETWORK_CONFIG_V2",
]
def version_string():
"""Extract a version string from cloud-init."""
- if not _PACKAGED_VERSION.startswith('@@'):
+ if not _PACKAGED_VERSION.startswith("@@"):
return _PACKAGED_VERSION
return __VERSION__
+
# vi: ts=4 expandtab
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index 1da90c40..7ddd2f8d 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -1,16 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+import time
+
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-import os
-import time
-
LOG = logging.getLogger()
WARNINGS = {
- 'non_ec2_md': """
+ "non_ec2_md": """
This system is using the EC2 Metadata Service, but does not appear to
be running on Amazon EC2 or one of cloud-init's known platforms that
provide an EC2 Metadata service. In the future, cloud-init may stop
@@ -35,7 +35,7 @@ putting that content into
datasource:
Ec2:
strict_id: false""",
- 'dsid_missing_source': """
+ "dsid_missing_source": """
A new feature in cloud-init identified possible datasources for
this system as:
{dslist}
@@ -64,8 +64,9 @@ warnings:
def _get_warn_dir(cfg):
paths = helpers.Paths(
- path_cfgs=cfg.get('system_info', {}).get('paths', {}))
- return paths.get_ipath_cur('warnings')
+ path_cfgs=cfg.get("system_info", {}).get("paths", {})
+ )
+ return paths.get_ipath_cur("warnings")
def _load_warn_cfg(cfg, name, mode=True, sleep=None):
@@ -77,7 +78,7 @@ def _load_warn_cfg(cfg, name, mode=True, sleep=None):
if not cfg or not isinstance(cfg, dict):
return default
- ncfg = util.get_cfg_by_path(cfg, ('warnings', name))
+ ncfg = util.get_cfg_by_path(cfg, ("warnings", name))
if ncfg is None:
return default
@@ -128,7 +129,8 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
util.write_file(
os.path.join(_get_warn_dir(cfg), name),
- topline + "\n".join(fmtlines) + "\n" + topline)
+ topline + "\n".join(fmtlines) + "\n" + topline,
+ )
LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)
@@ -136,4 +138,5 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
time.sleep(sleep)
+
# vi: ts=4 expandtab
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 7171aaa5..fb4b456c 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -1,8 +1,8 @@
## template:jinja
# The top level settings are used as module
# and system configuration.
-
-{% if variant.endswith("bsd") %}
+{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %}
+{% if is_bsd %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
syslog_fix_perms: root:root
@@ -11,17 +11,29 @@ syslog_fix_perms: root:root
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
+{% if variant in ["photon"] %}
+ - name: root
+ lock_passwd: false
+{% else %}
- default
+{% endif %}
+
+{% if variant in ["photon"] %}
+# VMware guest customization.
+disable_vmware_customization: true
+manage_etc_hosts: false
+{% endif %}
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
-{% if variant in ["freebsd"] %}
+{% if variant in ["freebsd", "photon"] %}
disable_root: false
{% else %}
disable_root: true
{% endif %}
-{% if variant in ["alpine", "amazon", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux",
+ "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -33,6 +45,8 @@ ssh_pwauth: 0
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
+# If you use the datasource_list array, keep its items on a single line.
+# If you use a multi-line array, the ds-identify script won't read the items.
{% if variant.endswith("bsd") %}
# This should not be required, but leave it in place until the real cause of
# not finding -any- datasources is resolved.
@@ -60,22 +74,24 @@ cloud_init_modules:
{% endif %}
- bootcmd
- write-files
-{% if variant not in ["netbsd"] %}
+{% if variant not in ["netbsd", "openbsd"] %}
- growpart
- resizefs
{% endif %}
-{% if variant not in ["freebsd", "netbsd"] %}
+{% if not is_bsd %}
- disk_setup
- mounts
{% endif %}
- set_hostname
- update_hostname
- update_etc_hosts
-{% if variant in ["alpine"] %}
+{% if variant in ["alpine", "photon"] %}
- resolv_conf
{% endif %}
{% if not variant.endswith("bsd") %}
+{% if variant not in ["photon"] %}
- ca-certs
+{% endif %}
- rsyslog
{% endif %}
- users-groups
@@ -89,11 +105,16 @@ cloud_config_modules:
- emit_upstart
- snap
{% endif %}
+{% if variant not in ["photon"] %}
- ssh-import-id
+ - keyboard
- locale
+{% endif %}
- set-passwords
-{% if variant in ["rhel", "fedora"] %}
+{% if variant in ["rhel", "fedora", "photon"] %}
+{% if variant not in ["photon"] %}
- spacewalk
+{% endif %}
- yum-add-repo
{% endif %}
{% if variant in ["ubuntu", "unknown", "debian"] %}
@@ -131,6 +152,7 @@ cloud_final_modules:
{% if variant in ["ubuntu", "unknown"] %}
- ubuntu-drivers
{% endif %}
+ - write-files-deferred
- puppet
- chef
- mcollective
@@ -145,6 +167,7 @@ cloud_final_modules:
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
+ - install-hotplug
- phone-home
- final-message
- power-state-change
@@ -153,10 +176,12 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "rhel",
- "suse", "ubuntu"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
+ "eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "miraclelinux", "openbsd", "openEuler",
+ "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
distro: {{ variant }}
+{% elif variant in ["dragonfly"] %}
+ distro: dragonflybsd
{% else %}
# Unknown/fallback distro.
distro: ubuntu
@@ -206,8 +231,8 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["alpine", "amazon", "arch", "centos", "fedora",
- "rhel", "suse"] %}
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux",
+ "fedora", "gentoo", "miraclelinux", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -221,6 +246,10 @@ system_info:
{% endif %}
{% if variant == "suse" %}
groups: [cdrom, users]
+{% elif variant == "gentoo" %}
+ groups: [users, wheel]
+ primary_group: users
+ no_user_group: true
{% elif variant == "alpine" %}
groups: [adm, sudo]
{% elif variant == "arch" %}
@@ -248,6 +277,15 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/tcsh
+{% elif variant in ["dragonfly"] %}
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: dragonfly
+ lock_passwd: True
+ gecos: DragonFly
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/sh
{% elif variant in ["netbsd"] %}
default_user:
name: netbsd
@@ -264,8 +302,32 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/ksh
+{% elif variant == "photon" %}
+ default_user:
+ name: photon
+ lock_passwd: True
+ gecos: PhotonOS
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ network:
+ renderers: ['networkd']
+
+ ssh_svcname: sshd
+
+ # If set to true, cloud-init will not use fallback network config.
+ # In Photon, we have default network settings, hence if network settings are
+ # not explicitly given in metadata, don't use fallback network config.
+ disable_fallback_netcfg: true
{% endif %}
{% if variant in ["freebsd", "netbsd", "openbsd"] %}
network:
renderers: ['{{ variant }}']
+{% elif variant in ["dragonfly"] %}
+ network:
+ renderers: ['freebsd']
{% endif %}
diff --git a/conftest.py b/conftest.py
index 9e9d9ff8..3979eb0a 100644
--- a/conftest.py
+++ b/conftest.py
@@ -12,7 +12,7 @@ from unittest import mock
import pytest
-from cloudinit import helpers, subp
+from cloudinit import helpers, subp, util
class _FixtureUtils:
@@ -65,7 +65,7 @@ class _FixtureUtils:
return result[0]
-@pytest.yield_fixture(autouse=True)
+@pytest.fixture(autouse=True)
def disable_subp_usage(request, fixture_utils):
"""
Across all (pytest) tests, ensure that subp.subp is not invoked.
@@ -138,6 +138,7 @@ def disable_subp_usage(request, fixture_utils):
" this either by modifying your test code, or by modifying"
" disable_subp_usage to handle precedence."
)
+
else:
# Look this up before our patch is in place, so we have access to
# the real implementation in side_effect
@@ -165,7 +166,7 @@ def fixture_utils():
return _FixtureUtils
-@pytest.yield_fixture
+@pytest.fixture
def httpretty():
"""
Enable HTTPretty for duration of the testcase, resetting before and after.
@@ -201,3 +202,19 @@ def paths(tmpdir):
"run_dir": tmpdir.mkdir("run_dir").strpath,
}
return helpers.Paths(dirs)
+
+
+@pytest.fixture(autouse=True, scope="session")
+def monkeypatch_system_info():
+ def my_system_info():
+ return {
+ "platform": "invalid",
+ "system": "invalid",
+ "release": "invalid",
+ "python": "invalid",
+ "uname": ["invalid"] * 6,
+ "dist": ("Distro", "-1.1", "Codename"),
+ "variant": "ubuntu",
+ }
+
+ util.system_info = my_system_info
diff --git a/doc-requirements.txt b/doc-requirements.txt
index d5f921e3..5bcac862 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -3,3 +3,7 @@ m2r
sphinx<2
sphinx_rtd_theme
pyyaml
+
+# Indirect dependencies
+docutils<0.18
+mistune<2.0.0 # https://github.com/miyakogi/m2r/issues/66
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index 004894b7..39f546e1 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -138,11 +138,18 @@ apt:
# the first defining a valid mirror wins (in the order as defined here,
# not the order as listed in the config).
#
+ # Additionally, if the repository requires a custom signing key, it can be
+ # specified via the same fields as for custom sources:
+ # 'keyid': providing a key to import via shortid or fingerprint
+ # 'key': providing a raw PGP key
+ # 'keyserver': specify an alternate keyserver to pull keys from that
+ # were specified by keyid
- arches: [s390x, arm64]
# as above, allowing to have one config for different per arch mirrors
# security is optional, if not defined it is set to the same value as primary
security:
- uri: http://security.ubuntu.com/ubuntu
+ arches: [default]
# If search_dns is set for security the searched pattern is:
# <distro>-security-mirror
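    # A hedged sketch of the comment above (mirror URI, key id and keyserver
    # are hypothetical): a mirror entry carrying its own signing key might
    # look like:
    #
    #   primary:
    #     - arches: [default]
    #       uri: http://mirror.example.com/ubuntu
    #       keyid: F430BBA5
    #       keyserver: keyserver.ubuntu.com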
@@ -206,14 +213,14 @@ apt:
#
# The key of each source entry is the filename and will be prepended by
# /etc/apt/sources.list.d/ if it doesn't start with a '/'.
- # If it doesn't end with .list it will be appended so that apt picks up it's
+ # If it doesn't end with .list it will be appended so that apt picks up its
# configuration.
#
# Whenever there is no content to be written into such a file, the key is
# not used as filename - yet it can still be used as index for merging
# configuration.
#
- # The values inside the entries consost of the following optional entries:
+ # The values inside the entries consist of the following optional entries:
# 'source': a sources.list entry (some variable replacements apply)
# 'keyid': providing a key to import via shortid or fingerprint
# 'key': providing a raw PGP key
@@ -247,7 +254,7 @@ apt:
#
# Creates a file in /etc/apt/sources.list.d/ for the sources list entry
# based on the key: "/etc/apt/sources.list.d/curtin-dev-ppa.list"
- source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+ source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu bionic main"
# 2.2 keyid
#
@@ -270,17 +277,18 @@ apt:
my-repo2.list:
# 2.4 replacement variables
#
- # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement
- # variables.
+ # sources can use $MIRROR, $PRIMARY, $SECURITY, $RELEASE and $KEY_FILE
+ # replacement variables.
# They will be replaced with the default or specified mirrors and the
# running release.
# The entry below would be possibly turned into:
- # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse
- source: deb $MIRROR $RELEASE multiverse
+ # source: deb http://archive.ubuntu.com/ubuntu bionic multiverse
+ source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse
+ keyid: F430BBA5
my-repo3.list:
# this would have the same end effect as 'ppa:curtin-dev/test-archive'
- source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main"
+ source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu bionic main"
keyid: F430BBA5 # GPG key ID published on the key server
filename: curtin-dev-ppa.list
@@ -304,9 +312,19 @@ apt:
keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
keyserver: pgp.mit.edu
+ ignored5:
+ # 2.8 signed-by
+ #
+ # One can specify [signed-by=$KEY_FILE] in the source definition, which
+ # will make the key be installed in the directory /etc/cloud-init.gpg.d/
+    # will cause the key to be installed in the directory /etc/cloud-init.gpg.d/
+ # to the specified key. If $KEY_FILE is used, but no key is specified,
+ # apt update will (rightfully) fail due to an invalid value.
+ source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse
+ keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
my-repo4.list:
- # 2.8 raw key
+ # 2.9 raw key
#
# The apt signing key can also be specified by providing a pgp public key
# block. Providing the PGP key this way is the most robust method for
diff --git a/doc/examples/cloud-config-ca-certs.txt b/doc/examples/cloud-config-ca-certs.txt
index 5e9115a0..9f7beb05 100644
--- a/doc/examples/cloud-config-ca-certs.txt
+++ b/doc/examples/cloud-config-ca-certs.txt
@@ -7,13 +7,13 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
-ca-certs:
- # If present and set to True, the 'remove-defaults' parameter will remove
+ca_certs:
+ # If present and set to True, the 'remove_defaults' parameter will remove
# all the default trusted CA certificates that are normally shipped with
# Ubuntu.
# This is mainly for paranoid admins - most users will not need this
# functionality.
- remove-defaults: true
+ remove_defaults: true
# If present, the 'trusted' parameter should contain a certificate (or list
# of certificates) to add to the system as trusted CA certificates.
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 8cebfd80..9bb3c150 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -4,9 +4,6 @@
# list of recipes when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
-#
-# This example assumes the instance is 16.04 (xenial)
-
# The default is to install from packages.
@@ -15,47 +12,47 @@ apt:
sources:
source1:
source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
+
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
# Valid values are 'accept' and 'accept-no-persist'
chef_license: "accept"
-
+
# Valid values are 'gems' and 'packages' and 'omnibus'
install_type: "packages"
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 13bb687c..7a8c4284 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -45,13 +45,6 @@ datasource:
instance-id: i-87018aed
local-hostname: myhost.internal
- Azure:
- agent_command: [service, walinuxagent, start]
- set_hostname: True
- hostname_bounce:
- interface: eth0
- policy: on # [can be 'on', 'off' or 'force']
-
SmartOS:
# For KVM guests:
# Smart OS datasource works over a serial console interacting with
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index 5c6de77e..08cf5d8b 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -80,7 +80,7 @@ fs_setup:
disk_setup:
ephmeral0:
table_type: 'mbr'
- layout: 'auto'
+ layout: true
/dev/xvdh:
table_type: 'mbr'
layout:
@@ -207,7 +207,7 @@ fs_setup:
# as a partition value. However, ephermalX.0 is the _same_ as ephemeralX.
#
# <PART_VALUE>:
-# Partition definitions are overwriten if you use the '<DEVICE>.Y' notation.
+# Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
#
# The valid options are:
# "auto|any": tell cloud-init not to care whether there is a partition
@@ -243,7 +243,7 @@ fs_setup:
#
# In general, if you have a specific partition configuration in mind,
# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formating ephemeral storage or
+# and 'any' are specifically intended for formatting ephemeral storage or
# for simple schemes.
#
# "none": Put the file system directly on the device.
diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt
index 2edc63da..7b90d7df 100644
--- a/doc/examples/cloud-config-install-packages.txt
+++ b/doc/examples/cloud-config-install-packages.txt
@@ -7,7 +7,7 @@
# if packages are specified, this apt_update will be set to true
#
# packages may be supplied as a single package name or as a list
-# with the format [<package>, <version>] wherein the specifc
+# with the format [<package>, <version>] wherein the specific
# package version will be installed.
packages:
- pwgen
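# A hedged sketch of the version-pinned form described above (package name
# and version string are hypothetical):
#
#   packages:
#    - pwgen
#    - [libfoo, 1.2.3-1]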
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index 88be57ce..b76bf028 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -2,7 +2,7 @@
# Landscape-client configuration
#
# Anything under the top 'landscape: client' entry
-# will be basically rendered into a ConfigObj formated file
+# will be basically rendered into a ConfigObj formatted file
# under the '[client]' section of /etc/landscape/client.conf
#
# Note: 'tags' should be specified as a comma delimited string
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index 43f80ec9..d0ad8383 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -19,7 +19,7 @@
#
# - if a device does not exist at the time, an entry will still be
# written to /etc/fstab.
-# - '/dev' can be ommitted for device names that begin with: xvd, sd, hd, vd
+# - '/dev' can be omitted for device names that begin with: xvd, sd, hd, vd
# - if an entry does not have all 6 fields, they will be filled in
# with values from 'mount_default_fields' below.
#
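# A hedged sketch of such an entry (device and mount point are hypothetical);
# fields omitted from the list are filled in from 'mount_default_fields':
#
#   mounts:
#    - [ sdb, /mnt/data, auto, "defaults,nofail" ]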
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
index 002707ec..0bbb10e2 100644
--- a/doc/examples/cloud-config-power-state.txt
+++ b/doc/examples/cloud-config-power-state.txt
@@ -4,7 +4,7 @@
# default: none
#
# power_state can be used to make the system shutdown, reboot or
-# halt after boot is finished. This same thing can be acheived by
+# halt after boot is finished. This same thing can be achieved by
# user-data scripts or by runcmd by simply invoking 'shutdown'.
#
# Doing it this way ensures that cloud-init is entirely finished with
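# A minimal hedged sketch of such a block (the values are illustrative):
#
#   power_state:
#     mode: reboot
#     delay: now
#     message: Rebooting after cloud-init has finished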
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index 3c7e2da7..c6bc15de 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -1,25 +1,65 @@
#cloud-config
#
-# This is an example file to automatically setup and run puppetd
+# This is an example file to automatically setup and run puppet
# when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
+ # Boolean: whether or not to install puppet (default: true)
+ install: true
+
+ # A specific version to pass to the installer script or package manager
+ version: "7.7.0"
+
+ # Valid values are 'packages' and 'aio' (default: 'packages')
+ install_type: "packages"
+
+ # Puppet collection to install if 'install_type' is 'aio'
+ collection: "puppet7"
+
+ # Boolean: whether or not to remove the puppetlabs repo after installation
+ # if 'install_type' is 'aio' (default: true)
+ cleanup: true
+
+ # If 'install_type' is 'aio', change the url to the install script
+ aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh"
+
+ # Path to the puppet config file (default: depends on 'install_type')
+ conf_file: "/etc/puppet/puppet.conf"
+
+ # Path to the puppet SSL directory (default: depends on 'install_type')
+ ssl_dir: "/var/lib/puppet/ssl"
+
+ # Path to the CSR attributes file (default: depends on 'install_type')
+ csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+
+ # The name of the puppet package to install (no-op if 'install_type' is 'aio')
+ package_name: "puppet"
+
+ # Boolean: whether or not to run puppet after configuration finishes
+ # (default: false)
+ exec: false
+
+ # A list of arguments to pass to 'puppet agent' if 'exec' is true
+ # (default: ['--test'])
+ exec_args: ['--test']
+
# Every key present in the conf object will be added to puppet.conf:
# [name]
# subkey=value
#
# For example the configuration below will have the following section
# added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
+ # [main]
+ # server=puppetserver.example.org
# certname=i-0123456.ip-X-Y-Z.cloud.internal
#
- # The puppmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
+ # The puppetserver ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem if using distro packages
+ # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages.
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
# certname supports substitutions at runtime:
# %i: instanceid
# Example: i-0123456
@@ -29,11 +69,13 @@ puppet:
# NB: the certname will automatically be lowercased as required by puppet
certname: "%i.%f"
# ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
+ # It holds the puppetserver certificate in pem format.
# It should be a multi-line string (using the | yaml notation for
# multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ # The puppetserver certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using
+ # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO
+ # packages.
#
ca_cert: |
-----BEGIN CERTIFICATE-----
diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt
index aad8b683..bfe5ab44 100644
--- a/doc/examples/cloud-config-ssh-keys.txt
+++ b/doc/examples/cloud-config-ssh-keys.txt
@@ -42,3 +42,13 @@ ssh_keys:
-----END DSA PRIVATE KEY-----
dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
+
+# By default, the fingerprints of the authorized keys for the users
+# cloud-init adds are printed to the console. Setting
+# no_ssh_fingerprints to true suppresses this output.
+no_ssh_fingerprints: false
+
+# By default, (most) ssh host keys are printed to the console. Setting
+# emit_keys_to_console to false suppresses this output.
+ssh:
+ emit_keys_to_console: false
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 4a5a7e20..eaa8dd24 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -19,15 +19,19 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: '2012-09-01'
- ssh_import_id: foobar
+ expiredate: '2032-09-01'
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- name: barfoo
gecos: Bar B. Foo
sudo: ALL=(ALL) NOPASSWD:ALL
groups: users, admin
- ssh_import_id: None
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
lock_passwd: true
ssh_authorized_keys:
- <ssh pub key 1>
@@ -91,7 +95,6 @@ users:
# provided public-keys. An error will be raised if ssh_authorized_keys
# or ssh_import_id is provided for the same user.
#
-# ssh_authorized_keys.
# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
# strings or False to explicitly deny sudo usage. Examples:
#
@@ -114,14 +117,14 @@ users:
# command available on Ubuntu systems. If the user has an account
# on the Ubuntu SSO, specifying the email will allow snap to
# request a username and any public ssh keys and will import
-# these into the system with username specifed by SSO account.
+# these into the system with username specified by SSO account.
# If 'username' is not set in SSO, then username will be the
# shortname before the email domain.
#
# Default user creation:
#
-# Unless you define users, you will get a 'ubuntu' user on ubuntu systems with the
+# Unless you define users, you will get a 'ubuntu' user on Ubuntu systems with the
# legacy permission (no password sudo, locked user, etc). If however, you want
# to have the 'ubuntu' user in addition to other users, you need to instruct
# cloud-init that you also want the default user. To do this use the following
diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt
index 6c67c503..c7f95adf 100644
--- a/doc/examples/cloud-config-write-files.txt
+++ b/doc/examples/cloud-config-write-files.txt
@@ -2,7 +2,7 @@
# vim: syntax=yaml
#
# This is the configuration syntax that the write_files module
-# will know how to understand. encoding can be given b64 or gzip or (gz+b64).
+# will know how to understand. Encoding can be given b64 or gzip or (gz+b64).
# The content will be decoded accordingly and then written to the path that is
# provided.
#
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index de9a0f87..a2b4a3fa 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -42,7 +42,7 @@ packages:
#
# - if a device does not exist at the time, an entry will still be
# written to /etc/fstab.
-# - '/dev' can be ommitted for device names that begin with: xvd, sd, hd, vd
+# - '/dev' can be omitted for device names that begin with: xvd, sd, hd, vd
# - if an entry does not have all 6 fields, they will be filled in
# with values from 'mount_default_fields' below.
#
@@ -258,7 +258,7 @@ locale: en_US.UTF-8
locale_configfile: /etc/default/locale
# add entries to rsyslog configuration
-# The first occurance of a given filename will truncate.
+# The first occurrence of a given filename will truncate.
# subsequent entries will append.
# if value is a scalar, its content is assumed to be 'content', and the
# default filename is used.
@@ -272,7 +272,7 @@ rsyslog:
- filename: 01-examplecom.conf
content: "*.* @@syslogd.example.com"
-# resize_rootfs should the / filesytem be resized on first boot
+# resize_rootfs should the / filesystem be resized on first boot
# this allows you to launch an instance with a larger disk / partition
# and have the instance automatically grow / to accomoddate it
# set to 'False' to disable
@@ -339,7 +339,7 @@ resize_rootfs: True
# /etc/cloud/templates/hosts.tmpl.
# The strings '$hostname' and '$fqdn' are replaced in the template
# with the appropriate values.
-# To make modifications persistant across a reboot, you must make
+# To make modifications persistent across a reboot, you must make
# modificatoins to /etc/cloud/templates/hosts.tmpl
#
# localhost:
@@ -509,7 +509,7 @@ manual_cache_clean: False
# default: none
#
# power_state can be used to make the system shutdown, reboot or
-# halt after boot is finished. This same thing can be acheived by
+# halt after boot is finished. This same thing can be achieved by
# user-data scripts or by runcmd by simply invoking 'shutdown'.
#
# Doing it this way ensures that cloud-init is entirely finished with
diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-cmdline.txt
index 805bc3d3..8eee86b2 100644
--- a/doc/examples/kernel-cmdline.txt
+++ b/doc/examples/kernel-cmdline.txt
@@ -7,7 +7,7 @@ The format is:
cloud-config will consider any content after 'cc:' to be cloud-config
data. If an 'end_cc' string is present, then it will stop reading there.
-otherwise it considers everthing after 'cc:' to be cloud-config content.
+otherwise it considers everything after 'cc:' to be cloud-config content.
In order to allow carriage returns, you must enter '\\n', literally,
on the command line two backslashes followed by a letter 'n'.
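A hedged, hypothetical example of such a command-line fragment:

  cc: hostname: myhost\\npackages: [pastebinit] end_cc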
diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt
index a6e66415..1484e1a0 100644
--- a/doc/examples/part-handler.txt
+++ b/doc/examples/part-handler.txt
@@ -1,5 +1,4 @@
#part-handler
-# vi: syntax=python ts=4
def list_types():
# return a list of mime-types that are handled by this module
diff --git a/doc/examples/seed/README b/doc/examples/seed/README
index cc15839e..b0d97afd 100644
--- a/doc/examples/seed/README
+++ b/doc/examples/seed/README
@@ -15,7 +15,7 @@ The directory must have both files.
This is the user data, as would be consumed from ec2's metadata service
see examples in doc/examples.
- meta-data:
- This file is yaml formated data similar to what is in the ec2 metadata
+ This file is yaml formatted data similar to what is in the ec2 metadata
service under meta-data/. See the example, or, on an ec2 instance,
run:
python -c 'import boto.utils, yaml; print(
diff --git a/doc/examples/seed/meta-data b/doc/examples/seed/meta-data
index d0551448..38baca9a 100644
--- a/doc/examples/seed/meta-data
+++ b/doc/examples/seed/meta-data
@@ -1,4 +1,4 @@
-# this is yaml formated data
+# this is yaml formatted data
# it is expected to be roughly what you would get from running the following
# on an ec2 instance:
# python -c 'import boto.utils, yaml; print(yaml.dump(boto.utils.get_instance_metadata()))'
diff --git a/doc/man/cloud-id.1 b/doc/man/cloud-id.1
index 98ce130c..cb500189 100644
--- a/doc/man/cloud-id.1
+++ b/doc/man/cloud-id.1
@@ -6,6 +6,15 @@ cloud-id \- Report the canonical cloud-id for this instance
.SH SYNOPSIS
.BR "cloud-id" " [-h] [-j] [-l] [-i <INSTANCE_DATA>]"
+.SH DESCRIPTION
+cloud-id is the lowercase name of the cloud datasource discovered.
+
+The cloud-id will be 'not run' when the systemd generator has not yet run.
+The cloud-id will be 'disabled' when cloud-init is disabled or when
+ds-identify did not find a valid datasource.
+
+See cloud-init status --long for more information.
+
.SH OPTIONS
.TP
.B "-h, --help"
@@ -24,8 +33,22 @@ Report extended cloud-id information as tab-delimited string
Path to instance-data.json file. Default is
/run/cloud-init/instance-data.json
+.SH EXIT STATUS
+.TP
+0
+On success
+.TP
+1
+Due to an error
+.TP
+2
+Due to cloud-init being in a disabled state. See: cloud-init status --long
+.TP
+3
+The cloud-init generator and discovery have not yet run.
+
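A hedged usage sketch (the reported id and the exit status depend on the
platform and on cloud-init's state):

  $ cloud-id
  nocloud
  $ echo $?
  0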
.SH COPYRIGHT
-Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+Copyright (C) 2021 Canonical Ltd. License GPL-3 or Apache-2.0
.SH SEE ALSO
Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
index 9b52dc8d..3fde4148 100644
--- a/doc/man/cloud-init.1
+++ b/doc/man/cloud-init.1
@@ -10,7 +10,7 @@ cloud-init \- Cloud instance initialization
Cloud-init provides a mechanism for cloud instance initialization.
This is done by identifying the cloud platform that is in use, reading
provided cloud metadata and optional vendor and user
-data, and then intializing the instance as requested.
+data, and then initializing the instance as requested.
Generally, this command is not normally meant to be run directly by
the user. However, some subcommands may useful for development or
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 684822c2..9976afa4 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,24 +1,24 @@
import os
import sys
+from cloudinit import version
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../'))
-sys.path.insert(0, os.path.abspath('./'))
-sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath("../../"))
+sys.path.insert(0, os.path.abspath("../"))
+sys.path.insert(0, os.path.abspath("./"))
+sys.path.insert(0, os.path.abspath("."))
-from cloudinit import version
-from cloudinit.config.schema import get_schema_doc
-# Supress warnings for docs that aren't used yet
+# Suppress warnings for docs that aren't used yet
# unused_docs = [
# ]
# General information about the project.
-project = 'cloud-init'
-copyright = '2020, Canonical Ltd.'
+project = "cloud-init"
+copyright = "2020, Canonical Ltd."
# -- General configuration ----------------------------------------------------
@@ -28,17 +28,17 @@ copyright = '2020, Canonical Ltd.'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
- 'm2r',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.autosectionlabel',
- 'sphinx.ext.viewcode',
+ "m2r",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosectionlabel",
+ "sphinx.ext.viewcode",
]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -47,7 +47,7 @@ version = version.version_string()
release = version
# Set the default Pygments syntax
-highlight_language = 'yaml'
+highlight_language = "yaml"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -61,17 +61,8 @@ show_authors = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = 'static/logo.png'
-
-def generate_docstring_from_schema(app, what, name, obj, options, lines):
- """Override module docs from schema when present."""
- if what == 'module' and hasattr(obj, "schema"):
- del lines[:]
- lines.extend(get_schema_doc(obj.schema).split('\n'))
-
-def setup(app):
- app.connect('autodoc-process-docstring', generate_docstring_from_schema)
+html_logo = "static/logo.png"
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index ddcb0b31..251a904d 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -27,7 +27,7 @@ Getting help
Having trouble? We would like to help!
- Try the :ref:`FAQ` – its got answers to some common questions
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
@@ -49,6 +49,7 @@ Having trouble? We would like to help!
topics/format.rst
topics/examples.rst
+ topics/events.rst
topics/modules.rst
topics/merging.rst
@@ -67,7 +68,7 @@ Having trouble? We would like to help!
:titlesonly:
:caption: Development
- topics/hacking.rst
+ topics/contributing.rst
topics/code_review.rst
topics/security.rst
topics/debugging.rst
@@ -75,7 +76,7 @@ Having trouble? We would like to help!
topics/dir_layout.rst
topics/analyze.rst
topics/docs.rst
+ topics/testing.rst
topics/integration_tests.rst
- topics/cloud_tests.rst
.. vi: textwidth=79
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 8f56a7d2..d8ca9d16 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,18 +14,20 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
-and OpenBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
+OpenBSD and DragonFlyBSD:
- Alpine Linux
- ArchLinux
- Debian
+- DragonFlyBSD
- Fedora
- FreeBSD
- Gentoo Linux
- NetBSD
- OpenBSD
-- RHEL/CentOS
+- Photon OS
+- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux
- SLES/openSUSE
- Ubuntu
@@ -42,7 +44,7 @@ environments in the public cloud:
- Softlayer
- Rackspace Public Cloud
- IBM Cloud
-- Digital Ocean
+- DigitalOcean
- Bigstep
- Hetzner
- Joyent
@@ -55,6 +57,9 @@ environments in the public cloud:
- CloudStack
- AltCloud
- SmartOS
+- UpCloud
+- Vultr
+- Zadara Edge Cloud Platform
Additionally, cloud-init is supported on these private clouds:
@@ -63,5 +68,6 @@ Additionally, cloud-init is supported on these private clouds:
- LXD
- KVM
- Metal-as-a-Service (MAAS)
+- VMware
.. vi: textwidth=79
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index a5282e35..b904eaf4 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -65,10 +65,10 @@ If this is an instance's first boot, then the selected network configuration
is rendered. This includes clearing of all previous (stale) configuration
including persistent device naming with old mac addresses.
-This stage must block network bring-up or any stale configuration might
-already have been applied. That could have negative effects such as DHCP
-hooks or broadcast of an old hostname. It would also put the system in
-an odd state to recover from as it may then have to restart network
+This stage must block network bring-up or any stale configuration that might
+have already been applied. Otherwise, that could have negative effects such
+as DHCP hooks or broadcast of an old hostname. It would also put the system
+in an odd state to recover from, as it may then have to restart network
devices.
Cloud-init then exits and expects for the continued boot of the operating
@@ -93,7 +93,7 @@ Network
+---------+--------+----------------------------------------------------------+
This stage requires all configured networking to be online, as it will fully
-process any user-data that is found. Here, processing means:
+process any user-data that is found. Here processing means:
* retrieve any ``#include`` or ``#include-once`` (recursively) including http
* decompress any compressed content
@@ -106,7 +106,7 @@ from sources only available via network. For example, a user may have
provided user-data in a network resource that describes how local mounts
should be done.
-On some clouds such as Azure, this stage will create filesystems to be
+On some clouds, such as Azure, this stage will create filesystems to be
mounted, including ones that have stale (previous instance) references in
``/etc/fstab``. As such, entries ``/etc/fstab`` other than those necessary for
cloud-init to run should not be done until after this stage.
@@ -146,11 +146,11 @@ Final
This stage runs as late in boot as possible. Any scripts that a user is
accustomed to running after logging into a system should run correctly here.
-Things that run here include
+Things that run here include:
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (i.e. shell scripts passed as user-data)
+ * user-defined scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
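As a hedged aside, such an external script can simply run the following,
which blocks until cloud-init has completed:

.. code-block:: shell-session

   $ cloud-init status --wait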
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
index 4b60776b..ee3828de 100644
--- a/doc/rtd/topics/bugs.rst
+++ b/doc/rtd/topics/bugs.rst
@@ -17,7 +17,7 @@ To aid in debugging, please collect the necessary logs. To do so, run the
.. code-block:: shell-session
- $ cloud-init collect-logs
+ $ sudo cloud-init collect-logs
Wrote /home/ubuntu/cloud-init.tar.gz
If your version of cloud-init does not have the `collect-logs` subcommand,
@@ -25,7 +25,7 @@ then please manually collect the base log files by doing the following:
.. code-block:: shell-session
- $ dmesg > dmesg.txt
+ $ sudo dmesg > dmesg.txt
$ sudo journalctl -o short-precise > journal.txt
$ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
/var/log/cloud-init.log /var/log/cloud-init-output.log
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 0ff230b5..e2f48bf0 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -17,10 +17,10 @@ option. This can be used against cloud-init itself or any of its subcommands.
-h, --help show this help message and exit
--version, -v show program's version number and exit
--file FILES, -f FILES
- additional yaml configuration files to use
+ additional yaml configuration files to use
--debug, -d show additional pre-action logging (default: False)
--force force running even if no datasource is found (use at
- your own risk)
+ your own risk)
Subcommands:
{init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
@@ -115,10 +115,14 @@ Current subcommands:
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
* ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config yaml file and annotates potential
+ validator. It accepts a cloud-config YAML file and annotates potential
schema errors locally without the need for deployment. Schema
validation is work in progress and supports a subset of cloud-config
modules.
+ * ``hotplug-hook``: respond to newly added system devices by retrieving
+ updated system metadata and bringing up/down the corresponding device.
+ This command is intended to be called via a systemd service and is
+ not considered user-accessible except for debugging purposes.
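As a hedged illustration (the file name is hypothetical and the exact output
wording varies between releases), a cloud-config file can be validated
locally with:

.. code-block:: shell-session

   $ cloud-init devel schema --config-file my-user-data.yaml
   Valid cloud-config: my-user-data.yaml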
.. _cli_features:
diff --git a/doc/rtd/topics/cloud_tests.rst b/doc/rtd/topics/cloud_tests.rst
deleted file mode 100644
index 0fbb1301..00000000
--- a/doc/rtd/topics/cloud_tests.rst
+++ /dev/null
@@ -1,764 +0,0 @@
-************************
-Cloud tests (Deprecated)
-************************
-
-Cloud tests are longer be maintained. For writing integration
-tests, see the :ref:`integration_tests` page.
-
-Overview
-========
-
-This page describes the execution, development, and architecture of the
-cloud-init integration tests:
-
-* Execution explains the options available and running of tests
-* Development shows how to write test cases
-* Architecture explains the internal processes
-
-Execution
-=========
-
-Overview
---------
-
-In order to avoid the need for dependencies and ease the setup and
-configuration users can run the integration tests via tox:
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- -h
-
-Everything after the double dash will be passed to the integration tests.
-Executing tests has several options:
-
-* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run``
- command does the same thing, except uses a deb built from the current
- working tree.
-
-* ``collect`` deploys on the specified platform and distro, patches with the
- requested deb or rpm, and finally collects output of the arbitrary
- commands. Similarly, ```tree_collect`` will collect output using a deb
- built from the current working tree.
-
-* ``verify`` given a directory of test data, run the Python unit tests on
- it to generate results.
-
-* ``bddeb`` will build a deb of the current working tree.
-
-Run
----
-
-The first example will provide a complete end-to-end run of data
-collection and verification. There are additional examples below
-explaining how to run one or the other independently.
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- run --verbose \
- --os-name stretch --os-name xenial \
- --deb cloud-init_0.7.8~my_patch_all.deb \
- --preserve-data --data-dir ~/collection \
- --preserve-instance
-
-The above command will do the following:
-
-* ``run`` both collect output and run tests the output
-
-* ``--verbose`` verbose output
-
-* ``--os-name stretch`` on the Debian Stretch release
-
-* ``--os-name xenial`` on the Ubuntu Xenial release
-
-* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
- cloud-init to run with
-
-* ``--preserve-data`` always preserve collected data, do not remove data
- after successful test run
-
-* ``--preserve-instance`` do not destroy the instance after test to allow
- for debugging the stopped instance during integration test development. By
- default, test instances are destroyed after the test completes.
-
-* ``--data-dir ~/collection`` write collected data into `~/collection`,
- rather than using a temporary directory
-
-For a more detailed explanation of each option see below.
-
-.. note::
- By default, data collected by the run command will be written into a
- temporary directory and deleted after a successful. If you would
- like to preserve this data, please use the option ``--preserve-data``.
-
-Collect
--------
-
-If developing tests it may be necessary to see if cloud-config works as
-expected and the correct files are pulled down. In this case only a
-collect can be ran by running:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- collect -n xenial --data-dir /tmp/collection
-
-The above command will run the collection tests on xenial and place
-all results into `/tmp/collection`.
-
-Verify
-------
-
-When developing tests it is much easier to simply rerun the verify scripts
-without the more lengthy collect process. This can be done by running:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- verify --data-dir /tmp/collection
-
-The above command will run the verify scripts on the data discovered in
-`/tmp/collection`.
-
-TreeRun and TreeCollect
------------------------
-
-If working on a cloud-init feature or resolving a bug, it may be useful to
-run the current copy of cloud-init in the integration testing environment.
-The integration testing suite can automatically build a deb based on the
-current working tree of cloud-init and run the test suite using this deb.
-
-The ``tree_run`` and ``tree_collect`` commands take the same arguments as
-the ``run`` and ``collect`` commands. These commands will build a deb and
-write it into a temporary file, then start the test suite and pass that deb
-in. To build a deb only, and not run the test suite, the ``bddeb`` command
-can be used.
-
-Note that code in the cloud-init working tree that has not been committed
-when the cloud-init deb is built will still be included. To build a
-cloud-init deb from or use the ``tree_run`` command using a copy of
-cloud-init located in a different directory, use the option ``--cloud-init
-/path/to/cloud-init``.
-
-.. code-block:: shell-session
-
- $ tox -e citest -- tree_run --verbose \
- --os-name xenial --os-name stretch \
- --test modules/final_message --test modules/write_files \
- --result /tmp/result.yaml
-
-Bddeb
------
-
-The ``bddeb`` command can be used to generate a deb file. This is used by the
-tree_run and tree_collect commands to build a deb of the current working tree
-using the packaging template contained in the ``packages/debian/`` directory.
-It can also be used to generate a deb for use in other situations and avoid
-needing to have all the build and test dependencies installed locally.
-
-* ``--bddeb-args``: arguments to pass through to bddeb
-* ``--build-os``: distribution to use as build system (default is xenial)
-* ``--build-platform``: platform to use for build system (default is lxd)
-* ``--cloud-init``: path to base of cloud-init tree (default is '.')
-* ``--deb``: path to write output deb to (default is '.')
-* ``--packaging-branch``: import the ``debian/`` packaging directory
- from the specified branch (default: ``ubuntu/devel``) instead of using
- the packaging template.
-
-Setup Image
------------
-
-By default an image that is used will remain unmodified, but certain
-scenarios may require image modification. For example, many images may use
-a much older cloud-init. As a result tests looking at newer functionality
-will fail because a newer version of cloud-init may be required. The
-following options can be used for further customization:
-
-* ``--deb``: install the specified deb into the image
-* ``--rpm``: install the specified rpm into the image
-* ``--repo``: enable a repository and upgrade cloud-init afterwards
-* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
-* ``--upgrade``: upgrade cloud-init from repos
-* ``--upgrade-full``: run a full system upgrade
-* ``--script``: execute a script in the image. This can perform any setup
- required that is not covered by the other options
-
-Test Case Development
-=====================
-
-Overview
---------
-
-As a test writer you need to develop a test configuration and a
-verification file:
-
- * The test configuration specifies a specific cloud-config to be used by
- cloud-init and a list of arbitrary commands to capture the output of
- (e.g my_test.yaml)
-
- * The verification file runs tests on the collected output to determine
- the result of the test (e.g. my_test.py)
-
-The names must match, however the extensions will of course be different,
-yaml vs py.
-
-Configuration
--------------
-
-The test configuration is a YAML file such as *ntp_server.yaml* below:
-
-.. code-block:: yaml
-
- #
- # Empty NTP config to setup using defaults
- #
- # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
- # NOTE: this should not require no_ntpdate feature, use 'which' to check for
- # installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
- # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
- cloud_config: |
- #cloud-config
- ntp:
- servers:
- - pool.ntp.org
- required_features:
- - apt
- - no_ntpdate
- - ubuntu_ntp
- collect_scripts:
- ntp_installed_servers: |
- #!/bin/bash
- dpkg -l | grep ntp | wc -l
- ntp_conf_dist_servers: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/bash
- cat /etc/ntp.conf | grep '^server'
-
-There are several keys, 1 required and some optional, in the YAML file:
-
-1. The required key is ``cloud_config``. This should be a string of valid
- YAML that is exactly what would normally be placed in a cloud-config
- file, including the cloud-config header. This essentially sets up the
- scenario under test.
-
-2. One optional key is ``collect_scripts``. This key has one or more
- sub-keys containing strings of arbitrary commands to execute (e.g.
- ```cat /var/log/cloud-config-output.log```). In the example above the
- output of dpkg is captured, grep for ntp, and the number of lines
- reported. The name of the sub-key is important. The sub-key is used by
- the verification script to recall the output of the commands ran.
-
-3. The optional ``enabled`` key enables or disables the test case. By
- default the test case will be enabled.
-
-4. The optional ``required_features`` key may be used to specify a list
- of features flags that an image must have to be able to run the test
- case. For example, if a test case relies on an image supporting apt,
- then the config for the test case should include ``required_features:
- [ apt ]``.
-
-
-Default Collect Scripts
------------------------
-
-By default the following files will be collected for every test. There is
-no need to specify these items:
-
-* ``/var/log/cloud-init.log``
-* ``/var/log/cloud-init-output.log``
-* ``/run/cloud-init/.instance-id``
-* ``/run/cloud-init/result.json``
-* ``/run/cloud-init/status.json``
-* ```dpkg-query -W -f='${Version}' cloud-init```
-
-Verification
-------------
-
-The verification script is a Python file with unit tests like the one,
-`ntp_server.py`, below:
-
-.. code-block:: python
-
- # This file is part of cloud-init. See LICENSE file for license information.
-
- """cloud-init Integration Test Verify Script"""
- from tests.cloud_tests.testcases import base
-
-
- class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file has one entry"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_entires(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_empty')
- self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out)
-
- # vi: ts=4 expandtab
-
-
-Here is a breakdown of the unit test file:
-
-* The import statement allows access to the output files.
-
-* The class can be named anything, but must import the
- ``base.CloudTestCase``, either directly or via another test class.
-
-* There can be 1 to N number of functions with any name, however only
- functions starting with ``test_*`` will be executed.
-
-* There can be 1 to N number of classes in a test module, however only
- classes inheriting from ``base.CloudTestCase`` will be loaded.
-
-* Output from the commands can be accessed via
- ``self.get_data_file('key')`` where key is the sub-key of
- ``collect_scripts`` above.
-
-* The cloud config that the test ran with can be accessed via
- ``self.cloud_config``, or any entry from the cloud config can be accessed
- via ``self.get_config_entry('key')``.
-
-* See the base ``CloudTestCase`` for additional helper functions.
-
-Layout
-------
-
-Integration tests are located under the `tests/cloud_tests` directory.
-Test configurations are placed under `configs` and the test verification
-scripts under `testcases`:
-
-.. code-block:: shell-session
-
- cloud-init$ tree -d tests/cloud_tests/
- tests/cloud_tests/
- ├── configs
- │   ├── bugs
- │   ├── examples
- │   ├── main
- │   └── modules
- └── testcases
- ├── bugs
- ├── examples
- ├── main
- └── modules
-
-The sub-folders of bugs, examples, main, and modules help organize the
-tests. View the README.md in each to understand in more detail each
-directory.
-
-Test Creation Helper
---------------------
-
-The integration testing suite has a built in helper to aid in test
-development. Help can be invoked via ``tox -e citest -- create --help``. It
-can create a template test case config file with user data passed in from
-the command line, as well as a template test case verifier module.
-
-The following would create a test case named ``example`` under the
-``modules`` category with the given description, and cloud config data read
-in from ``/tmp/user_data``.
-
-.. code-block:: shell-session
-
- $ tox -e citest -- create modules/example \
- -d "a simple example test case" -c "$(< /tmp/user_data)"
-
-
-Development Checklist
----------------------
-
-* Configuration File
- * Named 'your_test.yaml'
- * Contains at least a valid cloud-config
- * Optionally, commands to capture additional output
- * Valid YAML
- * Placed in the appropriate sub-folder in the configs directory
- * Any image features required for the test are specified
-* Verification File
- * Named 'your_test.py'
- * Valid unit tests validating output collected
- * Passes pylint & pep8 checks
- * Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test:
-
- .. code-block:: shell-session
-
- $ tox -e citest -- run -verbose \
- --os-name <release target> \
- --test modules/your_test.yaml \
- [--deb <build of cloud-init>]
-
-
-Platforms
-=========
-
-EC2
----
-To run on the EC2 platform it is required that the user has an AWS credentials
-configuration file specifying his or her access keys and a default region.
-These configuration files are the standard that the AWS cli and other AWS
-tools utilize for interacting directly with AWS itself and are normally
-generated when running ``aws configure``:
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/credentials
- [default]
- aws_access_key_id = <KEY HERE>
- aws_secret_access_key = <KEY HERE>
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/config
- [default]
- region = us-west-2
-
-
-Azure Cloud
------------
-
-To run on Azure Cloud platform users login with Service Principal and export
-credentials file. Region is defaulted and can be set in
-``tests/cloud_tests/platforms.yaml``. The Service Principal credentials are
-the standard authentication for Azure SDK to interact with Azure Services:
-
-Create Service Principal account or login
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-.. code-block:: shell-session
-
- $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-Export credentials
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json
-
-.. code-block:: json
-
- {
- "clientId": "<Service principal ID>",
- "clientSecret": "<Service principal secret/password>",
- "subscriptionId": "<Subscription associated with the service principal>",
- "tenantId": "<The service principal's tenant>",
- "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
- "resourceManagerEndpointUrl": "https://management.azure.com/",
- "activeDirectoryGraphResourceId": "https://graph.windows.net/",
- "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
- "galleryEndpointUrl": "https://gallery.azure.com/",
- "managementEndpointUrl": "https://management.core.windows.net/"
- }
-
-Set region in platforms.yaml
-
-.. code-block:: yaml
-
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-
-Architecture
-============
-
-The following section outlines the high-level architecture of the
-integration process.
-
-Overview
---------
-The process flow during a complete end-to-end LXD-backed test.
-
-1. Configuration
- * The back end and specific distro releases are verified as supported
- * The test or tests that need to be run are determined either by
- directory or by individual yaml
-
-2. Image Creation
- * Acquire the request LXD image
- * Install the specified cloud-init package
- * Clean the image so that it does not appear to have been booted
- * A snapshot of the image is created and reused by all tests
-
-3. Configuration
- * For each test, the cloud-config is injected into a copy of the
- snapshot and booted
- * The framework waits for ``/var/lib/cloud/instance/boot-finished``
- (up to 120 seconds)
- * All default commands are ran and output collected
- * Any commands the user specified are executed and output collected
-
-4. Verification
- * The default commands are checked for any failures, errors, and
- warnings to validate basic functionality of cloud-init completed
- successfully
- * The user generated unit tests are then ran validating against the
- collected output
-
-5. Results
- * If any failures were detected the test suite returns a failure
- * Results can be dumped in yaml format to a specified file using the
- ``-r <result_file_name>.yaml`` option
-
-Configuring the Test Suite
---------------------------
-
-Most of the behavior of the test suite is configurable through several yaml
-files. These control the behavior of the test suite's platforms, images, and
-tests. The main config files for platforms, images and test cases are
-``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``.
-
-Config handling
-^^^^^^^^^^^^^^^
-
-All configurable parts of the test suite use a defaults + overrides system
-for managing config entries. All base config items are dictionaries.
-
-Merging is done on a key-by-key basis, with all keys in the default and
-override represented in the final result. If a key exists both in
-the defaults and the overrides, then the behavior depends on the type of data
-the key refers to. If it is atomic data or a list, then the overrides will
-replace the default. If the data is a dictionary then the value will be the
-result of merging that dictionary from the default config and that
-dictionary from the overrides.
-
-Merging is done using the function
-``tests.cloud_tests.config.merge_config``, which can be examined for more
-detail on config merging behavior.
-
-The following demonstrates merge behavior:
-
-.. code-block:: yaml
-
- defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
-
- overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
-
- result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
-
-
-Image Config
-------------
-
-Image configuration is handled in ``releases.yaml``. The image configuration
-controls how platforms locate and acquire images, how the platforms should
-interact with the images, how platforms should detect when an image has
-fully booted, any options that are required to set the image up, and
-features that the image supports.
-
-Since settings for locating an image and interacting with it differ from
-platform to platform, there are 4 levels of settings available for images on
-top of the default image settings. The structure of the image config file
-is:
-
-.. code-block:: yaml
-
- default_release_config:
- default:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
- releases:
- <release name>:
- <default>:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
-
-The base config is created from the overall defaults and the overrides for
-the platform. The overrides are created from the default config for the
-image and the platform specific overrides for the image.
-
-System Boot
-^^^^^^^^^^^
-
-The test suite must be able to test if a system has fully booted and if
-cloud-init has finished running, so that running collect scripts does not
-race against the target image booting. This is done using the
-``system_ready_script`` and ``cloud_init_ready_script`` image config keys.
-
-Each of these keys accepts a small bash test statement as a string that must
-return 0 or 1. Since this test statement will be added into a larger bash
-statement it must be a single statement using the ``[`` test syntax.
-
-The default image config provides a system ready script that works for any
-systemd based image. If the image is not systemd based, then a different
-test statement must be provided. The default config also provides a test
-for whether or not cloud-init has finished which checks for the file
-``/run/cloud-init/result.json``. This should be sufficient for most systems
-as writing this file is one of the last things cloud-init does.
-
-The setting ``boot_timeout`` controls how long, in seconds, the platform
-should wait for an image to boot. If the system ready script has not
-indicated that the system is fully booted within this time an error will be
-raised.
-
-Feature Flags
-^^^^^^^^^^^^^
-
-Not all test cases can work on all images due to features the test case
-requires not being present on that image. If a test case requires features
-in an image that are not likely to be present across all distros and
-platforms that the test suite supports, then the test can be skipped
-everywhere it is not supported.
-
-Feature flags, which are names for features supported on some images, but
-not all that may be required by test cases. Configuration for feature flags
-is provided in ``releases.yaml`` under the ``features`` top level key. The
-features config includes a list of all currently defined feature flags,
-their meanings, and a list of feature groups.
-
-Feature groups are groups of features that many images have in common. For
-example, the ``Ubuntu_specific`` feature group includes features that
-should be present across most Ubuntu releases, but may or may not be for
-other distros. Feature groups are specified for an image as a list under
-the key ``feature_groups``.
-
-An image's feature flags are derived from the features groups that that
-image has and any feature overrides provided. Feature overrides can be
-specified under the ``features`` key which accepts a dictionary of
-``{<feature_name>: true/false}`` mappings. If a feature is omitted from an
-image's feature flags or set to false in the overrides then the test suite
-will skip any tests that require that feature when using that image.
-
-Feature flags may be overridden at run time using the ``--feature-override``
-command line argument. It accepts a feature flag and value to set in the
-format ``<feature name>=true/false``. Multiple ``--feature-override``
-flags can be used, and will all be applied to all feature flags for images
-used during a test.
-
-Setup Overrides
-^^^^^^^^^^^^^^^
-
-If an image requires some of the options for image setup to be used, then it
-may specify overrides for the command line arguments passed into setup
-image. These may be specified as a dictionary under the ``setup_overrides``
-key. When an image is set up, the arguments that control how it is set up
-will be the arguments from the command line, with any entries in
-``setup_overrides`` used to override these arguments.
-
-For example, images that do not come with cloud-init already installed
-should have ``setup_overrides: {upgrade: true}`` specified so that in the
-event that no additional setup options are given, cloud-init will be
-installed from the image's repos before running tests. Note that if other
-options such as ``--deb`` are passed in on the command line, these will
-still work as expected, since apt's policy for cloud-init would prefer the
-locally installed deb over an older version from the repos.
-
-Platform Specific Options
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-There are many platform specific options in image configuration that allow
-platforms to locate images and that control additional setup that the
-platform may have to do to make the image usable. For information on how
-these work, please consult the documentation for that platform in the
-integration testing suite and the ``releases.yaml`` file for examples.
-
-Error Handling
---------------
-
-The test suite makes an attempt to run as many tests as possible even in the
-event of some failing so that automated runs collect as much data as
-possible. In the event that something goes wrong while setting up for or
-running a test, the test suite will attempt to continue running any tests
-which have not been affected by the error.
-
-For example, if the test suite was told to run tests on one platform for two
-releases and an error occurred setting up the first image, all tests for
-that image would be skipped, and the test suite would continue to set up
-the second image and run tests on it. Or, if the system does not start
-properly for one test case out of many to run on that image, that test case
-will be skipped and the next one will be run.
-
-Note that if any errors occur, the test suite will record the failure and
-where it occurred in the result data and write it out to the specified
-result file.
-
-Results
--------
-
-The test suite generates result data that includes how long each stage of
-the test suite took and which parts were and were not successful. This data
-is dumped to the log after the collect and verify stages, and may also be
-written out in yaml format to a file. If part of the setup failed, the
-traceback for the failure and the error message will be included in the
-result file. If a test verifier finds a problem with the collected data
-from a test run, the class, test function and test will be recorded in the
-result data.
-
-Exit Codes
-^^^^^^^^^^
-
-The test suite counts how many errors occur throughout a run. The exit code
-after a run is the number of errors that occurred. If the exit code is
-non-zero then something is wrong either with the test suite, the
-configuration for an image, a test case, or cloud-init itself.
-
-Note that the exit code does not always directly correspond to the number
-of failed test cases, since in some cases, a single error during image setup
-can mean that several test cases are not run. If run is used, then the exit
-code will be the sum of the number of errors in the collect and verify
-stages.
-
-Data Dir
-^^^^^^^^
-
-When using run, the collected data is written into a temporary directory. In
-the event that all tests pass, this directory is deleted, but if a test
-fails or an error occurs, this data will be left in place, and a message
-will be written to the log giving the location of the data.
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
index 68c10405..33aad789 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/topics/code_review.rst
@@ -22,7 +22,7 @@ questions about the code review process, or at any point during the
code review process, these are the available avenues:
* if you have an open Pull Request, comment on that pull request
-* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+* join the ``#cloud-init`` channel on the Libera IRC network and ask
away
* send an email to the cloud-init mailing list,
cloud-init@lists.launchpad.net
@@ -58,12 +58,12 @@ Reviewer
Committer
A cloud-init core developer (i.e. a person who has permission to
- merge PRs into master).
+ merge PRs into **main**).
Prerequisites For Landing Pull Requests
=======================================
-Before a PR can be landed into master, the following conditions *must*
+Before a PR can be landed into **main**, the following conditions *must*
be met:
* the CLA has been signed by the **Proposer** (or is covered by an
@@ -148,7 +148,7 @@ temporarily closed. (The first two are covered in this section; see
(In the below, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
-is the only way that changes can land in cloud-init's master branch.)
+is the only way that changes can land in cloud-init's **main** branch.)
These are the steps that comprise the review phase:
diff --git a/doc/rtd/topics/contributing.rst b/doc/rtd/topics/contributing.rst
new file mode 100644
index 00000000..b9aee867
--- /dev/null
+++ b/doc/rtd/topics/contributing.rst
@@ -0,0 +1,2 @@
+.. include:: ../../../CONTRIBUTING.rst
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 3d026143..f73a5b2a 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -4,9 +4,9 @@ Datasources
***********
Datasources are sources of configuration data for cloud-init that typically
-come from the user (e.g. userdata) or come from the cloud that created the
-configuration drive (e.g. metadata). Typical userdata would include files,
-yaml, and shell scripts while typical metadata would include server name,
+come from the user (i.e. userdata) or come from the cloud that created the
+configuration drive (i.e. metadata). Typical userdata would include files,
+YAML, and shell scripts while typical metadata would include server name,
instance id, display name and other cloud specific details.
Since there are multiple ways to provide this data (each cloud solution seems
@@ -29,16 +29,17 @@ The following is a list of documents for each supported datasource:
datasources/aliyun.rst
datasources/altcloud.rst
+ datasources/ec2.rst
datasources/azure.rst
datasources/cloudsigma.rst
datasources/cloudstack.rst
datasources/configdrive.rst
datasources/digitalocean.rst
datasources/e24cloud.rst
- datasources/ec2.rst
datasources/exoscale.rst
datasources/fallback.rst
datasources/gce.rst
+ datasources/lxd.rst
datasources/maas.rst
datasources/nocloud.rst
datasources/opennebula.rst
@@ -47,9 +48,11 @@ The following is a list of documents for each supported datasource:
datasources/ovf.rst
datasources/rbxcloud.rst
datasources/smartos.rst
+ datasources/upcloud.rst
+ datasources/vmware.rst
+ datasources/vultr.rst
datasources/zstack.rst
-
Creation
========
@@ -96,7 +99,7 @@ need to take care of the following items:
your datasource module name to the end of the ``datasource_list``
entry in ``cloudinit/settings.py``.
-* **Add your your cloud platform to apport collection prompts:** Update the
+* **Add your cloud platform to apport collection prompts:** Update the
list of cloud platforms in ``cloudinit/apport.py``. This list will be
provided to the user who invokes ``ubuntu-bug cloud-init``.
diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst
index 3f4f40ca..0bb9c19e 100644
--- a/doc/rtd/topics/datasources/aliyun.rst
+++ b/doc/rtd/topics/datasources/aliyun.rst
@@ -12,6 +12,21 @@ The Alibaba Cloud metadata service is available at the well known url
Alibaba Cloud ECS on `metadata
<https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__.
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ AliYun:
+ metadata_urls: ["http://100.100.100.200"]
+ timeout: 50
+ max_wait: 120
+
Versions
^^^^^^^^
Like the EC2 metadata service, Alibaba Cloud's metadata service provides
@@ -71,4 +86,4 @@ If no user-data is provided, this will return a 404.
#!/bin/sh
echo "Hello World."
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst
index 9d7e3de1..acd5e2a3 100644
--- a/doc/rtd/topics/datasources/altcloud.rst
+++ b/doc/rtd/topics/datasources/altcloud.rst
@@ -91,4 +91,4 @@ For more information on Delta Cloud see: http://deltacloud.apache.org
.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/
.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index e04c3a33..1bd03970 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,28 +5,6 @@ Azure
This datasource finds metadata and user-data from the Azure cloud platform.
-walinuxagent
-------------
-walinuxagent has several functions within images. For cloud-init
-specifically, the relevant functionality it performs is to register the
-instance with the Azure cloud platform at boot so networking will be
-permitted. For more information about the other functionality of
-walinuxagent, see `Azure's documentation
-<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
-(Note, however, that only one of walinuxagent's provisioning and cloud-init
-should be used to perform instance customisation.)
-
-If you are configuring walinuxagent yourself, you will want to ensure that you
-have `Provisioning.UseCloudInit
-<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
-``y``.
-
-
-Builtin Agent
--------------
-An alternative to using walinuxagent to register to the Azure cloud platform
-is to use the ``__builtin__`` agent command. This section contains more
-background on what that code path does, and how to enable it.
The Azure cloud platform provides initial data to an instance via an attached
CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
'dhclient_hook' of cloud-init itself. This sub-command will write the client
information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
-In order for cloud-init to leverage this method to find the endpoint, the
-cloud.cfg file must contain:
-
-.. sourcecode:: yaml
-
- datasource:
- Azure:
- set_hostname: False
- agent_command: __builtin__
-
If those files are not available, the fallback is to check the leases file
for the endpoint server (again option 245).
@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
The settings that may be configured are:
- * **agent_command**: Either __builtin__ (default) or a command to run to getcw
- metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
- provided command to obtain metadata.
* **apply_network_config**: Boolean set to True to use network configuration
described by Azure's IMDS endpoint instead of fallback network config of
dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
@@ -95,20 +60,6 @@ The settings that may be configured are:
custom DHCP option 245 from Azure fabric.
* **disk_aliases**: A dictionary defining which device paths should be
interpreted as ephemeral images. See cc_disk_setup module for more info.
- * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to
- metadata changes. The '``hostname_bounce: command``' entry can be either
- the literal string 'builtin' or a command to execute. The command will be
- invoked after the hostname is set, and will have the 'interface' in its
- environment. If ``set_hostname`` is not true, then ``hostname_bounce``
- will be ignored. An example might be:
-
- ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]``
-
- * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to
- metadata changes. Azure will throttle ifup/down in some cases after metadata
- has been updated to inform dhcp server about updated hostnames.
- * **set_hostname**: Boolean set to True when we want Azure to set the hostname
- based on metadata.
Configuration for the datasource can also be read from a
``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in
@@ -121,18 +72,11 @@ An example configuration with the default values is provided below:
datasource:
Azure:
- agent_command: __builtin__
apply_network_config: true
data_dir: /var/lib/waagent
dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
- hostname_bounce:
- interface: eth0
- command: builtin
- policy: true
- hostname_command: hostname
- set_hostname: true
Userdata
@@ -144,9 +88,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``)
If both ``UserData`` and ``CustomData`` are provided behavior is undefined on
which will be selected.
-In the example below, user-data provided is 'this is my userdata', and the
-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
-That agent command will take affect as if it were specified in system config.
+In the example below, user-data provided is 'this is my userdata'
Example:
@@ -184,21 +126,17 @@ The hostname is provided to the instance in the ovf-env.xml file as
Whatever value the instance provides in its dhcp request will resolve in the
domain returned in the 'search' request.
-The interesting issue is that a generic image will already have a hostname
-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
-system, and the initial dhcp request on eth0 is not guaranteed to occur after
-the datasource code has been run. So, on first boot, that initial value will
-be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
-dhcp request must be made with the new value. Walinuxagent (in its current
-version) handles this by polling the state of hostname and bouncing ('``ifdown
-eth0; ifup eth0``' the network interface if it sees that a change has been
-made.
+A generic image will already have a hostname configured. The ubuntu
+cloud images have 'ubuntu' as the hostname of the system, and the
+initial dhcp request on eth0 is not guaranteed to occur after the
+datasource code has been run. So, on first boot, that initial value
+will be sent in the dhcp request and *that* value will resolve.
-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
-method via '``hostname $HostName``', and then bouncing the interface. This
+In order to make the ``HostName`` provided in the ovf-env.xml resolve,
+a dhcp request must be made with the new value. cloud-init handles
+this by setting the hostname in the DataSource's 'get_data' method via
+'``hostname $HostName``', and then bouncing the interface. This
behavior can be configured or disabled in the datasource config. See
'Configuration' above.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudsigma.rst b/doc/rtd/topics/datasources/cloudsigma.rst
index 86b834c8..dee665a4 100644
--- a/doc/rtd/topics/datasources/cloudsigma.rst
+++ b/doc/rtd/topics/datasources/cloudsigma.rst
@@ -39,4 +39,4 @@ value. If this field does not exist the default value is "net".
.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index a24de34f..e889ab6e 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -46,11 +46,9 @@ An example configuration with the default values is provided below:
CloudStack:
max_wait: 120
timeout: 50
- datasource_list:
- - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
index 4fcbccee..ecc37df6 100644
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ b/doc/rtd/topics/datasources/configdrive.rst
@@ -128,4 +128,4 @@ what all can be present here.
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
.. _the config drive extension: https://docs.openstack.org/nova/latest/admin/config-drive.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 88f1e5f5..801841c1 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -1,7 +1,7 @@
.. _datasource_digital_ocean:
-Digital Ocean
-=============
+DigitalOcean
+============
The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
`metadata service`_. This metadata service serves information about the
@@ -29,4 +29,4 @@ DigitalOcean's datasource can be configured as follows:
.. _metadata service: https://developers.digitalocean.com/metadata/
.. _Full documentation: https://developers.digitalocean.com/metadata/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
index de9a4127..2af6634b 100644
--- a/doc/rtd/topics/datasources/e24cloud.rst
+++ b/doc/rtd/topics/datasources/e24cloud.rst
@@ -2,8 +2,8 @@
E24Cloud
========
-`E24Cloud <https://www.e24cloud.com/en/>` platform provides an AWS Ec2 metadata
+`E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS Ec2 metadata
service clone. It identifies itself to guests using the dmi
system-manufacturer (/sys/class/dmi/id/sys_vendor).
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 274ca1e4..94e4158d 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -121,4 +121,4 @@ Notes
For example: the primary NIC will have a DHCP route-metric of 100,
the next NIC will be 200.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/fallback.rst b/doc/rtd/topics/datasources/fallback.rst
index 2b133fcd..03658f54 100644
--- a/doc/rtd/topics/datasources/fallback.rst
+++ b/doc/rtd/topics/datasources/fallback.rst
@@ -15,4 +15,4 @@ will be so that the user is not left with an inaccessible instance.
**Note:** the instance id that this datasource provides is
``iid-datasource-none``.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst
index 8406695c..70aefea2 100644
--- a/doc/rtd/topics/datasources/gce.rst
+++ b/doc/rtd/topics/datasources/gce.rst
@@ -15,6 +15,28 @@ to provide ``public-keys``.
``user-data`` and ``user-data-encoding`` can be provided to cloud-init by
setting those custom metadata keys for an *instance*.
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **retries**: The number of retries that should be done for an http request.
+ This value is used only after metadata_url is selected. (default: 5)
+ * **sec_between_retries**: The amount of wait time between the retries when
+ crawling the metadata service. (default: 1)
+
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ GCE:
+ retries: 5
+ sec_between_retries: 1
+
.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
new file mode 100644
index 00000000..fa2dcf5d
--- /dev/null
+++ b/doc/rtd/topics/datasources/lxd.rst
@@ -0,0 +1,65 @@
+.. _datasource_lxd:
+
+LXD
+===
+
+The data source ``LXD`` allows the user to provide custom user-data,
+vendor-data, meta-data and network-config to the instance without running
+a network service (or even without having a network at all). This datasource
+performs HTTP GETs against the `LXD socket device`_ which is provided to each
+running LXD container and VM as ``/dev/lxd/sock`` and represents all
+instance-metadata as versioned HTTP routes such as:
+
+ - 1.0/meta-data
+ - 1.0/config/user.meta-data
+ - 1.0/config/user.vendor-data
+ - 1.0/config/user.user-data
+ - 1.0/config/user.<any-custom-key>
+
+The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
+when the instance configuration has ``security.devlxd=true`` (default).
+Disabling the ``security.devlxd`` configuration setting at initial launch will
+ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
+Disabling ``security.devlxd`` over the life of the container will result in
+warnings from cloud-init, and cloud-init will keep the originally detected LXD
+datasource.
+
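+For example, a hypothetical launch that opts out of this datasource up front
+could look like this (the image and instance name are placeholders):
+
+.. code-block:: shell-session
+
+    $ # image and instance name are placeholders
+    $ lxc launch ubuntu:20.04 my-vm -c security.devlxd=false
+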
+The LXD datasource provides cloud-init the opportunity to react to meta-data,
+vendor-data, user-data and network-config changes and render the updated
+configuration across a system reboot.
+
+One can manipulate what meta-data, vendor-data or user-data is provided to
+the launched container through LXD profiles or with
+``lxc launch ... -c <key>="<value>"`` at initial container launch, using one
+of the following keys (an example follows the list):
+
+ - user.meta-data: YAML metadata which will be appended to base meta-data
+ - user.vendor-data: YAML which overrides any meta-data values
+ - user.network-config: YAML representing either :ref:`network_config_v1` or
+ :ref:`network_config_v2` format
+ - user.user-data: YAML which takes preference and overrides both meta-data
+ and vendor-data values
+ - user.any-key: Custom user configuration key and value pairs can be passed to
+ cloud-init. Those keys/values will be present in instance-data which can be
+ used by both `#template: jinja` #cloud-config templates and
+ the `cloud-init query` command.
+
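+For illustration, a hypothetical launch that passes an inline #cloud-config
+document through the ``user.user-data`` key could look like this (the image,
+instance name and package are placeholders, not requirements of the
+datasource):
+
+.. code-block:: shell-session
+
+    $ # image, instance name and package below are only placeholders
+    $ cat user-data.yaml
+    #cloud-config
+    packages:
+      - htop
+    $ lxc launch ubuntu:20.04 my-vm -c user.user-data="$(cat user-data.yaml)"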
+
+By default, network configuration from this datasource will be:
+
+.. code:: yaml
+
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+ control: auto
+
+This datasource is intended to replace the :ref:`datasource_nocloud`
+datasource for LXD instances, offering more direct support for LXD APIs
+instead of static NoCloud seed files.
+
+.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 0ca79102..d31f5d0f 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -50,7 +50,9 @@ These user-data and meta-data files are expected to be in the following format.
/user-data
/meta-data
-Basically, user-data is simply user-data and meta-data is a yaml formatted file
+Both files are required to be present for it to be considered a valid seed ISO.
+
+Basically, user-data is simply user-data and meta-data is a YAML formatted file
representing what you'd find in the EC2 metadata service.
You may also optionally provide a vendor-data file in the following format.
@@ -113,11 +115,11 @@ Example metadata:
Network configuration can also be provided to cloud-init in either
:ref:`network_config_v1` or :ref:`network_config_v2` by providing that
-yaml formatted data in a file named ``network-config``. If found,
+YAML formatted data in a file named ``network-config``. If found,
this file will override a ``network-interfaces`` file.
See an example below. Note specifically that this file does not
-have a top level ``network`` key as it it is already assumed to
+have a top level ``network`` key as it is already assumed to
be network configuration based on the filename.
.. code:: yaml
@@ -149,4 +151,4 @@ be network configuration based on the filename.
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst
index 350a3e93..65570a53 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/topics/datasources/opennebula.rst
@@ -69,13 +69,21 @@ Datasource mode configuration override. Values: local, net, disabled.
ETH<x>_NETWORK
ETH<x>_MASK
ETH<x>_GATEWAY
+ ETH<x>_GATEWAY6
ETH<x>_DOMAIN
ETH<x>_DNS
+ ETH<x>_SEARCH_DOMAIN
+ ETH<x>_MTU
+ ETH<x>_IP6
+ ETH<x>_IP6_ULA
+ ETH<x>_IP6_PREFIX_LENGTH
+ ETH<x>_IP6_GATEWAY
Static `network configuration`_.
::
+ SET_HOSTNAME
HOSTNAME
Instance hostname.
@@ -145,4 +153,4 @@ Example VM's context section
.. _contextualizing VMs: http://opennebula.org/documentation:documentation:cong
.. _network configuration: http://opennebula.org/documentation:documentation:cong#network_configuration
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index b23b4b7c..f523c142 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -12,10 +12,10 @@ Discovery
To determine whether a platform looks like it may be OpenStack, cloud-init
checks the following environment attributes as a potential OpenStack platform:
- * Maybe OpenStack if
+ * Maybe OpenStack if:
* **non-x86 cpu architecture**: because DMI data is buggy on some arches
- * Is OpenStack **if x86 architecture and ANY** of the following
+ * Is OpenStack **if x86 architecture and ANY** of the following:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
@@ -32,7 +32,7 @@ The settings that may be configured are:
* **metadata_urls**: This list of urls will be searched for an OpenStack
metadata service. The first entry that successfully returns a 200 response
- for <url>/openstack will be selected. (default: ['http://169.254.169.254']).
+ for <url>/openstack will be selected. (default: ['http://169.254.169.254'])
* **max_wait**: the maximum amount of clock time in seconds that should be
spent searching metadata_urls. A value less than zero will result in only
one request being made, to the first in the list. (default: -1)
@@ -82,4 +82,12 @@ For more general information about how cloud-init handles vendor data,
including how it can be disabled by users on instances, see
:doc:`/topics/vendordata`.
-.. vi: textwidth=78
+OpenStack can also be configured to provide 'dynamic vendordata',
+which is served by the DynamicJSON provider and appears under a
+different metadata path, /vendor_data2.json.
+
+Cloud-init will look for a ``cloud-init`` entry at the vendor_data2 path; if
+found, its settings are applied after (and hence override) the settings from
+static vendor data. Both sets of vendor data can be overridden by user data.
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
index 98c4657c..7e480021 100644
--- a/doc/rtd/topics/datasources/oracle.rst
+++ b/doc/rtd/topics/datasources/oracle.rst
@@ -46,4 +46,4 @@ An example configuration with the default values is provided below:
configure_secondary_nics: false
.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 6256e624..d6eb75da 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -13,6 +13,19 @@ source code tree in doc/sources/ovf
Configuration
-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * disable_vmware_customization: disable or enable the vmware customization
+ based on vmware customization files. (default: True)
+ * allow_raw_data: enable or disable the vmware customization based on raw
+ cloud-init data including metadata and userdata. (default: True)
+ * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
+ should be spent waiting for vmware customization files. (default: 15)
+
+
On VMware platforms, VMTools use is required for OVF datasource configuration
settings as well as vCloud and vSphere admin configuration. User could change
the VMTools configuration options with command::
@@ -26,8 +39,8 @@ The following VMTools configuration options affect cloud-init's behavior on a bo
change this default behavior (for example: enabled by default) via
customization specification settings.
-VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
+VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
-For more information, see [VMware vSphere Product Documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html) and specific VMTools parameters consumed.
+For more information, see `VMware vSphere Product Documentation <https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html>`_ and specific VMTools parameters consumed.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst
index 52ec02ff..c4b3f2d0 100644
--- a/doc/rtd/topics/datasources/rbxcloud.rst
+++ b/doc/rtd/topics/datasources/rbxcloud.rst
@@ -22,4 +22,4 @@ is restarted, if the partition exists. For more information see
.. _HyperOne Virtual Machine docs: http://www.hyperone.com/
.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
index be11dfbb..55604ffb 100644
--- a/doc/rtd/topics/datasources/smartos.rst
+++ b/doc/rtd/topics/datasources/smartos.rst
@@ -13,7 +13,7 @@ SmartOS Platform
The SmartOS virtualization platform uses meta-data to the instance via the
second serial console. On Linux, this is /dev/ttyS1. The data is a provided
via a simple protocol: something queries for the data, the console responds
-responds with the status and if "SUCCESS" returns until a single ".\n".
+with the status and if "SUCCESS" returns until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded
data.
@@ -165,4 +165,4 @@ You can control the disk_setup then in 2 ways:
See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst
new file mode 100644
index 00000000..75f438ee
--- /dev/null
+++ b/doc/rtd/topics/datasources/upcloud.rst
@@ -0,0 +1,24 @@
+.. _datasource_upcloud:
+
+UpCloud
+=============
+
+The `UpCloud`_ datasource consumes information from UpCloud's `metadata
+service`_. This metadata service serves information about the
+running server via HTTP over the address 169.254.169.254 available in every
+DHCP-configured interface. The metadata API endpoints are fully described in
+UpCloud API documentation at
+`https://developers.upcloud.com/1.3/8-servers/#metadata-service
+<https://developers.upcloud.com/1.3/8-servers/#metadata-service>`_.
+
+Providing user-data
+-------------------
+
+When creating a server, user-data is provided by specifying it as ``user_data``
+in the API or via the server creation tool in the control panel. User-data is
+immutable during the server's lifetime and can be removed by deleting the
+server.
+
+.. _UpCloud: https://upcloud.com/
+.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
new file mode 100644
index 00000000..f1f48117
--- /dev/null
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -0,0 +1,358 @@
+.. _datasource_vmware:
+
+VMware
+======
+
+This datasource is for use with systems running on a VMware platform such as
+vSphere and currently supports the following data transports:
+
+
+* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmchange>`_ keys
+
+Configuration
+-------------
+
+The configuration method is dependent upon the transport:
+
+GuestInfo Keys
+^^^^^^^^^^^^^^
+
+One method of providing meta, user, and vendor data is by setting the following
+key/value pairs on a VM's ``extraConfig`` `property <https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html>`_:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Property
+ - Description
+ * - ``guestinfo.metadata``
+ - A YAML or JSON document containing the cloud-init metadata.
+ * - ``guestinfo.metadata.encoding``
+ - The encoding type for ``guestinfo.metadata``.
+ * - ``guestinfo.userdata``
+ - A YAML document containing the cloud-init user data.
+ * - ``guestinfo.userdata.encoding``
+ - The encoding type for ``guestinfo.userdata``.
+ * - ``guestinfo.vendordata``
+ - A YAML document containing the cloud-init vendor data.
+ * - ``guestinfo.vendordata.encoding``
+ - The encoding type for ``guestinfo.vendordata``.
+
+
+All ``guestinfo.*.encoding`` values may be set to ``base64`` or
+``gzip+base64``.
+
+Features
+--------
+
+This section reviews several features available in this datasource, regardless
+of how the meta, user, and vendor data was discovered.
+
+Instance data and lazy networks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One of the hallmarks of cloud-init is `its use of instance-data and JINJA
+queries <../instancedata.html#using-instance-data>`_
+-- the ability to write queries in user and vendor data that reference runtime
+information present in ``/run/cloud-init/instance-data.json``. This works well
+when the metadata provides all of the information up front, such as the network
+configuration. For systems that rely on DHCP, however, this information may not
+be available when the metadata is persisted to disk.
+
+This datasource ensures that even if the instance is using DHCP to configure
+networking, the same details about the configured network are available in
+``/run/cloud-init/instance-data.json`` as if static networking was used. This
+information collected at runtime is easy to demonstrate by executing the
+datasource on the command line. From the root of this repository, run the
+following command:
+
+.. code-block:: bash
+
+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
+
+The above command will result in output similar to the below JSON:
+
+.. code-block:: json
+
+ {
+ "hostname": "akutz.localhost",
+ "local-hostname": "akutz.localhost",
+ "local-ipv4": "192.168.0.188",
+ "local_hostname": "akutz.localhost",
+ "network": {
+ "config": {
+ "dhcp": true
+ },
+ "interfaces": {
+ "by-ipv4": {
+ "172.0.0.2": {
+ "netmask": "255.255.255.255",
+ "peer": "172.0.0.2"
+ },
+ "192.168.0.188": {
+ "broadcast": "192.168.0.255",
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "255.255.255.0"
+ }
+ },
+ "by-ipv6": {
+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+ "flags": 208,
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ },
+ "by-mac": {
+ "64:4b:f0:18:9a:21": {
+ "ipv4": [
+ {
+ "addr": "192.168.0.188",
+ "broadcast": "192.168.0.255",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "ipv6": [
+ {
+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+ "flags": 208,
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ ]
+ },
+ "ac:de:48:00:11:22": {
+ "ipv6": []
+ }
+ }
+ }
+ },
+ "wait-on-network": {
+ "ipv4": true,
+ "ipv6": "false"
+ }
+ }
+
+
+Redacting sensitive information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes the cloud-init userdata might contain sensitive information, and it
+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
+keys) redacted as soon as its data is read by the datasource. This is possible
+by adding the following to the metadata:
+
+.. code-block:: yaml
+
+ redact: # formerly named cleanup-guestinfo, which will also work
+ - userdata
+ - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the guestinfo transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+ vmware-rpctool "info-set guestinfo.userdata ---"
+ vmware-rpctool "info-set guestinfo.userdata.encoding "
+ vmware-rpctool "info-set guestinfo.vendordata ---"
+ vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the guestinfo key-space. A key's analogous
+encoding property will be set to a single white-space character, causing the
+datasource to treat the actual key value as plain-text, thereby loading it as
+an empty YAML doc (hence the aforementioned ``---``\ ).
+
+Reading the local IP addresses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered, local IPv4 and
+IPv6 addresses back in the guestinfo namespace as the following keys:
+
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default, local IP addresses. It's
+also possible the reported, local addresses are link-local addresses. But these
+two keys may be used to discover what this datasource determined were the local
+IPv4 and IPv6 addresses for a host.
+
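+As a sketch, these keys could be inspected from outside the guest with the
+``govc`` CLI used later in the walkthrough (the inventory path below is a
+placeholder):
+
+.. code-block:: shell
+
+    # the inventory path is a placeholder
+    govc vm.info -e "/inventory/path/to/the/vm" | grep guestinfo.local-
+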
+Waiting on the network
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes cloud-init may bring up the network, but it will not finish coming
+online before the datasource's ``setup`` function is called, resulting in an
+``/var/run/cloud-init/instance-data.json`` file that does not have the correct
+network information. It is possible to instruct the datasource to wait until an
+IPv4 or IPv6 address is available before writing the instance data with the
+following metadata properties:
+
+.. code-block:: yaml
+
+ wait-on-network:
+ ipv4: true
+ ipv6: true
+
+If either of the above values are true, then the datasource will sleep for a
+second, check the network status, and repeat until one or both addresses from
+the specified families are available.
+
+Walkthrough
+-----------
+
+The following series of steps is a demonstration on how to configure a VM with
+this datasource:
+
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+ ``metadata.yaml``\ :
+
+ .. code-block:: yaml
+
+ instance-id: cloud-vm
+ local-hostname: cloud-vm
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+
+#. Create the userdata file ``userdata.yaml``\ :
+
+ .. code-block:: yaml
+
+ #cloud-config
+
+ users:
+ - default
+ - name: akutz
+ primary_group: akutz
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: sudo, wheel
+ lock_passwd: true
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+ commands below use the VMware CLI tool, `govc <https://github.com/vmware/govmomi/blob/master/govc>`_.
+
+ Go ahead and assign the path to the VM to the environment variable ``VM``\ :
+
+ .. code-block:: shell
+
+ export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+ .. raw:: html
+
+ <hr />
+
+ &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
+
+ To ensure the next power-on operation results in a first-boot scenario for
+ cloud-init, it may be necessary to run the following command just before
+ powering off the VM:
+
+ .. code-block:: bash
+
+ cloud-init clean
+
+ Otherwise cloud-init may not run in first-boot mode. For more information
+ on how the boot mode is determined, please see the
+ `First Boot Documentation <../boot.html#first-boot-determination>`_.
+
+ .. raw:: html
+
+ <hr />
+
+ .. code-block:: shell
+
+ govc vm.power -off "${VM}"
+
+#.
+ Export the environment variables that contain the cloud-init metadata and
+ userdata:
+
+ .. code-block:: shell
+
+ export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+ USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#.
+ Assign the metadata and userdata to the VM:
+
+ .. code-block:: shell
+
+ govc vm.change -vm "${VM}" \
+ -e guestinfo.metadata="${METADATA}" \
+ -e guestinfo.metadata.encoding="gzip+base64" \
+ -e guestinfo.userdata="${USERDATA}" \
+ -e guestinfo.userdata.encoding="gzip+base64"
+
+ Please note the above commands include specifying the encoding for the
+ properties. This is important as it informs the datasource how to decode
+ the data for cloud-init. Valid values for ``metadata.encoding`` and
+ ``userdata.encoding`` include:
+
+
+ * ``base64``
+ * ``gzip+base64``
+
+#.
+ Power on the VM:
+
+ .. code-block:: shell
+
+ govc vm.power -vm "${VM}" -on
+
+If all went according to plan, the CentOS box is:
+
+* Locked down, allowing SSH access only for the user in the userdata
+* Configured for a dynamic IP address via DHCP
+* Configured with a hostname of ``cloud-vm``
+
+Examples
+--------
+
+This section reviews common configurations:
+
+Setting the hostname
+^^^^^^^^^^^^^^^^^^^^
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent then the instance ID is read from the file
+``/sys/class/dmi/id/product_uuid``.
+
+Providing public SSH keys
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in the distro's default user's
+``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will
+be written to ``~/.ssh/authorized_keys``.
+
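+A minimal sketch of such metadata (the key material below is a placeholder):
+
+.. code-block:: yaml
+
+    # the key material is a placeholder, not a real public key
+    public-keys-data: |
+      ssh-ed25519 AAAAC3Nza...example user@example.com
+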
+Configuring the network
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config Versions
+`1 <../network-config-format-v1.html>`_ or
+`2 <../network-config-format-v2.html>`_\ , depending on the Linux
+distro's version of cloud-init.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst
new file mode 100644
index 00000000..f8601700
--- /dev/null
+++ b/doc/rtd/topics/datasources/vultr.rst
@@ -0,0 +1,35 @@
+.. _datasource_vultr:
+
+Vultr
+=====
+
+The `Vultr`_ datasource retrieves basic configuration values from the locally
+accessible `metadata service`_. All data is served over HTTP from the address
+169.254.169.254. The endpoints are documented in
+`https://www.vultr.com/metadata/
+<https://www.vultr.com/metadata/>`_
+
+Configuration
+-------------
+
+Vultr's datasource can be configured as follows:
+
+.. code-block:: yaml
+
+  datasource:
+    Vultr:
+      url: 'http://169.254.169.254'
+      retries: 3
+      timeout: 2
+      wait: 2
+
+- *url*: The URL used to acquire the metadata configuration from
+- *retries*: Determines the number of times to attempt to connect to the
+ metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+ metadata service
+- *wait*: Determines the timeout in seconds to wait before retrying after
+  a failed attempt to reach the metadata service
+
+.. _Vultr: https://www.vultr.com/
+.. _metadata service: https://www.vultr.com/metadata/
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst
index 93a2791c..6630ad9f 100644
--- a/doc/rtd/topics/datasources/zstack.rst
+++ b/doc/rtd/topics/datasources/zstack.rst
@@ -34,4 +34,4 @@ Same as EC2, instance userdata can be queried at
user_data
password
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index 0d416f32..a4a2779f 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -1,6 +1,6 @@
-********************************
-Testing and debugging cloud-init
-********************************
+********************
+Debugging cloud-init
+********************
Overview
========
@@ -88,7 +88,7 @@ To quickly obtain a cloud-init log try using lxc on any ubuntu system:
.. code-block:: shell-session
- $ lxc init ubuntu-daily:xenial x1
+ $ lxc init ubuntu-daily:focal x1
$ lxc start x1
$ # Take lxc's cloud-init.log and pipe it to the analyzer
$ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i -
@@ -104,13 +104,13 @@ To quickly analyze a KVM a cloud-init log:
.. code-block:: shell-session
- $ wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img
+ $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img
2. Create a snapshot image to preserve the original cloud-image
.. code-block:: shell-session
- $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \
+ $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \
test-cloudinit.qcow2
3. Create a seed image with metadata using `cloud-localds`
@@ -258,9 +258,9 @@ from **-proposed**
* Create a `new cloud-init bug`_ reporting the version of cloud-init
affected
- * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_
+ * Ping upstream cloud-init on Libera's `#cloud-init IRC channel`_
.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index ebd63ae7..9d2c9896 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -84,4 +84,4 @@ application::
semaphore `files` which are only supposed to run `per-once` (not tied to the
instance id).
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
new file mode 100644
index 00000000..1a562fb4
--- /dev/null
+++ b/doc/rtd/topics/events.rst
@@ -0,0 +1,95 @@
+.. _events:
+
+******************
+Events and Updates
+******************
+
+Events
+======
+
+`Cloud-init`_ will fetch and apply cloud and user data configuration
+upon several event types. The two most common events for cloud-init
+are when an instance first boots and any subsequent boot thereafter (reboot).
+In addition to boot events, cloud-init users and vendors are interested
+in when devices are added. cloud-init currently supports the following
+event types:
+
+- **BOOT_NEW_INSTANCE**: New instance first boot
+- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE'
+- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each
+ boot: once during Local stage, then again in Network stage. As this behavior
+ was previously the default behavior, this option exists to prevent regressing
+ such behavior.
+- **HOTPLUG**: Dynamic add of a system device
+
+Future work will likely include infrastructure and support for the following
+events:
+
+- **METADATA_CHANGE**: An instance's metadata has changed
+- **USER_REQUEST**: Directed request to update
+
+Datasource Event Support
+========================
+
+All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event.
+Each Datasource will declare a set of these events that it is capable of
+handling. Datasources may not support all event types. In some cases a system
+may be configured to allow a particular event but may be running on
+a platform whose datasource cannot support the event.
+
+Configuring Event Updates
+=========================
+
+Update configuration may be specified via user data,
+which can be used to enable or disable handling of specific events.
+This configuration will be honored as long as the events are supported by
+the datasource. However, configuration will always be applied at first
+boot, regardless of the user data specified.
+
+Updates
+~~~~~~~
+Update policy configuration defines which
+events are allowed to be handled. This is separate from whether a
+particular platform or datasource has the capability for such events.
+
+**scope**: *<name of the scope for event policy>*
+
+The ``scope`` value is a string that defines the domain in which the event
+occurs. Currently the only known scope is ``network``, though more scopes may
+be added in the future. Scopes are defined by convention, but arbitrary values
+can be used.
+
+**when**: *<list of events to handle for a particular scope>*
+
+Each ``scope`` requires a ``when`` element to specify which events
+are allowed to be handled.
+
+Hotplug
+=======
+When the hotplug event is supported by the data source and configured in
+user data, cloud-init will respond to the addition or removal of network
+interfaces to the system. In addition to fetching and updating the system
+metadata, cloud-init will also bring up/down the newly added interface.
+
+.. warning:: Due to its use of systemd sockets, hotplug functionality
+ is currently incompatible with SELinux. This issue is being tracked
+ `on Launchpad`_. Additionally, hotplug support is considered experimental for
+ non-Debian based systems.
+
+Examples
+========
+
+apply network config every boot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+On every boot, apply network configuration found in the datasource.
+
+.. code-block:: yaml
+
+ # apply network config on every boot
+ updates:
+ network:
+ when: ['boot']
+
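+apply network config on boot and hotplug
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A sketch combining boot and hotplug handling, assuming the datasource and
+platform support the ``HOTPLUG`` event described above.
+
+.. code-block:: yaml
+
+   # apply network config on every boot and when a network device is added
+   updates:
+     network:
+       when: ['boot', 'hotplug']
+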
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 81860f85..8c7071e5 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -149,8 +149,8 @@ Disk setup
:language: yaml
:linenos:
-Register RedHat Subscription
-============================
+Register Red Hat Subscription
+=============================
.. literalinclude:: ../../examples/cloud-config-rh_subscription.txt
:language: yaml
@@ -179,4 +179,4 @@ Grow partitions
.. _chef: http://www.chef.io/chef/
.. _puppet: http://puppetlabs.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 27fabf15..125ce9f4 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -10,7 +10,7 @@ Having trouble? We would like to help!
- First go through this page with answers to common questions
- Use the search bar at the upper left to search these docs
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? Check out the :ref:`reporting_bugs` topic for
how to report one
@@ -56,7 +56,7 @@ instance
--------
The `/var/lib/cloud/instance` directory is a symbolic link that points
-to the most recenlty used instance-id directory. This folder contains the
+to the most recently used instance-id directory. This folder contains the
information cloud-init received from datasources, including vendor and user
data. This can be helpful to review to ensure the correct data was passed.
@@ -74,9 +74,9 @@ previous boot:
* `instance-id`: id of the instance as discovered by cloud-init. Changing
this file has no effect.
* `result.json`: json file will show both the datasource used to setup
- the instance, and if any errors occured
+ the instance, and if any errors occurred
* `status.json`: json file shows the datasource used and a break down
- of all four modules if any errors occured and the start and stop times.
+ of all four modules if any errors occurred and the start and stop times.
What datasource am I using?
===========================
@@ -139,7 +139,7 @@ cloud-config is:
To verify your YAML, we do have a short script called `validate-yaml.py`_
that can validate your user data offline.
-.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
Another option is to run the following on an instance to debug userdata
provided to the system:
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index d03e4caf..93ef34f0 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -23,9 +23,11 @@ Using a mime-multi part file, the user can specify more than one type of data.
For example, both a user data script and a cloud-config type could be
specified.
-Supported content-types are listed from the cloud-init subcommand make-mime::
+Supported content-types are listed from the cloud-init subcommand make-mime:
- % cloud-init devel make-mime --list-types
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime --list-types
cloud-boothook
cloud-config
cloud-config-archive
@@ -36,6 +38,9 @@ Supported content-types are listed from the cloud-init subcommand make-mime::
x-include-once-url
x-include-url
x-shellscript
+ x-shellscript-per-boot
+ x-shellscript-per-instance
+ x-shellscript-per-once
Helper subcommand to generate mime messages
@@ -45,13 +50,28 @@ The cloud-init subcommand can generate MIME multi-part files: `make-mime`_.
``make-mime`` subcommand takes pairs of (filename, "text/" mime subtype)
separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
-multipart message to stdout. An example invocation, assuming you have your
-cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want
-to store the multipart message in ``user-data``::
+multipart message to stdout.
+
+Examples
+--------
+Create userdata containing both a cloud-config (``config.yaml``)
+and a shell script (``script.sh``)
+
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > userdata
+
+Create userdata containing 3 shell scripts:
+
+- ``always.sh`` - Run every boot
+- ``instance.sh`` - Run once per instance
+- ``once.sh`` - Run once
+
+.. code-block:: shell-session
- % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
+ $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once
-.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
+.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
User-Data Script
@@ -70,7 +90,7 @@ archive.
Example
-------
-::
+.. code-block:: shell-session
$ cat myscript.sh
@@ -85,7 +105,7 @@ Include File
This content is a ``include`` file.
The file contains a list of urls, one per line. Each of the URLs will be read,
-and their content will be passed through this same set of rules. Ie, the
+and their content will be passed through this same set of rules. I.e., the
content read from the URL can be gzipped, mime-multi-part, or plain text. If
an error occurs reading a file the remaining files will not be read.
@@ -108,7 +128,7 @@ These things include:
- *and many more...*
.. note::
- This file must be valid yaml syntax.
+ This file must be valid YAML syntax.
See the :ref:`yaml_examples` section for a commented set of examples of
supported cloud config formats.
@@ -205,4 +225,4 @@ cloud-init from processing user-data.
.. [#] See your cloud provider for applicable user-data size limitations...
.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/hacking.rst b/doc/rtd/topics/hacking.rst
deleted file mode 100644
index 5ec25bfb..00000000
--- a/doc/rtd/topics/hacking.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-.. include:: ../../../HACKING.rst
-.. vi: textwidth=78
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index 1850982c..f08ead69 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -30,7 +30,7 @@ deployed with cloud-init:
* simple static object to query to obtain a instance's metadata
* speed: avoid costly network transactions for metadata that is already cached
- on the filesytem
+ on the filesystem
* reduce need to recrawl metadata services for static metadata that is already
cached
* leverage cloud-init's best practices for crawling cloud-metadata services
@@ -180,7 +180,7 @@ Example output:
v1.platform
-------------
-An attempt to identify the cloud platfrom instance that the system is running
+An attempt to identify the cloud platform instance that the system is running
on.
Examples output:
@@ -509,14 +509,19 @@ EC2 instance:
Using instance-data
===================
-As of cloud-init v. 18.4, any variables present in
-``/run/cloud-init/instance-data.json`` can be used in:
+As of cloud-init v. 18.4, any instance-data can be used in:
* User-data scripts
* Cloud config data
* Command line interface via **cloud-init query** or
**cloud-init devel render**
+This means that any variable present in
+``/run/cloud-init/instance-data-sensitive.json`` can be used,
+unless a non-root user is using the command line interface.
+In the non-root user case,
+``/run/cloud-init/instance-data.json`` will be used instead.
+
Many clouds allow users to provide user-data to an instance at
the time the instance is launched. Cloud-init supports a number of
:ref:`user_data_formats`.
@@ -525,12 +530,18 @@ Both user-data scripts and **#cloud-config** data support jinja template
rendering.
When the first line of the provided user-data begins with,
**## template: jinja** cloud-init will use jinja to render that file.
-Any instance-data-sensitive.json variables are surfaced as dot-delimited
-jinja template variables because cloud-config modules are run as 'root'
-user.
+Any instance-data-sensitive.json variables are surfaced as jinja template
+variables because cloud-config modules are run as 'root' user.
+.. note::
+ cloud-init also provides jinja-safe key aliases for any instance-data.json
+ keys which contain jinja operator characters such as +, -, ., /, etc. Any
+ jinja operator will be replaced with underscores in the jinja-safe key
+ alias. This allows for cloud-init templates to use aliased variable
+ references which allow for jinja's dot-notation reference such as
+ ``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``.
-Below are some examples of providing these types of user-data:
+Below are some other examples of using jinja templates in user-data:
* Cloud config calling home with the ec2 public hostname and availability-zone
@@ -559,9 +570,39 @@ Below are some examples of providing these types of user-data:
{%- endif %}
...
+One way to easily explore what Jinja variables are available on your machine
+is to use the ``cloud-init query --format`` (``-f``) command-line option,
+which will render any Jinja syntax you use. Warnings or exceptions will be
+raised on invalid instance-data keys, paths, or invalid syntax.
+
+.. code-block:: shell-session
+
+ # List all instance-data keys and values as root user
+ % sudo cloud-init query --all
+ {...}
+
+ # Introspect nested keys on an object
+ % cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Test your Jinja rendering syntax on the command-line directly
+
+ # Failure to reference valid top-level instance-data key
+ % cloud-init query -f "{{invalid.instance-data.key}}"
+ WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ % cloud-init query -f "{{v1.not_here}}"
+ WARNING: Could not render jinja template variables in file 'query commandline': 'not_here'
+ CI_MISSING_JINJA_VAR/not_here
+
+ # Test expected value using valid instance-data key path
+ % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
.. note::
Trying to reference jinja variables that don't exist in
- instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+ instance-data will result in warnings in ``/var/log/cloud-init.log``
and the following string in your rendered user-data:
``CI_MISSING_JINJA_VAR/<your_varname>``.
@@ -614,4 +655,4 @@ see only redacted values.
instance booted on your favorite cloud. See :ref:`cli_devel` for more
information.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst
index aeda326c..f9f719da 100644
--- a/doc/rtd/topics/integration_tests.rst
+++ b/doc/rtd/topics/integration_tests.rst
@@ -9,11 +9,96 @@ Overview
Integration tests are written using pytest and are located at
``tests/integration_tests``. General design principles
-laid out in :ref:`unit_testing` should be followed for integration tests.
+laid out in :ref:`testing` should be followed for integration tests.
Setup is accomplished via a set of fixtures located in
``tests/integration_tests/conftest.py``.
+Test Definition
+===============
+Tests are defined like any other pytest test. The ``user_data``
+mark can be used to supply the cloud-config user data. Platform specific
+marks can be used to limit tests to particular platforms. The
+client fixture can be used to interact with the launched
+test instance.
+
+See the :ref:`Examples` section for examples.
+
+Test Execution
+==============
+Test execution happens via pytest. A tox definition exists to run integration
+tests. To run all integration tests, you would run:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests
+
+Pytest arguments may also be passed. For example:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
+
+Configuration
+=============
+
+All possible configuration values are defined in
+`tests/integration_tests/integration_settings.py <https://github.com/canonical/cloud-init/blob/main/tests/integration_tests/integration_settings.py>`_.
+Defaults can be
+overridden by supplying values in ``tests/integration_tests/user_settings.py``
+or by providing an environment variable of the same name prepended with
+``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
+
+.. code-block:: bash
+
+ CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+
+
+Cloud Interaction
+=================
+Cloud interaction happens via the
+`pycloudlib <https://pycloudlib.readthedocs.io/en/latest/index.html>`_ library.
+In order to run integration tests, pycloudlib must first be
+`configured <https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration>`_.
+
+For a minimal setup using LXD, write the following to
+``~/.config/pycloudlib.toml``:
+
+.. code-block:: toml
+
+ [lxd]
+
+
+Image Selection
+===============
+
+Each integration testing run uses a single image as its basis. This
+image is configured using the ``OS_IMAGE`` variable; see
+:ref:`Configuration` for details of how configuration works.
+
+``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
+"focal"), or an image specification. If an Ubuntu series name is
+given, then the most recent image for that series on the target cloud
+will be used. For other use cases, an image specification is used.
+
+In its simplest form, an image specification can simply be a cloud's
+image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the
+image so-identified will be used as the basis for this testing run.
+
+This has a drawback, however: as we do not know what OS or release is
+within the image, the integration testing framework will run *all*
+tests against the image in question. If it's a RHEL8 image, then we
+would expect Ubuntu-specific tests to fail (and vice versa).
+
+To address this, a full image specification can be given. This is of
+the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
+cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
+release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
+``ubuntu:bionic::ubuntu::bionic`` or RHEL 8 on Amazon is
+``ami-justanexample::rhel::8``. When a full specification is given,
+only tests which are intended for use on that OS and release will be
+executed.
+
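+As a sketch that reuses the example image specification above and the
+``CLOUD_INIT_`` environment variable mechanism from :ref:`Configuration`, a
+run pinned to that RHEL 8 image might look like:
+
+.. code-block:: bash
+
+    CLOUD_INIT_OS_IMAGE='ami-justanexample::rhel::8' tox -e integration-tests
+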
Image Setup
===========
@@ -34,48 +119,90 @@ is implemented via one of the ``client`` fixtures. When a client fixture
is used, a test instance from which to run tests is launched prior to
test execution and torn down after.
-Test Definition
-===============
-Tests are defined like any other pytest test. The ``user_data``
-mark can be used to supply the cloud-config user data. Platform specific
-marks can be used to limit tests to particular platforms. The
-client fixture can be used to interact with the launched
-test instance.
-
-A basic example:
+Continuous Integration
+======================
+A subset of the integration tests is run when a pull request
+is submitted on GitHub. The tests run on these continuous
+integration (CI) runs are given a pytest mark:
.. code-block:: python
- USER_DATA = """#cloud-config
- bootcmd:
- - echo 'hello config!' > /tmp/user_data.txt"""
-
+ @pytest.mark.ci
+
+Most new tests should *not* use this mark, so be aware that having a
+successful CI run does not necessarily mean that your test passed
+successfully.
+
+Fixtures
+========
+Integration tests rely heavily on fixtures to do initial test setup.
+One or more of these fixtures will be used in almost every integration test.
+
+Details such as the cloud platform or initial image to use are determined
+via what is specified in the :ref:`Configuration`.
+
+client
+------
+The ``client`` fixture should be used for most test cases. It ensures:
+
+- All setup performed by :ref:`session_cloud` and :ref:`setup_image`
+- `Pytest marks <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224>`_
+ used during instance creation are obtained and applied
+- The test instance is launched
+- Test failure status is determined after test execution
+- Logs are collected (if configured) after test execution
+- The test instance is torn down after test execution
+
+``module_client`` and ``class_client`` fixtures also exist for the
+purpose of running multiple tests against a single launched instance.
+They provide the exact same functionality as ``client``, but are
+scoped to the module or class respectively.
+
+session_cloud
+-------------
+The ``session_cloud`` session-scoped fixture will provide an
+`IntegrationCloud <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102>`_
+instance for the currently configured cloud. The fixture also
+ensures that any custom cloud session cleanup is performed.
+
+setup_image
+-----------
+The ``setup_image`` session-scope fixture will
+create a new image to launch all further cloud instances
+during this test run. It ensures:
+
+- A cloud instance is launched on the configured platform
+- The version of cloud-init under test is installed on the instance
+- ``cloud-init clean --logs`` is run on the instance
+- A snapshot of the instance is taken to be used as the basis for
+ future instance launches
+- The originally launched instance is torn down
+- The custom created image is torn down after all tests finish
+
+Examples
+--------
+A simple test case using the ``client`` fixture:
- class TestSimple:
- @pytest.mark.user_data(USER_DATA)
- @pytest.mark.ec2
- def test_simple(self, client):
- print(client.exec('cloud-init -v'))
-
-Test Execution
-==============
-Test execution happens via pytest. To run all integration tests,
-you would run:
-
-.. code-block:: bash
+.. code-block:: python
- pytest tests/integration_tests/
+ USER_DATA = """\
+ #cloud-config
+ bootcmd:
+ - echo 'hello!' > /var/tmp/hello.txt
+ """
-Configuration
-=============
+ @pytest.mark.user_data(USER_DATA)
+ def test_bootcmd(client):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Shellified 1 commands." in log
+ assert client.execute('cat /var/tmp/hello.txt').strip() == "hello!"
-All possible configuration values are defined in
-``tests/integration_tests/integration_settings.py``. Defaults can be
-overridden by supplying values in ``tests/integration_tests/user_settings.py``
-or by providing an environment variable of the same name prepended with
-``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
+Customizing the launch arguments before launching an instance manually:
-.. code-block:: bash
+.. code-block:: python
- CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+ def test_launch(session_cloud: IntegrationCloud, setup_image):
+ with session_cloud.launch(launch_kwargs={"wait": False}) as client:
+ client.instance.wait()
+ assert client.execute("echo hello world").strip() == "hello world"
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
index 4fd7e28e..a14fb685 100644
--- a/doc/rtd/topics/logging.rst
+++ b/doc/rtd/topics/logging.rst
@@ -52,9 +52,9 @@ module using the standard python fileConfig format. Cloud-init looks for
config for the logging module under the ``logcfg`` key.
.. note::
- the logging configuration is not yaml, it is python ``fileConfig`` format,
+ the logging configuration is not YAML, it is python ``fileConfig`` format,
and is passed through directly to the python logging module. please use the
- correct syntax for a multi-line string in yaml.
+ correct syntax for a multi-line string in YAML.
By default, cloud-init uses the logging configuration provided in
``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default python logging
@@ -173,4 +173,4 @@ For more information on rsyslog configuration, see :ref:`cc_rsyslog`.
.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
.. _python logging formatters: https://docs.python.org/3/library/logging.html#formatter-objects
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 2b5e5dad..204719eb 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -6,8 +6,8 @@ Overview
========
This was implemented because it has been a common feature request that there be
-a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yaml files to merge together (say when
+a way to specify how cloud-config YAML "dictionaries" provided as user-data are
+merged together when there are multiple YAML files to merge together (say when
performing an #include).
Since previously the merging algorithm was very simple and would only overwrite
@@ -236,7 +236,7 @@ Other uses
==========
In addition to being used for merging user-data sections, the default merging
-algorithm for merging 'conf.d' yaml files (which form an initial yaml config
+algorithm for merging 'conf.d' YAML files (which form an initial YAML config
for cloud-init) was also changed to use this mechanism so its full
benefits (and customization) can also be used there as well. Other places that
used the previous merging are also, similarly, now extensible (metadata
@@ -285,4 +285,4 @@ The second config
- bash4
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index e30fe0fe..093cee61 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -22,6 +22,8 @@ Modules
.. automodule:: cloudinit.config.cc_foo
.. automodule:: cloudinit.config.cc_growpart
.. automodule:: cloudinit.config.cc_grub_dpkg
+.. automodule:: cloudinit.config.cc_install_hotplug
+.. automodule:: cloudinit.config.cc_keyboard
.. automodule:: cloudinit.config.cc_keys_to_console
.. automodule:: cloudinit.config.cc_landscape
.. automodule:: cloudinit.config.cc_locale
@@ -62,4 +64,4 @@ Modules
.. automodule:: cloudinit.config.cc_users_groups
.. automodule:: cloudinit.config.cc_write_files
.. automodule:: cloudinit.config.cc_yum_add_repo
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-eni.rst b/doc/rtd/topics/network-config-format-eni.rst
index b0904952..94fa0f9e 100644
--- a/doc/rtd/topics/network-config-format-eni.rst
+++ b/doc/rtd/topics/network-config-format-eni.rst
@@ -17,4 +17,4 @@ Please reference existing `documentation`_ for the
.. _Cloud-init: https://launchpad.net/cloud-init
.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 92e81897..68a9cefa 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -48,7 +48,7 @@ the key ``subnets``.
Physical
~~~~~~~~
The ``physical`` type configuration represents a "physical" network device,
-typically Ethernet-based. At least one of of these entries is required for
+typically Ethernet-based. At least one of these entries is required for
external network connectivity. Type ``physical`` requires only one key:
``name``. A ``physical`` device may contain some or all of the following
keys:
@@ -62,7 +62,8 @@ structure.
**mac_address**: *<MAC Address>*
The MAC Address is a device unique identifier that most Ethernet-based network
-devices possess. Specifying a MAC Address is optional.
+devices possess. Specifying a MAC Address is optional.
+Letters must be lowercase.
.. note::
@@ -334,7 +335,11 @@ Users can specify a ``nameserver`` type. Nameserver dictionaries include
the following keys:
- ``address``: List of IPv4 or IPv6 address of nameservers.
-- ``search``: List of of hostnames to include in the resolv.conf search path.
+- ``search``: List of hostnames to include in the resolv.conf search path.
+- ``interface``: Optional. Ties the nameserver definition to the specified
+ interface. The value specified here must match the `name` of an interface
+ defined in this config. If unspecified, this nameserver will be considered
+ a global nameserver.
**Nameserver Example**::
@@ -349,6 +354,7 @@ the following keys:
address: 192.168.23.14/27
gateway: 192.168.23.1
- type: nameserver
+ interface: interface0 # Ties nameserver to interface0 only
address:
- 192.168.23.2
- 8.8.8.8
@@ -414,9 +420,19 @@ Subnet types are one of the following:
- ``dhcp6``: Configure this interface with IPv6 dhcp.
- ``static``: Configure this interface with a static IPv4.
- ``static6``: Configure this interface with a static IPv6 .
+- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6``
+- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP
+- ``ipv6_slaac``: Configure address with SLAAC
-When making use of ``dhcp`` types, no additional configuration is needed in
-the subnet dictionary.
+When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types,
+no additional configuration is needed in the subnet dictionary.
+
+Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to be
+automatically configured with StateLess Address AutoConfiguration (`SLAAC`_).
+SLAAC requires support from the network, so verify that your cloud or network
+offering has support before trying it out. With ``ipv6_dhcpv6-stateless``,
+DHCPv6 is still used to fetch other subnet details such as gateway or DNS
+servers. If you only want to discover the address, use ``ipv6_slaac``.
**Subnet DHCP Example**::
@@ -603,4 +619,6 @@ Some more examples to explore the various options available.
- dellstack
type: nameserver
-.. vi: textwidth=78
+.. _SLAAC: https://tools.ietf.org/html/rfc4862
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index aa17bef5..c1bf05d1 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -8,9 +8,25 @@ version 2 format defined for the `netplan`_ tool. Cloud-init supports
both reading and writing of Version 2; the latter support requires a
distro with `netplan`_ present.
+Netplan Passthrough
+-------------------
+
+On a system with netplan present, cloud-init will pass Version 2 configuration
+through to netplan without modification. On such systems, you do not need to
+limit yourself to the below subset of netplan's configuration format.
+
+.. warning::
+ If you are writing or generating network configuration that may be used on
+ non-netplan systems, you **must** limit yourself to the subset described in
+ this document, or you will see network configuration failures on
+ non-netplan systems.
+
+Version 2 Configuration Format
+------------------------------
+
The ``network`` key has at least two required elements. First
it must include ``version: 2`` and one or more of possible device
-``types``..
+``types``.
Cloud-init will read this format from system config.
For example the following could be present in
@@ -34,9 +50,6 @@ Each type block contains device definitions as a map where the keys (called
"configuration IDs"). Each entry under the ``types`` may include IP and/or
device configuration.
-Cloud-init does not current support ``wifis`` type that is present in native
-`netplan`_.
-
Device configuration IDs
------------------------
@@ -94,7 +107,8 @@ NetworkManager does not.
**macaddress**: *<(scalar)>*
-Device's MAC address in the form XX:XX:XX:XX:XX:XX. Globs are not allowed.
+Device's MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed.
+Letters must be lowercase.
.. note::
@@ -118,7 +132,7 @@ supported. Matching on driver is *only* supported with networkd.
# fixed MAC address
match:
- macaddress: 11:22:33:AA:BB:FF
+ macaddress: 11:22:33:aa:bb:ff
# first card of driver ``ixgbe``
match:
@@ -478,6 +492,11 @@ This is a complex example which shows most available features: ::
nameservers:
search: [foo.local, bar.local]
addresses: [8.8.8.8]
+ # static routes
+ routes:
+ - to: 192.0.2.0/24
+ via: 11.0.0.1
+ metric: 3
lom:
match:
driver: ixgbe
@@ -506,11 +525,6 @@ This is a complex example which shows most available features: ::
id: 1
link: id0
dhcp4: yes
- # static routes
- routes:
- - to: 0.0.0.0/0
- via: 11.0.0.1
- metric: 3
-
-.. _netplan: https://launchpad.net/netplan
-.. vi: textwidth=78
+
+.. _netplan: https://netplan.io
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 08db04d8..c461a3fe 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config has not been disabled, and
no other network information is found, then it will proceed
to generate a fallback networking configuration.
+Disabling Network Activation
+----------------------------
+
+Some datasources may not be initialized until after the network has been
+brought up. In this case, cloud-init will attempt to bring up the interfaces
+specified by the datasource metadata.
+
+This behavior can be disabled in the cloud-init configuration dictionary,
+merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
+
+ disable_network_activation: true
Fallback Network Configuration
==============================
@@ -104,6 +115,13 @@ interface given the information it has available.
Finally after selecting the "right" interface, a configuration is
generated and applied to the system.
+.. note::
+
+   PhotonOS disables fallback networking configuration by default, leaving
+   the network unrendered when no other network config is provided.
+ If fallback config is still desired on PhotonOS, it can be enabled by
+ providing `disable_fallback_netcfg: false` in
+ `/etc/cloud/cloud.cfg:sys_config` settings.
Network Configuration Sources
=============================
@@ -144,6 +162,14 @@ The following Datasources optionally provide network configuration:
- `SmartOS JSON Metadata`_
+- :ref:`datasource_upcloud`
+
+ - `UpCloud JSON metadata`_
+
+- :ref:`datasource_vultr`
+
+ - `Vultr JSON metadata`_
+
For more information on network configuration formats
.. toctree::
@@ -257,5 +283,7 @@ Example output converting V2 to sysconfig:
.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
+.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
+.. _Vultr JSON metadata: https://www.vultr.com/metadata/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/security.rst b/doc/rtd/topics/security.rst
index b8386843..48fcb0a5 100644
--- a/doc/rtd/topics/security.rst
+++ b/doc/rtd/topics/security.rst
@@ -2,4 +2,4 @@
.. mdinclude:: ../../../SECURITY.md
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
new file mode 100644
index 00000000..5543c6f5
--- /dev/null
+++ b/doc/rtd/topics/testing.rst
@@ -0,0 +1,160 @@
+*******
+Testing
+*******
+
+cloud-init has both unit tests and integration tests. Unit tests can
+be found at ``tests/unittests``. Integration tests can be found at
+``tests/integration_tests``. Documentation specifically for integration
+tests can be found on the :ref:`integration_tests` page, but
+the guidelines specified below apply to both types of tests.
+
+cloud-init uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+
+Guidelines
+==========
+
+The following guidelines should be followed.
+
+Test Layout
+-----------
+
+* For ease of organisation and greater accessibility for developers not
+ familiar with pytest, all cloud-init unit tests must be contained
+ within test classes
+
+ * Put another way, module-level test functions should not be used
+
+* As all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and pytest test classes within the same
+ test file
+
+ * These can be easily distinguished by their definition: pytest
+ classes will not use inheritance at all (e.g.
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
+ subclass (indirectly) from ``TestCase`` (e.g.
+ `TestPrependBaseCommands`_)
+
+* Unit tests and integration tests are located under cloud-init/tests
+
+ * For consistency, unit test files should have a matching name and
+ directory location under `tests/unittests`
+
+ * For example: the expected test file for code in
+ `cloudinit/path/to/file.py` is
+ `tests/unittests/path/to/test_file.py`
+
+
+``pytest`` Tests
+----------------
+
+* pytest test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance
+
+* pytest tests should use bare ``assert`` statements, to take advantage
+ of pytest's `assertion introspection`_
+
+``pytest`` Version Gotchas
+--------------------------
+
+As we still support Ubuntu 18.04 (Bionic Beaver), we can only use pytest
+features that are available in v3.3.2. This is a non-exhaustive list of
+ways in which this may catch you out:
+
+* Only the following built-in fixtures are available [#fixture-list]_:
+
+ * ``cache``
+ * ``capfd``
+ * ``capfdbinary``
+ * ``caplog``
+ * ``capsys``
+ * ``capsysbinary``
+ * ``doctest_namespace``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``record_xml_property``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+Mocking and Assertions
+----------------------
+
+* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
+ should start with ``m_`` to clearly distinguish them from non-mock
+ variables
+
+ * For example, ``m_readurl`` (which would be a mock for ``readurl``)
+
+* The ``assert_*`` methods that are available on ``Mock`` and
+ ``MagicMock`` objects should be avoided, as typos in these method
+ names may not raise ``AttributeError`` (and so can cause tests to
+ silently pass). An important exception: if a ``Mock`` is
+ `autospecced`_ then misspelled assertion methods *will* raise an
+ ``AttributeError``, so these assertion methods may be used on
+ autospecced ``Mock`` objects.
+
+ For non-autospecced ``Mock`` s, these substitutions can be used
+ (``m`` is assumed to be a ``Mock``):
+
+ * ``m.assert_any_call(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) in m.call_args_list``
+ * ``m.assert_called()`` => ``assert 0 != m.call_count``
+ * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
+ * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
+ [mock.call(*args, **kwargs)] == m.call_args_list``
+ * ``m.assert_called_with(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) == m.call_args_list[-1]``
+ * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
+ call_list: assert call in m.call_args_list``
+
+ * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
+ any_order=False)`` are not easily replicated in a single
+ statement, so their use when appropriate is acceptable.
+
+ * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
+
+* When there are multiple patch calls in a test file for the module it
+ is testing, it may be desirable to capture the shared string prefix
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests,
+ ``DS_PATH``.
+
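+As a minimal sketch of these conventions (``get_hostname`` is a hypothetical
+helper, not real cloud-init code):
+
+.. code-block:: python
+
+   from unittest import mock
+
+
+   def get_hostname(runner):
+       """Hypothetical code under test: run a command, strip its output."""
+       return runner(["hostname"]).strip()
+
+
+   class TestGetHostname:
+       def test_get_hostname(self):
+           m_runner = mock.MagicMock(return_value="myhost\n")
+           assert "myhost" == get_hostname(m_runner)
+           # bare-assert equivalents of assert_called_once_with(["hostname"])
+           assert 1 == m_runner.call_count
+           assert mock.call(["hostname"]) == m_runner.call_args_list[-1]
+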
+Test Argument Ordering
+----------------------
+
+* Test arguments should be ordered as follows:
+
+ * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
+ partially applies its generated ``Mock`` object as the first
+ argument, so these arguments must go first.
+ * ``pytest.mark.parametrize`` arguments, in the order specified to
+ the ``parametrize`` decorator. These arguments are also provided
+ by a decorator, so it's natural that they sit next to the
+ ``mock.patch`` arguments.
+ * Fixture arguments, alphabetically. These are not provided by a
+ decorator, so they are last, and their order has no defined
+ meaning, so we default to alphabetical.
+
+* It follows from this ordering of test arguments (so that we retain
+ the property that arguments left-to-right correspond to decorators
+ bottom-to-top) that test decorators should be ordered as follows:
+
+ * ``pytest.mark.parametrize``
+ * ``mock.patch``
+
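+Put together, a test following this ordering might look like the sketch below
+(the ``os.path`` patch targets are purely illustrative, not real cloud-init
+code):
+
+.. code-block:: python
+
+   from unittest import mock
+
+   import pytest
+
+
+   class TestOrdering:
+       @pytest.mark.parametrize("value,expected", [(1, 2), (3, 4)])
+       @mock.patch("os.path.isfile")
+       @mock.patch("os.path.exists")
+       def test_ordering(self, m_exists, m_isfile, value, expected, tmpdir):
+           # mock.patch args first (bottom decorator supplies the first arg),
+           # then parametrize args in declared order, then fixtures
+           # alphabetically.
+           m_exists.return_value = True
+           m_isfile.return_value = False
+           assert value + 1 == expected
+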
+.. [#fixture-list] This list of fixtures (with markup) can be
+ reproduced by running::
+
+ python3 -m pytest --fixtures -q | grep "^[^ -]" | grep -v 'no tests ran in' | sort | sed 's/ \[session scope\]//g;s/.*/* ``\0``/g'
+
+   in an Ubuntu LXD container with python3-pytest installed.
+
+.. _pytest: https://docs.pytest.org/
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/fbcb224bc12495ba200ab107246349d802c5d8e6/cloudinit/tests/test_subp.py#L20
+.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
+.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
+.. _pytest.param: https://docs.pytest.org/en/6.2.x/reference.html#pytest-param
+.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
index cdb552d0..e659c941 100644
--- a/doc/rtd/topics/vendordata.rst
+++ b/doc/rtd/topics/vendordata.rst
@@ -47,8 +47,8 @@ way as user-data.
The only differences are:
- * user-scripts are stored in a different location than user-scripts (to
- avoid namespace collision)
+ * vendor-data-defined scripts are stored in a different location than
+ user-data-defined scripts (to avoid namespace collision)
* user can disable part handlers by cloud-config settings.
For example, to disable handling of 'part-handlers' in vendor-data,
the user could provide user-data like this:
@@ -68,4 +68,4 @@ of input files. That data can then be given to an instance.
See 'write-mime-multipart --help' for usage.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/sources/kernel-cmdline.txt b/doc/sources/kernel-cmdline.txt
index 0b77a9af..4cbfd217 100644
--- a/doc/sources/kernel-cmdline.txt
+++ b/doc/sources/kernel-cmdline.txt
@@ -9,7 +9,7 @@ as it requires knowing in advance the correct command line or modifying
the boot loader to append data.
For example, when 'cloud-init start' runs, it will check to
-see if if one of 'cloud-config-url' or 'url' appear in key/value fashion
+see if one of 'cloud-config-url' or 'url' appear in key/value fashion
in the kernel command line as in:
root=/dev/sda ro url=http://foo.bar.zee/abcde
diff --git a/doc/sources/ovf/README b/doc/sources/ovf/README
index e3ef12e0..3e0b366b 100644
--- a/doc/sources/ovf/README
+++ b/doc/sources/ovf/README
@@ -53,7 +53,7 @@ box, follow the steps below.
You can change that at the grub prompt if you'd like by editing the
kernel entry. Otherwise, to see progress you'll need to switch
to the serial console. In kvm graphic mode, you do that by clicking
- in the window and then pressing pressing 'ctrl-alt-3'. For information
+ in the window and then pressing 'ctrl-alt-3'. For information
on how to do that in virtualbox or kvm curses, see the relevant
documentation.
diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 13e8f104..e5f4e262 100644
--- a/doc/sources/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
@@ -41,6 +41,14 @@
-->
<Property oe:key="user-data" oe:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="/>
<Property oe:key="password" oe:value="passw0rd"/>
+ <!--
+ network-config is optional.
+ The value for network-config is to be base64 encoded.
+ It will be decoded, and then processed normally as network-config.
+ Set ovf-env.xml to VMware guestinfo.ovfEnv by below command:
+ 'vmware-rpctool "info-set guestinfo.ovfEnv `cat ./ovf-env.xml`"'
+ -->
+ <Property oe:key="network-config" oe:value="bmV0d29yazoKICB2ZXJzaW9uOiAyCiAgZXRoZXJuZXRzOgogICAgbmljczoKICAgICAgbWF0Y2g6CiAgICAgICAgbmFtZTogZXRoKgogICAgICBkaGNwNDogeWVz"/>
</PropertySection>
</Environment>
diff --git a/doc/sources/ovf/example/ubuntu-server.ovf b/doc/sources/ovf/example/ubuntu-server.ovf
index 846483a1..bc5327ed 100644
--- a/doc/sources/ovf/example/ubuntu-server.ovf
+++ b/doc/sources/ovf/example/ubuntu-server.ovf
@@ -48,8 +48,12 @@
<Label>Default User's password</Label>
<Description>If set, the default user's password will be set to this value to allow password based login. The password will be good for only a single login. If set to the string 'RANDOM' then a random password will be generated, and written to the console.</Description>
</Property>
+ <Property ovf:key="network-config" ovf:type="string" ovf:userConfigurable="true">
+ <Label>Encoded network-config</Label>
+ <Description>This field is optional. The value for network-config has to be base64 encoded.</Description>
+ </Property>
</ProductSection>
- <VirtualHardwareSection>
+ <VirtualHardwareSection ovf:transport="iso">
<Info>Virtual hardware requirements</Info>
<System>
<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
diff --git a/doc/sources/ovf/make-iso b/doc/sources/ovf/make-iso
index 91d0e2e5..e46642bf 100755
--- a/doc/sources/ovf/make-iso
+++ b/doc/sources/ovf/make-iso
@@ -28,7 +28,7 @@ Usage() {
cat <<EOF
Usage: ${0##*/} ovf-env.xml.tmpl [user-data-file]
- create an an ovf transport iso with ovf-env.xml.tmpl
+ create an ovf transport iso with ovf-env.xml.tmpl
as ovf-env.xml on the iso.
if user-data-file is given, the file's contents will be base64 encoded
diff --git a/doc/userdata.txt b/doc/userdata.txt
index cc691ae6..355966a8 100644
--- a/doc/userdata.txt
+++ b/doc/userdata.txt
@@ -1,7 +1,7 @@
=== Overview ===
Userdata is data provided by the entity that launches an instance.
The cloud provider makes this data available to the instance via in one
-way or anohter.
+way or another.
In EC2, the data is provided by the user via the '--user-data' or
'user-data-file' argument to ec2-run-instances. The EC2 cloud makes the
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 3648a0f1..8329eeec 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4b8d2cd5ac6316810ce16d081842da575625ca4f
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@44206bb95c49901d994c9eb772eba07f2a1b6661
pytest
diff --git a/packages/bddeb b/packages/bddeb
index a3fb8848..b009021a 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -94,6 +94,8 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
requires.extend(['python3'] + reqs + test_reqs)
if templ_data['debian_release'] == 'xenial':
requires.append('python3-pytest-catchlog')
+ elif templ_data['debian_release'] == 'impish':
+ requires.remove('dh-systemd')
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 75eccfdd..6607ef75 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -26,6 +26,20 @@
"sudo"
]
},
+ "eurolinux" : {
+ "build-requires" : [
+ "python3-devel"
+ ],
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
"redhat" : {
"build-requires" : [
"python3-devel"
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 16138012..1491822b 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -175,6 +175,7 @@ fi
%if "%{init_system}" == "systemd"
/usr/lib/systemd/system-generators/cloud-init-generator
+%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
%{_unitdir}/cloud-*
%else
%attr(0755, root, root) %{_initddir}/cloud-config
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 004b875f..da8107b4 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -126,6 +126,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
+%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
# Python code is here...
%{python_sitelib}/*
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..324d6f35
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,102 @@
+[tool.black]
+line-length = 79
+
+[tool.isort]
+profile = "black"
+line_length = 79
+# We patch logging in main.py before certain imports
+skip = ["cloudinit/cmd/main.py", ".tox", "packages", "tools"]
+
+[tool.mypy]
+follow_imports = "silent"
+exclude=[
+ '^cloudinit/apport\.py$',
+ '^cloudinit/cmd/query\.py$',
+ '^cloudinit/config/cc_chef\.py$',
+ '^cloudinit/config/cc_keyboard\.py$',
+ '^cloudinit/config/cc_landscape\.py$',
+ '^cloudinit/config/cc_mcollective\.py$',
+ '^cloudinit/config/cc_rsyslog\.py$',
+ '^cloudinit/config/cc_write_files_deferred\.py$',
+ '^cloudinit/config/cc_zypper_add_repo\.py$',
+ '^cloudinit/config/schema\.py$',
+ '^cloudinit/distros/bsd\.py$',
+ '^cloudinit/distros/freebsd\.py$',
+ '^cloudinit/distros/parsers/networkmanager_conf\.py$',
+ '^cloudinit/distros/parsers/resolv_conf\.py$',
+ '^cloudinit/distros/parsers/sys_conf\.py$',
+ '^cloudinit/dmi\.py$',
+ '^cloudinit/features\.py$',
+ '^cloudinit/handlers/cloud_config\.py$',
+ '^cloudinit/handlers/jinja_template\.py$',
+ '^cloudinit/net/__init__\.py$',
+ '^cloudinit/net/dhcp\.py$',
+ '^cloudinit/net/netplan\.py$',
+ '^cloudinit/net/sysconfig\.py$',
+ '^cloudinit/serial\.py$',
+ '^cloudinit/sources/DataSourceAliYun\.py$',
+ '^cloudinit/sources/DataSourceLXD\.py$',
+ '^cloudinit/sources/DataSourceOracle\.py$',
+ '^cloudinit/sources/DataSourceScaleway\.py$',
+ '^cloudinit/sources/DataSourceSmartOS\.py$',
+ '^cloudinit/sources/DataSourceVMware\.py$',
+ '^cloudinit/sources/__init__\.py$',
+ '^cloudinit/sources/helpers/vmware/imc/config_file\.py$',
+ '^cloudinit/stages\.py$',
+ '^cloudinit/templater\.py$',
+ '^cloudinit/url_helper\.py$',
+ '^conftest\.py$',
+ '^doc/rtd/conf\.py$',
+ '^setup\.py$',
+ '^tests/integration_tests/clouds\.py$',
+ '^tests/integration_tests/conftest\.py$',
+ '^tests/integration_tests/instances\.py$',
+ '^tests/integration_tests/integration_settings\.py$',
+ '^tests/integration_tests/modules/test_disk_setup\.py$',
+ '^tests/integration_tests/modules/test_growpart\.py$',
+ '^tests/integration_tests/modules/test_ssh_keysfile\.py$',
+ '^tests/unittests/__init__\.py$',
+ '^tests/unittests/cmd/devel/test_render\.py$',
+ '^tests/unittests/cmd/test_clean\.py$',
+ '^tests/unittests/cmd/test_cloud_id\.py$',
+ '^tests/unittests/cmd/test_main\.py$',
+ '^tests/unittests/cmd/test_query\.py$',
+ '^tests/unittests/cmd/test_status\.py$',
+ '^tests/unittests/config/test_cc_chef\.py$',
+ '^tests/unittests/config/test_cc_landscape\.py$',
+ '^tests/unittests/config/test_cc_locale\.py$',
+ '^tests/unittests/config/test_cc_mcollective\.py$',
+ '^tests/unittests/config/test_cc_rh_subscription\.py$',
+ '^tests/unittests/config/test_cc_set_hostname\.py$',
+ '^tests/unittests/config/test_cc_snap\.py$',
+ '^tests/unittests/config/test_cc_timezone\.py$',
+ '^tests/unittests/config/test_cc_ubuntu_advantage\.py$',
+ '^tests/unittests/config/test_cc_ubuntu_drivers\.py$',
+ '^tests/unittests/config/test_schema\.py$',
+ '^tests/unittests/helpers\.py$',
+ '^tests/unittests/net/test_dhcp\.py$',
+ '^tests/unittests/net/test_init\.py$',
+ '^tests/unittests/sources/test_aliyun\.py$',
+ '^tests/unittests/sources/test_azure\.py$',
+ '^tests/unittests/sources/test_ec2\.py$',
+ '^tests/unittests/sources/test_exoscale\.py$',
+ '^tests/unittests/sources/test_gce\.py$',
+ '^tests/unittests/sources/test_lxd\.py$',
+ '^tests/unittests/sources/test_opennebula\.py$',
+ '^tests/unittests/sources/test_openstack\.py$',
+ '^tests/unittests/sources/test_rbx\.py$',
+ '^tests/unittests/sources/test_scaleway\.py$',
+ '^tests/unittests/sources/test_smartos\.py$',
+ '^tests/unittests/test_data\.py$',
+ '^tests/unittests/test_ds_identify\.py$',
+ '^tests/unittests/test_ec2_util\.py$',
+ '^tests/unittests/test_net\.py$',
+ '^tests/unittests/test_net_activators\.py$',
+ '^tests/unittests/test_persistence\.py$',
+ '^tests/unittests/test_sshutil\.py$',
+ '^tests/unittests/test_subp\.py$',
+ '^tests/unittests/test_templating\.py$',
+ '^tests/unittests/test_url_helper\.py$',
+ '^tests/unittests/test_util\.py$',
+ '^tools/mock-meta\.py$',
+]
diff --git a/requirements.txt b/requirements.txt
index 5817da3b..c4adc455 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,3 +32,12 @@ jsonpatch
# For validating cloud-config sections per schema definitions
jsonschema
+
+# Used by DataSourceVMware to inspect the host's network configuration during
+# the "setup()" function.
+#
+# This allows a host that uses DHCP to bring up the network during BootLocal
+# and still participate in instance-data by gathering the network in detail at
+# runtime, merging that information into the metadata, and re-persisting it to
+# disk.
+netifaces>=0.10.4
diff --git a/setup.py b/setup.py
index cbacf48e..a9132d2c 100755
--- a/setup.py
+++ b/setup.py
@@ -8,43 +8,47 @@
# Distutils magic for ec2-init
-from glob import glob
-
import atexit
import os
+import platform
import shutil
+import subprocess
import sys
import tempfile
-import platform
+from glob import glob
import setuptools
-from setuptools.command.install import install
from setuptools.command.egg_info import egg_info
+from setuptools.command.install import install
-from distutils.errors import DistutilsArgError
-
-import subprocess
+try:
+ from setuptools.errors import DistutilsError
+except ImportError:
+ from distutils.errors import DistutilsArgError as DistutilsError
RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
VARIANT = None
+
def is_f(p):
return os.path.isfile(p)
+
def is_generator(p):
- return '-generator' in p
+ return "-generator" in p
def pkg_config_read(library, var):
fallbacks = {
- 'systemd': {
- 'systemdsystemunitdir': '/lib/systemd/system',
- 'systemdsystemgeneratordir': '/lib/systemd/system-generators',
+ "systemd": {
+ "systemdsystemconfdir": "/etc/systemd/system",
+ "systemdsystemunitdir": "/lib/systemd/system",
+ "systemdsystemgeneratordir": "/lib/systemd/system-generators",
}
}
- cmd = ['pkg-config', '--variable=%s' % var, library]
+ cmd = ["pkg-config", "--variable=%s" % var, library]
try:
- path = subprocess.check_output(cmd).decode('utf-8')
+ path = subprocess.check_output(cmd).decode("utf-8")
path = path.strip()
except Exception:
path = fallbacks[library][var]
@@ -65,15 +69,15 @@ def in_virtualenv():
def get_version():
- cmd = [sys.executable, 'tools/read-version']
+ cmd = [sys.executable, "tools/read-version"]
ver = subprocess.check_output(cmd)
- return ver.decode('utf-8').strip()
+ return ver.decode("utf-8").strip()
def read_requires():
- cmd = [sys.executable, 'tools/read-dependencies']
+ cmd = [sys.executable, "tools/read-dependencies"]
deps = subprocess.check_output(cmd)
- return deps.decode('utf-8').splitlines()
+ return deps.decode("utf-8").splitlines()
def render_tmpl(template, mode=None):
@@ -87,7 +91,7 @@ def render_tmpl(template, mode=None):
# older versions of tox use bdist (xenial), and then install from there.
# newer versions just use install.
- if not (sys.argv[1] == 'install' or sys.argv[1].startswith('bdist*')):
+ if not (sys.argv[1] == "install" or sys.argv[1].startswith("bdist*")):
return template
tmpl_ext = ".tmpl"
@@ -101,51 +105,69 @@ def render_tmpl(template, mode=None):
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
if VARIANT:
- subprocess.run([sys.executable, './tools/render-cloudcfg', '--variant',
- VARIANT, template, fpath])
+ subprocess.run(
+ [
+ sys.executable,
+ "./tools/render-cloudcfg",
+ "--variant",
+ VARIANT,
+ template,
+ fpath,
+ ]
+ )
else:
subprocess.run(
- [sys.executable, './tools/render-cloudcfg', template, fpath])
+ [sys.executable, "./tools/render-cloudcfg", template, fpath]
+ )
if mode:
os.chmod(fpath, mode)
# return path relative to setup.py
return os.path.join(os.path.basename(tmpd), bname)
+
# User can set the variant for template rendering
-if '--distro' in sys.argv:
- idx = sys.argv.index('--distro')
- VARIANT = sys.argv[idx+1]
- del sys.argv[idx+1]
- sys.argv.remove('--distro')
+if "--distro" in sys.argv:
+ idx = sys.argv.index("--distro")
+ VARIANT = sys.argv[idx + 1]
+ del sys.argv[idx + 1]
+ sys.argv.remove("--distro")
INITSYS_FILES = {
- 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
- 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
- 'sysvinit_netbsd': [f for f in glob('sysvinit/netbsd/*') if is_f(f)],
- 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
- 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
- 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)],
- 'systemd': [render_tmpl(f)
- for f in (glob('systemd/*.tmpl') +
- glob('systemd/*.service') +
- glob('systemd/*.target'))
- if (is_f(f) and not is_generator(f))],
- 'systemd.generators': [
+ "sysvinit": [f for f in glob("sysvinit/redhat/*") if is_f(f)],
+ "sysvinit_freebsd": [f for f in glob("sysvinit/freebsd/*") if is_f(f)],
+ "sysvinit_netbsd": [f for f in glob("sysvinit/netbsd/*") if is_f(f)],
+ "sysvinit_deb": [f for f in glob("sysvinit/debian/*") if is_f(f)],
+ "sysvinit_openrc": [f for f in glob("sysvinit/gentoo/*") if is_f(f)],
+ "sysvinit_suse": [f for f in glob("sysvinit/suse/*") if is_f(f)],
+ "systemd": [
+ render_tmpl(f)
+ for f in (
+ glob("systemd/*.tmpl")
+ + glob("systemd/*.service")
+ + glob("systemd/*.socket")
+ + glob("systemd/*.target")
+ )
+ if (is_f(f) and not is_generator(f))
+ ],
+ "systemd.generators": [
render_tmpl(f, mode=0o755)
- for f in glob('systemd/*') if is_f(f) and is_generator(f)],
- 'upstart': [f for f in glob('upstart/*') if is_f(f)],
+ for f in glob("systemd/*")
+ if is_f(f) and is_generator(f)
+ ],
+ "upstart": [f for f in glob("upstart/*") if is_f(f)],
}
INITSYS_ROOTS = {
- 'sysvinit': 'etc/rc.d/init.d',
- 'sysvinit_freebsd': 'usr/local/etc/rc.d',
- 'sysvinit_netbsd': 'usr/local/etc/rc.d',
- 'sysvinit_deb': 'etc/init.d',
- 'sysvinit_openrc': 'etc/init.d',
- 'sysvinit_suse': 'etc/init.d',
- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
- 'systemd.generators': pkg_config_read('systemd',
- 'systemdsystemgeneratordir'),
- 'upstart': 'etc/init/',
+ "sysvinit": "etc/rc.d/init.d",
+ "sysvinit_freebsd": "usr/local/etc/rc.d",
+ "sysvinit_netbsd": "usr/local/etc/rc.d",
+ "sysvinit_deb": "etc/init.d",
+ "sysvinit_openrc": "etc/init.d",
+ "sysvinit_suse": "etc/init.d",
+ "systemd": pkg_config_read("systemd", "systemdsystemunitdir"),
+ "systemd.generators": pkg_config_read(
+ "systemd", "systemdsystemgeneratordir"
+ ),
+ "upstart": "etc/init/",
}
INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
@@ -156,22 +178,22 @@ USR = "usr"
ETC = "etc"
USR_LIB_EXEC = "usr/lib"
LIB = "lib"
-if os.uname()[0] == 'FreeBSD':
+if os.uname()[0] in ["FreeBSD", "DragonFly"]:
USR = "usr/local"
USR_LIB_EXEC = "usr/local/lib"
-elif os.path.isfile('/etc/redhat-release'):
+elif os.path.isfile("/etc/redhat-release"):
USR_LIB_EXEC = "usr/libexec"
-elif os.path.isfile('/etc/system-release-cpe'):
- with open('/etc/system-release-cpe') as f:
- cpe_data = f.read().rstrip().split(':')
+elif os.path.isfile("/etc/system-release-cpe"):
+ with open("/etc/system-release-cpe") as f:
+ cpe_data = f.read().rstrip().split(":")
- if cpe_data[1] == "\o":
- # URI formated CPE
+ if cpe_data[1] == "\o": # noqa: W605
+ # URI formatted CPE
inc = 0
else:
- # String formated CPE
+ # String formatted CPE
inc = 1
- (cpe_vendor, cpe_product, cpe_version) = cpe_data[2+inc:5+inc]
+ (cpe_vendor, cpe_product, cpe_version) = cpe_data[2 + inc : 5 + inc]
if cpe_vendor == "amazon":
USR_LIB_EXEC = "usr/libexec"
@@ -182,16 +204,18 @@ class MyEggInfo(egg_info):
def find_sources(self):
ret = egg_info.find_sources(self)
# update the self.filelist.
- self.filelist.exclude_pattern(RENDERED_TMPD_PREFIX + ".*",
- is_regex=True)
+ self.filelist.exclude_pattern(
+ RENDERED_TMPD_PREFIX + ".*", is_regex=True
+ )
# but since mfname is already written we have to update it also.
mfname = os.path.join(self.egg_info, "SOURCES.txt")
if os.path.exists(mfname):
with open(mfname) as fp:
- files = [f for f in fp
- if not f.startswith(RENDERED_TMPD_PREFIX)]
+ files = [
+ f for f in fp if not f.startswith(RENDERED_TMPD_PREFIX)
+ ]
with open(mfname, "w") as fp:
- fp.write(''.join(files))
+ fp.write("".join(files))
return ret
@@ -200,9 +224,12 @@ class InitsysInstallData(install):
init_system = None
user_options = install.user_options + [
# This will magically show up in member variable 'init_sys'
- ('init-system=', None,
- ('init system(s) to configure (%s) [default: None]' %
- (", ".join(INITSYS_TYPES)))),
+ (
+ "init-system=",
+ None,
+ "init system(s) to configure (%s) [default: None]"
+ % ", ".join(INITSYS_TYPES),
+ ),
]
def initialize_options(self):
@@ -215,25 +242,28 @@ class InitsysInstallData(install):
if self.init_system and isinstance(self.init_system, str):
self.init_system = self.init_system.split(",")
- if len(self.init_system) == 0 and not platform.system().endswith('BSD'):
- self.init_system = ['systemd']
+ if len(self.init_system) == 0 and not platform.system().endswith(
+ "BSD"
+ ):
+ self.init_system = ["systemd"]
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
if len(bad) != 0:
- raise DistutilsArgError(
- "Invalid --init-system: %s" % (','.join(bad)))
+ raise DistutilsError("Invalid --init-system: %s" % ",".join(bad))
for system in self.init_system:
# add data files for anything that starts with '<system>.'
- datakeys = [k for k in INITSYS_ROOTS
- if k.partition(".")[0] == system]
+ datakeys = [
+ k for k in INITSYS_ROOTS if k.partition(".")[0] == system
+ ]
for k in datakeys:
if not INITSYS_FILES[k]:
continue
self.distribution.data_files.append(
- (INITSYS_ROOTS[k], INITSYS_FILES[k]))
- # Force that command to reinitalize (with new file list)
- self.distribution.reinitialize_command('install_data', True)
+ (INITSYS_ROOTS[k], INITSYS_FILES[k])
+ )
+ # Force that command to reinitialize (with new file list)
+ self.distribution.reinitialize_command("install_data", True)
if not in_virtualenv():
@@ -245,55 +275,78 @@ if not in_virtualenv():
INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k]
data_files = [
- (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]),
- (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
- (ETC + '/cloud/templates', glob('templates/*')),
- (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
- 'tools/uncloud-init',
- 'tools/write-ssh-key-fingerprints']),
- (USR + '/share/bash-completion/completions',
- ['bash_completion/cloud-init']),
- (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples',
- [f for f in glob('doc/examples/*') if is_f(f)]),
- (USR + '/share/doc/cloud-init/examples/seed',
- [f for f in glob('doc/examples/seed/*') if is_f(f)]),
+ (ETC + "/cloud", [render_tmpl("config/cloud.cfg.tmpl")]),
+ (ETC + "/cloud/cloud.cfg.d", glob("config/cloud.cfg.d/*")),
+ (ETC + "/cloud/templates", glob("templates/*")),
+ (
+ USR_LIB_EXEC + "/cloud-init",
+ [
+ "tools/ds-identify",
+ "tools/hook-hotplug",
+ "tools/uncloud-init",
+ "tools/write-ssh-key-fingerprints",
+ ],
+ ),
+ (
+ USR + "/share/bash-completion/completions",
+ ["bash_completion/cloud-init"],
+ ),
+ (USR + "/share/doc/cloud-init", [f for f in glob("doc/*") if is_f(f)]),
+ (
+ USR + "/share/doc/cloud-init/examples",
+ [f for f in glob("doc/examples/*") if is_f(f)],
+ ),
+ (
+ USR + "/share/doc/cloud-init/examples/seed",
+ [f for f in glob("doc/examples/seed/*") if is_f(f)],
+ ),
]
-if not platform.system().endswith('BSD'):
- data_files.extend([
- (ETC + '/NetworkManager/dispatcher.d/',
- ['tools/hook-network-manager']),
- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')])
- ])
+if not platform.system().endswith("BSD"):
+ data_files.extend(
+ [
+ (
+ ETC + "/NetworkManager/dispatcher.d/",
+ ["tools/hook-network-manager"],
+ ),
+ (ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]),
+ (LIB + "/udev/rules.d", [f for f in glob("udev/*.rules")]),
+ (
+ ETC + "/systemd/system/sshd-keygen@.service.d/",
+ ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"],
+ ),
+ ]
+ )
# Use a subclass for install that handles
# adding on the right init system configuration files
cmdclass = {
- 'install': InitsysInstallData,
- 'egg_info': MyEggInfo,
+ "install": InitsysInstallData,
+ "egg_info": MyEggInfo,
}
requirements = read_requires()
setuptools.setup(
- name='cloud-init',
+ name="cloud-init",
version=get_version(),
- description='Cloud instance initialisation magic',
- author='Scott Moser',
- author_email='scott.moser@canonical.com',
- url='http://launchpad.net/cloud-init/',
- packages=setuptools.find_packages(exclude=['tests.*', '*.tests', 'tests']),
- scripts=['tools/cloud-init-per'],
- license='Dual-licensed under GPLv3 or Apache 2.0',
+ description="Cloud instance initialisation magic",
+ author="Scott Moser",
+ author_email="scott.moser@canonical.com",
+ url="http://launchpad.net/cloud-init/",
+ package_data={
+ "": ["*.json"],
+ },
+ packages=setuptools.find_packages(exclude=["tests.*", "tests"]),
+ scripts=["tools/cloud-init-per"],
+ license="Dual-licensed under GPLv3 or Apache 2.0",
data_files=data_files,
install_requires=requirements,
cmdclass=cmdclass,
entry_points={
- 'console_scripts': [
- 'cloud-init = cloudinit.cmd.main:main',
- 'cloud-id = cloudinit.cmd.cloud_id:main'
+ "console_scripts": [
+ "cloud-init = cloudinit.cmd.main:main",
+ "cloud-id = cloudinit.cmd.cloud_id:main",
],
- }
+ },
)
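For context, the INITSYS_ROOTS table above resolves the systemd unit and generator directories through pkg_config_read(), whose definition sits earlier in setup.py and is not part of this hunk. A minimal, hypothetical Python sketch of what that lookup amounts to (not the project's actual helper; the fallback paths here are assumptions):

# Sketch only: cloud-init's real pkg_config_read differs in details.
import subprocess

def pkg_config_read(library, var, default="lib/systemd/system"):
    """Return a pkg-config variable such as systemdsystemunitdir."""
    try:
        out = subprocess.run(
            ["pkg-config", "--variable=" + var, library],
            capture_output=True, text=True, check=True,
        ).stdout.strip()
    except (OSError, subprocess.CalledProcessError):
        return default
    # data_files entries in setup.py use paths without a leading "/".
    return out.lstrip("/") or default

if __name__ == "__main__":
    print(pkg_config_read("systemd", "systemdsystemunitdir"))
    print(pkg_config_read("systemd", "systemdsystemgeneratordir",
                          default="lib/systemd/system-generators"))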
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 0773356b..74d47428 100755..100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,8 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["rhel", "fedora", "centos"] %}
+{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
+ "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service
new file mode 100644
index 00000000..b64632ef
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.service
@@ -0,0 +1,22 @@
+# Paired with cloud-init-hotplugd.socket to read from the FIFO
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# On start, read args from the FIFO, process and provide structured arguments
+# to `cloud-init devel hotplug-hook`, which will set up or tear down network
+# devices as configured by user-data.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+# cloud-init-hotplugd.service will read args from file descriptor 3
+
+[Unit]
+Description=cloud-init hotplug hook daemon
+After=cloud-init-hotplugd.socket
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \
+ exec /usr/bin/cloud-init devel hotplug-hook $args; \
+ exit 0'
+SyslogIdentifier=cloud-init-hotplugd
+TimeoutStopSec=5
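For orientation: the producer side of this FIFO handshake is tools/hook-hotplug (added to data_files in setup.py above), which udev runs on network add/remove events; systemd then hands the reading end of the FIFO to this service as file descriptor 3. A hypothetical writer-side sketch in Python — the argument string shown is a placeholder, not the exact format the real hook emits:

# Hypothetical sketch; the real producer is the shell script
# tools/hook-hotplug, and this assumes the socket unit has already
# created /run/cloud-init/hook-hotplug-cmd.
FIFO = "/run/cloud-init/hook-hotplug-cmd"

def queue_hotplug_event(args: str) -> None:
    # Opening a FIFO for writing blocks until a reader exists, so events
    # are only queued once cloud-init-hotplugd.socket is listening.
    with open(FIFO, "w") as fifo:
        fifo.write(args + "\n")

if __name__ == "__main__":
    # Placeholder arguments for illustration only.
    queue_hotplug_event("--subsystem=net handle --devpath=/devices/... --udevaction=add")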
diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket
new file mode 100644
index 00000000..aa093016
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.socket
@@ -0,0 +1,13 @@
+# cloud-init-hotplugd.socket listens on the FIFO file
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+[Unit]
+Description=cloud-init hotplug hook socket
+
+[Socket]
+ListenFIFO=/run/cloud-init/hook-hotplug-cmd
+
+[Install]
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index f140344d..e71e5679 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -1,7 +1,9 @@
## template:jinja
[Unit]
Description=Initial cloud-init job (metadata service crawler)
+{% if variant not in ["photon"] %}
DefaultDependencies=no
+{% endif %}
Wants=cloud-init-local.service
Wants=sshd-keygen.service
Wants=sshd.service
@@ -10,7 +12,8 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
+ "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
{% endif %}
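The practical effect of widening this variant list is easiest to see by rendering the conditional. A small illustrative sketch using the jinja2 package (cloud-init's own build step, tools/render-cloudcfg, performs the equivalent substitution):

# Illustrative only: renders the distro-variant conditional added above.
from jinja2 import Template

snippet = Template(
    "{% if variant in ['almalinux', 'centos', 'cloudlinux', 'eurolinux',"
    " 'fedora', 'miraclelinux', 'openEuler', 'rhel', 'rocky',"
    " 'virtuozzo'] %}After=network.service\nAfter=NetworkManager.service\n"
    "{% endif %}"
)

print(snippet.render(variant="rocky"))   # emits both After= lines
print(snippet.render(variant="ubuntu"))  # emits nothing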
diff --git a/systemd/disable-sshd-keygen-if-cloud-init-active.conf b/systemd/disable-sshd-keygen-if-cloud-init-active.conf
new file mode 100644
index 00000000..1a5d7a5a
--- /dev/null
+++ b/systemd/disable-sshd-keygen-if-cloud-init-active.conf
@@ -0,0 +1,7 @@
+# In some cloud-init enabled images the sshd-keygen template service may race
+# with cloud-init during boot causing issues with host key generation. This
+# drop-in config adds a condition to sshd-keygen@.service if it exists and
+# prevents the sshd-keygen units from running *if* cloud-init is going to run.
+#
+[Unit]
+ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index aa5bd118..d26f3d0f 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -2,7 +2,7 @@
# PROVIDE: cloudinit
# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd
-# BEFORE: cloudconfig cloudfinal
+# BEFORE: LOGIN cloudconfig cloudfinal
. /etc/rc.subr
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
index 0a759b04..b9d58172 100644
--- a/templates/chef_client.rb.tmpl
+++ b/templates/chef_client.rb.tmpl
@@ -15,7 +15,7 @@ The reason these are not in quotes is because they are ruby
symbols that will be placed inside here, and not actual strings...
#}
{% if chef_license %}
-chef_license "{{chef_license}}"
+chef_license "{{chef_license}}"
{% endif%}
{% if log_level %}
log_level {{log_level}}
diff --git a/templates/chrony.conf.photon.tmpl b/templates/chrony.conf.photon.tmpl
new file mode 100644
index 00000000..8551f793
--- /dev/null
+++ b/templates/chrony.conf.photon.tmpl
@@ -0,0 +1,48 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains or loses time.
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Enable hardware timestamping on all interfaces that support it.
+#hwtimestamp *
+
+# Increase the minimum number of selectable sources required to adjust
+# the system clock.
+#minsources 2
+
+# Allow NTP client access from local network.
+#allow 192.168.0.0/16
+
+# Serve time even if not synchronized to a time source.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Get TAI-UTC offset and leap seconds from the system tz database.
+leapsectz right/UTC
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/hosts.alpine.tmpl b/templates/hosts.alpine.tmpl
index 33c1a941..98ae55e6 100644
--- a/templates/hosts.alpine.tmpl
+++ b/templates/hosts.alpine.tmpl
@@ -13,16 +13,13 @@ you need to add the following to config:
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
-127.0.1.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
+127.0.1.1 {{hostname}} {{fqdn}}
+127.0.0.1 localhost localhost.localdomain
+127.0.0.1 localhost4 localhost4.localdomain4
# The following lines are desirable for IPv6 capable hosts
-::1 {{fqdn}} {{hostname}}
-::1 localhost6.localdomain6 localhost6
+::1 {{hostname}} {{fqdn}}
+::1 localhost6 localhost6.localdomain6
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
diff --git a/templates/hosts.debian.tmpl b/templates/hosts.debian.tmpl
index 7e29907a..afeccf9a 100644
--- a/templates/hosts.debian.tmpl
+++ b/templates/hosts.debian.tmpl
@@ -17,10 +17,7 @@ you need to add the following to config:
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
-::1 ip6-localhost ip6-loopback
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
+::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
diff --git a/templates/hosts.gentoo.tmpl b/templates/hosts.gentoo.tmpl
new file mode 100644
index 00000000..cc5e6f00
--- /dev/null
+++ b/templates/hosts.gentoo.tmpl
@@ -0,0 +1,23 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/hosts.gentoo.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.gentoo.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost.localdomain localhost
+::1 localhost6.localdomain6 localhost6
diff --git a/templates/hosts.photon.tmpl b/templates/hosts.photon.tmpl
new file mode 100644
index 00000000..0fd6f722
--- /dev/null
+++ b/templates/hosts.photon.tmpl
@@ -0,0 +1,22 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/hosts.photon.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.photon.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost6.localdomain6 localhost6
diff --git a/templates/ntp.conf.photon.tmpl b/templates/ntp.conf.photon.tmpl
new file mode 100644
index 00000000..4d4910d1
--- /dev/null
+++ b/templates/ntp.conf.photon.tmpl
@@ -0,0 +1,61 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would affect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index f870be67..72a37bf7 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -22,7 +22,7 @@ domain {{domain}}
sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
{% endif %}
{#
- Flags and options are required to be on the
+ Flags and options are required to be on the
same line preceded by "options" keyword
#}
{% if options or flags %}
diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl
index e7ef9ed1..e8a7c015 100644
--- a/templates/sources.list.debian.tmpl
+++ b/templates/sources.list.debian.tmpl
@@ -15,8 +15,8 @@ deb-src {{mirror}} {{codename}} main
## Major bug fix updates produced after the final release of the
## distribution.
-deb {{security}} {{codename}}/updates main
-deb-src {{security}} {{codename}}/updates main
+deb {{security}} {{codename}}{% if codename in ('buster', 'stretch') %}/updates{% else %}-security{% endif %} main
+deb-src {{security}} {{codename}}{% if codename in ('buster', 'stretch') %}/updates{% else %}-security{% endif %} main
deb {{mirror}} {{codename}}-updates main
deb-src {{mirror}} {{codename}}-updates main
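This conditional tracks the rename of Debian's security suite: stretch and buster use <codename>/updates, while bullseye and later use <codename>-security. A quick jinja2 sketch of the rendered line for one old and one new codename (the mirror URL is just an example value):

# Illustrative render of the security entries added above.
from jinja2 import Template

line = Template(
    "deb {{security}} {{codename}}"
    "{% if codename in ('buster', 'stretch') %}/updates"
    "{% else %}-security{% endif %} main"
)
mirror = "http://security.debian.org/debian-security"
print(line.render(security=mirror, codename="buster"))
# deb http://security.debian.org/debian-security buster/updates main
print(line.render(security=mirror, codename="bullseye"))
# deb http://security.debian.org/debian-security bullseye-security main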
diff --git a/templates/sources.list.ubuntu.tmpl b/templates/sources.list.ubuntu.tmpl
index edb92f13..6f928677 100644
--- a/templates/sources.list.ubuntu.tmpl
+++ b/templates/sources.list.ubuntu.tmpl
@@ -43,13 +43,6 @@ deb {{mirror}} {{codename}}-updates multiverse
deb {{mirror}} {{codename}}-backports main restricted universe multiverse
# deb-src {{mirror}} {{codename}}-backports main restricted universe multiverse
-## Uncomment the following two lines to add software from Canonical's
-## 'partner' repository.
-## This software is not part of Ubuntu, but is offered by Canonical and the
-## respective vendors as a service to Ubuntu users.
-# deb http://archive.canonical.com/ubuntu {{codename}} partner
-# deb-src http://archive.canonical.com/ubuntu {{codename}} partner
-
deb {{security}} {{codename}}-security main restricted
# deb-src {{security}} {{codename}}-security main restricted
deb {{security}} {{codename}}-security universe
diff --git a/templates/systemd.resolved.conf.tmpl b/templates/systemd.resolved.conf.tmpl
new file mode 100644
index 00000000..fca50d37
--- /dev/null
+++ b/templates/systemd.resolved.conf.tmpl
@@ -0,0 +1,15 @@
+## template:jinja
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#
+[Resolve]
+LLMNR=false
+{% if nameservers is defined %}
+DNS={% for server in nameservers %}{{server}} {% endfor %}
+{% endif %}
+
+{% if searchdomains is defined %}
+Domains={% for search in searchdomains %}{{search}} {% endfor %}
+{% endif %}
diff --git a/test-requirements.txt b/test-requirements.txt
index 0a6a04d4..06dfbbec 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -5,3 +5,4 @@ pytest-cov
# Only really needed on older versions of python
setuptools
+jsonschema
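jsonschema joins the test requirements, presumably because the test suite validates cloud-config modules against their JSON schemas. A self-contained illustration of the library itself (the schema below is invented for the example, not one of cloud-init's):

# Toy schema, not cloud-init's; shows what jsonschema.validate enforces.
import jsonschema

schema = {
    "type": "object",
    "properties": {
        "ntp": {
            "type": "object",
            "properties": {
                "pools": {"type": "array", "items": {"type": "string"}},
            },
        },
    },
}

jsonschema.validate({"ntp": {"pools": ["0.pool.ntp.org"]}}, schema)  # passes
try:
    jsonschema.validate({"ntp": {"pools": "0.pool.ntp.org"}}, schema)
except jsonschema.exceptions.ValidationError as err:
    print("rejected:", err.message)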
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
deleted file mode 100644
index 6c632f99..00000000
--- a/tests/cloud_tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import logging
-import os
-
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
-TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')
-TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-def _initialize_logging():
- """Configure logging for cloud_tests."""
- logger = logging.getLogger(__name__)
- logger.setLevel(logging.DEBUG)
- formatter = logging.Formatter(
- '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s '
- '[%(levelname)s]: %(message)s')
-
- console = logging.StreamHandler()
- console.setLevel(logging.DEBUG)
- console.setFormatter(formatter)
-
- logger.addHandler(console)
-
- return logger
-
-
-LOG = _initialize_logging()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
deleted file mode 100644
index 7ee29cad..00000000
--- a/tests/cloud_tests/__main__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main entry point."""
-
-import argparse
-import logging
-import os
-import sys
-
-from tests.cloud_tests import args, bddeb, collect, manage, run_funcs, verify
-from tests.cloud_tests import LOG
-
-
-def configure_log(args):
- """Configure logging."""
- level = logging.INFO
- if args.verbose:
- level = logging.DEBUG
- elif args.quiet:
- level = logging.WARN
- LOG.setLevel(level)
-
-
-def main():
- """Entry point for cloud test suite."""
- # configure parser
- parser = argparse.ArgumentParser(prog='cloud_tests')
- subparsers = parser.add_subparsers(dest="subcmd")
- subparsers.required = True
-
- def add_subparser(name, description, arg_sets):
- """Add arguments to subparser."""
- subparser = subparsers.add_parser(name, help=description)
- for (_args, _kwargs) in (a for arg_set in arg_sets for a in arg_set):
- subparser.add_argument(*_args, **_kwargs)
-
- # configure subparsers
- for (name, (description, arg_sets)) in args.SUBCMDS.items():
- add_subparser(name, description,
- [args.ARG_SETS[arg_set] for arg_set in arg_sets])
-
- # parse arguments
- parsed = parser.parse_args()
-
- # process arguments
- configure_log(parsed)
- (_, arg_sets) = args.SUBCMDS[parsed.subcmd]
- for normalizer in [args.NORMALIZERS[arg_set] for arg_set in arg_sets]:
- parsed = normalizer(parsed)
- if not parsed:
- return -1
-
- # run handler
- LOG.debug('running with args: %s', parsed)
- return {
- 'bddeb': bddeb.bddeb,
- 'collect': collect.collect,
- 'create': manage.create,
- 'run': run_funcs.run,
- 'tree_collect': run_funcs.tree_collect,
- 'tree_run': run_funcs.tree_run,
- 'verify': verify.verify,
- }[parsed.subcmd](parsed)
-
-
-if __name__ == "__main__":
- if os.geteuid() == 0:
- sys.exit('Do not run as root')
- sys.exit(main())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
deleted file mode 100644
index ab345491..00000000
--- a/tests/cloud_tests/args.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Argparse argument setup and sanitization."""
-
-import os
-
-from tests.cloud_tests import config, util
-from tests.cloud_tests import LOG, TREE_BASE
-
-ARG_SETS = {
- 'BDDEB': (
- (('--bddeb-args',),
- {'help': 'args to pass through to bddeb',
- 'action': 'store', 'default': None, 'required': False}),
- (('--build-os',),
- {'help': 'OS to use as build system (default is xenial)',
- 'action': 'store', 'choices': config.ENABLED_DISTROS,
- 'default': 'xenial', 'required': False}),
- (('--build-platform',),
- {'help': 'platform to use for build system (default is lxd)',
- 'action': 'store', 'choices': config.ENABLED_PLATFORMS,
- 'default': 'lxd', 'required': False}),
- (('--cloud-init',),
- {'help': 'path to base of cloud-init tree', 'metavar': 'DIR',
- 'action': 'store', 'required': False, 'default': TREE_BASE}),),
- 'COLLECT': (
- (('-p', '--platform'),
- {'help': 'platform(s) to run tests on', 'metavar': 'PLATFORM',
- 'action': 'append', 'choices': config.ENABLED_PLATFORMS,
- 'default': []}),
- (('-n', '--os-name'),
- {'help': 'the name(s) of the OS(s) to test', 'metavar': 'NAME',
- 'action': 'append', 'choices': config.ENABLED_DISTROS,
- 'default': []}),
- (('-t', '--test-config'),
- {'help': 'test config file(s) to use', 'metavar': 'FILE',
- 'action': 'append', 'default': []}),
- (('--feature-override',),
- {'help': 'feature flags override(s), <flagname>=<true/false>',
- 'action': 'append', 'default': [], 'required': False}),),
- 'CREATE': (
- (('-c', '--config'),
- {'help': 'cloud-config yaml for testcase', 'metavar': 'DATA',
- 'action': 'store', 'required': False, 'default': None}),
- (('-e', '--enable'),
- {'help': 'enable testcase', 'required': False, 'default': False,
- 'action': 'store_true'}),
- (('name',),
- {'help': 'testcase name, in format "<category>/<test>"',
- 'action': 'store'}),
- (('-d', '--description'),
- {'help': 'description of testcase', 'required': False}),
- (('-f', '--force'),
- {'help': 'overwrite already existing test', 'required': False,
- 'action': 'store_true', 'default': False}),),
- 'INTERFACE': (
- (('-v', '--verbose'),
- {'help': 'verbose output', 'action': 'store_true', 'default': False}),
- (('-q', '--quiet'),
- {'help': 'quiet output', 'action': 'store_true', 'default': False}),),
- 'OUTPUT': (
- (('-d', '--data-dir'),
- {'help': 'directory to store test data in',
- 'action': 'store', 'metavar': 'DIR', 'required': False}),
- (('--preserve-instance',),
- {'help': 'do not destroy the instance under test',
- 'action': 'store_true', 'default': False, 'required': False}),
- (('--preserve-data',),
- {'help': 'do not remove collected data after successful run',
- 'action': 'store_true', 'default': False, 'required': False}),),
- 'OUTPUT_DEB': (
- (('--deb',),
- {'help': 'path to write output deb to', 'metavar': 'FILE',
- 'action': 'store', 'required': False,
- 'default': 'cloud-init_all.deb'}),),
- 'RESULT': (
- (('-r', '--result'),
- {'help': 'file to write results to',
- 'action': 'store', 'metavar': 'FILE'}),),
- 'SETUP': (
- (('--deb',),
- {'help': 'install deb', 'metavar': 'FILE', 'action': 'store'}),
- (('--rpm',),
- {'help': 'install rpm', 'metavar': 'FILE', 'action': 'store'}),
- (('--script',),
- {'help': 'script to set up image', 'metavar': 'DATA',
- 'action': 'store'}),
- (('--repo',),
- {'help': 'repo to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('--ppa',),
- {'help': 'ppa to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('-u', '--upgrade'),
- {'help': 'upgrade or install cloud-init from repo',
- 'action': 'store_true', 'default': False}),
- (('--upgrade-full',),
- {'help': 'do full system upgrade from repo (implies -u)',
- 'action': 'store_true', 'default': False}),),
-
-}
-
-SUBCMDS = {
- 'bddeb': ('build cloud-init deb from tree',
- ('BDDEB', 'OUTPUT_DEB', 'INTERFACE')),
- 'collect': ('collect test data',
- ('COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT', 'SETUP')),
- 'create': ('create new test case', ('CREATE', 'INTERFACE')),
- 'run': ('run test suite',
- ('COLLECT', 'INTERFACE', 'RESULT', 'OUTPUT', 'SETUP')),
- 'tree_collect': ('collect using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'tree_run': ('run using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'verify': ('verify test data', ('INTERFACE', 'OUTPUT', 'RESULT')),
-}
-
-
-def _empty_normalizer(args):
- """Do not normalize arguments."""
- return args
-
-
-def normalize_bddeb_args(args):
- """Normalize BDDEB arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors encountered
- """
- # make sure cloud-init dir is accessible
- if not (args.cloud_init and os.path.isdir(args.cloud_init)):
- LOG.error('invalid cloud-init tree path')
- return None
-
- return args
-
-
-def normalize_create_args(args):
- """Normalize CREATE arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # ensure valid name for new test
- if len(args.name.split('/')) != 2:
- LOG.error('invalid test name: %s', args.name)
- return None
- if os.path.exists(config.name_to_path(args.name)):
- msg = 'test: {} already exists'.format(args.name)
- if args.force:
- LOG.warning('%s but ignoring due to --force', msg)
- else:
- LOG.error(msg)
- return None
-
- # ensure test config valid if specified
- if isinstance(args.config, str) and len(args.config) == 0:
- LOG.error('test config cannot be empty if specified')
- return None
-
- # ensure description valid if specified
- if (isinstance(args.description, str) and
- (len(args.description) > 70 or len(args.description) == 0)):
- LOG.error('test description must be between 1 and 70 characters')
- return None
-
- return args
-
-
-def normalize_collect_args(args):
- """Normalize COLLECT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # platform should default to lxd
- if len(args.platform) == 0:
- args.platform = ['lxd']
- args.platform = util.sorted_unique(args.platform)
-
- # os name should default to all enabled
- # if os name is provided ensure that all provided are supported
- if len(args.os_name) == 0:
- args.os_name = config.ENABLED_DISTROS
- else:
- supported = config.ENABLED_DISTROS
- invalid = [os_name for os_name in args.os_name
- if os_name not in supported]
- if len(invalid) != 0:
- LOG.error('invalid os name(s): %s', invalid)
- return None
- args.os_name = util.sorted_unique(args.os_name)
-
- # test configs should default to all enabled
- # if test configs are provided, ensure that all provided are valid
- if len(args.test_config) == 0:
- args.test_config = config.list_test_configs()
- else:
- valid = []
- invalid = []
- for name in args.test_config:
- if os.path.exists(name):
- valid.append(name)
- elif os.path.exists(config.name_to_path(name)):
- valid.append(config.name_to_path(name))
- else:
- invalid.append(name)
- if len(invalid) != 0:
- LOG.error('invalid test config(s): %s', invalid)
- return None
- else:
- args.test_config = valid
- args.test_config = util.sorted_unique(args.test_config)
-
- # parse feature flag overrides and ensure all are valid
- if args.feature_override:
- overrides = args.feature_override
- args.feature_override = util.parse_conf_list(
- overrides, boolean=True, valid=config.list_feature_flags())
- if not args.feature_override:
- LOG.error('invalid feature flag override(s): %s', overrides)
- return None
- else:
- args.feature_override = {}
-
- return args
-
-
-def normalize_output_args(args):
- """Normalize OUTPUT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- if args.data_dir:
- args.data_dir = os.path.abspath(args.data_dir)
- if not os.path.exists(args.data_dir):
- os.mkdir(args.data_dir)
-
- if not args.data_dir:
- args.data_dir = None
-
- # ensure clean output dir if collect
- # ensure data exists if verify
- if args.subcmd == 'collect':
- if not util.is_clean_writable_dir(args.data_dir):
- LOG.error('data_dir must be empty/new and must be writable')
- return None
-
- return args
-
-
-def normalize_output_deb_args(args):
- """Normalize OUTPUT_DEB arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # make sure to use abspath for deb
- args.deb = os.path.abspath(args.deb)
-
- if not args.deb.endswith('.deb'):
- LOG.error('output filename does not end in ".deb"')
- return None
-
- return args
-
-
-def normalize_setup_args(args):
- """Normalize SETUP arguments.
-
- @param args: parsed args
- @return_value: updated_args, or None if errors occurred
- """
- # ensure deb or rpm valid if specified
- for pkg in (args.deb, args.rpm):
- if pkg is not None and not os.path.exists(pkg):
- LOG.error('cannot find package: %s', pkg)
- return None
-
- # if repo or ppa to be enabled run upgrade
- if args.repo or args.ppa:
- args.upgrade = True
-
- # if ppa is specified, remove leading 'ppa:' if any
- _ppa_header = 'ppa:'
- if args.ppa and args.ppa.startswith(_ppa_header):
- args.ppa = args.ppa[len(_ppa_header):]
-
- return args
-
-
-NORMALIZERS = {
- 'BDDEB': normalize_bddeb_args,
- 'COLLECT': normalize_collect_args,
- 'CREATE': normalize_create_args,
- 'INTERFACE': _empty_normalizer,
- 'OUTPUT': normalize_output_args,
- 'OUTPUT_DEB': normalize_output_deb_args,
- 'RESULT': _empty_normalizer,
- 'SETUP': normalize_setup_args,
-}
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
deleted file mode 100644
index e45ad947..00000000
--- a/tests/cloud_tests/bddeb.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to build a deb."""
-
-from functools import partial
-import os
-import tempfile
-
-from cloudinit import subp
-from tests.cloud_tests import (config, LOG)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-
-pre_reqs = ['devscripts', 'equivs', 'git', 'tar']
-
-
-def _out(cmd_res):
- """Get clean output from cmd result."""
- return cmd_res[0].decode("utf-8").strip()
-
-
-def build_deb(args, instance):
- """Build deb on system and copy out to location at args.deb.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- # update remote system package list and install build deps
- LOG.debug('installing pre-reqs')
- pkgs = ' '.join(pre_reqs)
- instance.execute('apt-get update && apt-get install --yes {}'.format(pkgs))
-
- # local tmpfile that must be deleted
- local_tarball = tempfile.NamedTemporaryFile().name
-
- # paths to use in remote system
- output_link = '/root/cloud-init_all.deb'
- remote_tarball = _out(instance.execute(['mktemp']))
- extract_dir = '/root'
- bddeb_path = os.path.join(extract_dir, 'packages', 'bddeb')
- git_env = {'GIT_DIR': os.path.join(extract_dir, '.git'),
- 'GIT_WORK_TREE': extract_dir}
-
- LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
- subp.subp(['tar', 'cf', local_tarball, '--owner', 'root',
- '--group', 'root', '-C', args.cloud_init, '.'])
- LOG.debug('copying to remote system at: %s', remote_tarball)
- instance.push_file(local_tarball, remote_tarball)
-
- LOG.debug('extracting tarball in remote system at: %s', extract_dir)
- instance.execute(['tar', 'xf', remote_tarball, '-C', extract_dir])
- instance.execute(['git', 'commit', '-a', '-m', 'tmp', '--allow-empty'],
- env=git_env)
-
- LOG.debug('installing deps')
- deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
- instance.execute([deps_path, '--install', '--test-distro',
- '--distro', 'ubuntu'])
-
- LOG.debug('building deb in remote system at: %s', output_link)
- bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
- instance.execute([bddeb_path, '-d'] + bddeb_args, env=git_env)
-
- # copy the deb back to the host system
- LOG.debug('copying built deb to host at: %s', args.deb)
- instance.pull_file(output_link, args.deb)
-
-
-def setup_build(args):
- """Set build system up then run build.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # set up platform
- LOG.info('setting up platform: %s', args.build_platform)
- platform_config = config.load_platform_config(args.build_platform)
- platform_call = partial(platforms.get_platform, args.build_platform,
- platform_config)
- with PlatformComponent(platform_call) as platform:
-
- # set up image
- LOG.info('acquiring image for os: %s', args.build_os)
- img_conf = config.load_os_config(platform.platform_name, args.build_os)
- image_call = partial(platforms.get_image, platform, img_conf)
- with PlatformComponent(image_call) as image:
-
- # set up snapshot
- snapshot_call = partial(platforms.get_snapshot, image)
- with PlatformComponent(snapshot_call) as snapshot:
-
- # create instance with cloud-config to set it up
- LOG.info('creating instance to build deb in')
- empty_cloud_config = "#cloud-config\n{}"
- instance_call = partial(
- platforms.get_instance, snapshot, empty_cloud_config,
- use_desc='build cloud-init deb')
- with PlatformComponent(instance_call) as instance:
-
- # build the deb
- res = run_single('build deb on system',
- partial(build_deb, args, instance))
-
- return res
-
-
-def bddeb(args):
- """Entry point for build deb.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- LOG.info('preparing to build cloud-init deb')
- _res, failed = run_stage('build deb', [partial(setup_build, args)])
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
deleted file mode 100644
index 642745d8..00000000
--- a/tests/cloud_tests/collect.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to collect data from platforms during tests."""
-
-from functools import partial
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (config, LOG, setup_image, util)
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.testcases import base, get_test_class
-
-
-def collect_script(instance, base_dir, script, script_name):
- """Collect script data.
-
- @param instance: instance to run script on
- @param base_dir: base directory for output data
- @param script: script contents
- @param script_name: name of script to run
- @return_value: None, may raise errors
- """
- LOG.debug('running collect script: %s', script_name)
- (out, err, exit) = instance.run_script(
- script.encode(), rcs=False,
- description='collect: {}'.format(script_name))
- if err:
- LOG.debug("collect script %s exited '%s' and had stderr: %s",
- script_name, err, exit)
- if not isinstance(out, bytes):
- raise util.PlatformError(
- "Collection of '%s' returned type %s, expected bytes: %s" %
- (script_name, type(out), out))
-
- c_util.write_file(os.path.join(base_dir, script_name), out)
-
-
-def collect_console(instance, base_dir):
- """Collect instance console log.
-
- @param instance: instance to get console log for
- @param base_dir: directory to write console log to
- """
- logfile = os.path.join(base_dir, 'console.log')
- LOG.debug('getting console log for %s to %s', instance.name, logfile)
- try:
- data = instance.console_log()
- except NotImplementedError as e:
- # args[0] is hacky, but that's all I see to get at the message.
- data = b'NotImplementedError:' + e.args[0].encode()
- with open(logfile, "wb") as fp:
- fp.write(data)
-
-
-def collect_test_data(args, snapshot, os_name, test_name):
- """Collect data for test case.
-
- @param args: cmdline arguments
- @param snapshot: instantiated snapshot
- @param test_name: name or path of test to run
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # load test config
- test_name_in = test_name
- test_name = config.path_to_name(test_name)
- test_config = config.load_test_config(test_name)
- user_data = test_config['cloud_config']
- test_scripts = test_config['collect_scripts']
- test_output_dir = os.sep.join(
- (args.data_dir, snapshot.platform_name, os_name, test_name))
-
- # if test is not enabled, skip and return 0 failures
- if not test_config.get('enabled', False):
- LOG.warning('test config %s is not enabled, skipping', test_name)
- return ({}, 0)
-
- test_class = get_test_class(
- config.name_to_module(test_name_in),
- test_data={'platform': snapshot.platform_name, 'os_name': os_name},
- test_conf=test_config['cloud_config'])
- try:
- test_class.maybeSkipTest()
- except base.SkipTest as s:
- LOG.warning('skipping test config %s: %s', test_name, s)
- return ({}, 0)
-
- # if testcase requires a feature flag that the image does not support,
- # skip the testcase with a warning
- req_features = test_config.get('required_features', [])
- if any(feature not in snapshot.features for feature in req_features):
- LOG.warning('test config %s requires features not supported by image, '
- 'skipping.\nrequired features: %s\nsupported features: %s',
- test_name, req_features, snapshot.features)
- return ({}, 0)
-
- # if there are user data overrides required for this test case, apply them
- overrides = snapshot.config.get('user_data_overrides', {})
- if overrides:
- LOG.debug('updating user data for collect with: %s', overrides)
- user_data = util.update_user_data(user_data, overrides)
-
- # create test instance
- component = PlatformComponent(
- partial(platforms.get_instance, snapshot, user_data,
- block=True, start=False, use_desc=test_name),
- preserve_instance=args.preserve_instance)
-
- LOG.info('collecting test data for test: %s', test_name)
- with component as instance:
- start_call = partial(run_single, 'boot instance', partial(
- instance.start, wait=True, wait_for_cloud_init=True))
- collect_calls = [partial(run_single, 'script {}'.format(script_name),
- partial(collect_script, instance,
- test_output_dir, script, script_name))
- for script_name, script in test_scripts.items()]
-
- res = run_stage('collect for test: {}'.format(test_name),
- [start_call] + collect_calls)
-
- instance.shutdown()
- collect_console(instance, test_output_dir)
-
- return res
-
-
-def collect_snapshot(args, image, os_name):
- """Collect data for snapshot of image.
-
- @param args: cmdline arguments
- @param image: instantiated image with set up complete
- @return_value tuple of results and fail count
- """
- res = ({}, 1)
-
- component = PlatformComponent(partial(platforms.get_snapshot, image))
-
- LOG.debug('creating snapshot for %s', os_name)
- with component as snapshot:
- LOG.info('collecting test data for os: %s', os_name)
- res = run_stage(
- 'collect test data for {}'.format(os_name),
- [partial(collect_test_data, args, snapshot, os_name, test_name)
- for test_name in args.test_config])
-
- return res
-
-
-def collect_image(args, platform, os_name):
- """Collect data for image.
-
- @param args: cmdline arguments
- @param platform: instantiated platform
- @param os_name: name of distro to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- os_config = config.load_os_config(
- platform.platform_name, os_name, require_enabled=True,
- feature_overrides=args.feature_override)
- LOG.debug('os config: %s', os_config)
- component = PlatformComponent(
- partial(platforms.get_image, platform, os_config))
-
- LOG.info('acquiring image for os: %s', os_name)
- with component as image:
- res = run_stage('set up and collect data for os: {}'.format(os_name),
- [partial(setup_image.setup_image, args, image)] +
- [partial(collect_snapshot, args, image, os_name)],
- continue_after_error=False)
-
- return res
-
-
-def collect_platform(args, platform_name):
- """Collect data for platform.
-
- @param args: cmdline arguments
- @param platform_name: platform to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- platform_config = config.load_platform_config(
- platform_name, require_enabled=True)
- platform_config['data_dir'] = args.data_dir
- LOG.debug('platform config: %s', platform_config)
- component = PlatformComponent(
- partial(platforms.get_platform, platform_name, platform_config))
-
- LOG.info('setting up platform: %s', platform_name)
- with component as platform:
- res = run_stage('collect for platform: {}'.format(platform_name),
- [partial(collect_image, args, platform, os_name)
- for os_name in args.os_name])
-
- return res
-
-
-def collect(args):
- """Entry point for collection.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- (res, failed) = run_stage(
- 'collect data', [partial(collect_platform, args, platform_name)
- for platform_name in args.platform])
-
- LOG.debug('collect stages: %s', res)
- if args.result:
- util.merge_results({'collect_stages': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py
deleted file mode 100644
index 06536edc..00000000
--- a/tests/cloud_tests/config.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to setup test configuration."""
-
-import glob
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (BASE_DIR, TEST_CONF_DIR)
-
-# conf files
-CONF_EXT = '.yaml'
-VERIFY_EXT = '.py'
-PLATFORM_CONF = os.path.join(BASE_DIR, 'platforms.yaml')
-RELEASES_CONF = os.path.join(BASE_DIR, 'releases.yaml')
-TESTCASE_CONF = os.path.join(BASE_DIR, 'testcases.yaml')
-
-
-def get(base, key):
- """Get config entry 'key' from base, ensuring is dictionary."""
- return base[key] if key in base and base[key] is not None else {}
-
-
-def enabled(config):
- """Test if config item is enabled."""
- return isinstance(config, dict) and config.get('enabled', False)
-
-
-def path_to_name(path):
- """Convert abs or rel path to test config to path under 'sconfigs/'."""
- dir_path, file_name = os.path.split(os.path.normpath(path))
- name = os.path.splitext(file_name)[0]
- return os.sep.join((os.path.basename(dir_path), name))
-
-
-def name_to_path(name):
- """Convert test config path under configs/ to full config path."""
- name = os.path.normpath(name)
- if not name.endswith(CONF_EXT):
- name = name + CONF_EXT
- return name if os.path.isabs(name) else os.path.join(TEST_CONF_DIR, name)
-
-
-def name_sanitize(name):
- """Sanitize test name to be used as a module name."""
- return name.replace('-', '_')
-
-
-def name_to_module(name):
- """Convert test name to a loadable module name under 'testcases/'."""
- name = name_sanitize(path_to_name(name))
- return name.replace(os.path.sep, '.')
-
-
-def merge_config(base, override):
- """Merge config and base."""
- res = base.copy()
- res.update(override)
- res.update({k: merge_config(base.get(k, {}), v)
- for k, v in override.items() if isinstance(v, dict)})
- return res
-
-
-def merge_feature_groups(feature_conf, feature_groups, overrides):
- """Combine feature groups and overrides to construct a supported list.
-
- @param feature_conf: feature config from releases.yaml
- @param feature_groups: feature groups the release is a member of
- @param overrides: overrides specified by the release's config
- @return_value: dict of {feature: true/false} settings
- """
- res = dict().fromkeys(feature_conf['all'])
- for group in feature_groups:
- res.update(feature_conf['groups'][group])
- res.update(overrides)
- return res
-
-
-def load_platform_config(platform_name, require_enabled=False):
- """Load configuration for platform.
-
- @param platform_name: name of platform to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @return_value: config dict
- """
- main_conf = c_util.read_conf(PLATFORM_CONF)
- conf = merge_config(main_conf['default_platform_config'],
- main_conf['platforms'][platform_name])
- if require_enabled and not enabled(conf):
- raise ValueError('Platform is not enabled')
- return conf
-
-
-def load_os_config(platform_name, os_name, require_enabled=False,
- feature_overrides=None):
- """Load configuration for os.
-
- @param platform_name: platform name to load os config for
- @param os_name: name of os to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @param feature_overrides: feature flag overrides to merge with features
- @return_value: config dict
- """
- if feature_overrides is None:
- feature_overrides = {}
- main_conf = c_util.read_conf(RELEASES_CONF)
- default = main_conf['default_release_config']
- image = main_conf['releases'][os_name]
- conf = merge_config(merge_config(get(default, 'default'),
- get(default, platform_name)),
- merge_config(get(image, 'default'),
- get(image, platform_name)))
-
- feature_conf = main_conf['features']
- feature_groups = conf.get('feature_groups', [])
- overrides = merge_config(get(conf, 'features'), feature_overrides)
- conf['arch'] = c_util.get_dpkg_architecture()
- conf['features'] = merge_feature_groups(
- feature_conf, feature_groups, overrides)
-
- if require_enabled and not enabled(conf):
- raise ValueError('OS is not enabled')
- return conf
-
-
-def load_test_config(path):
- """Load a test config file by either abs path or rel path."""
- return merge_config(c_util.read_conf(TESTCASE_CONF)['base_test_data'],
- c_util.read_conf(name_to_path(path)))
-
-
-def list_feature_flags():
- """List all supported feature flags."""
- feature_conf = get(c_util.read_conf(RELEASES_CONF), 'features')
- return feature_conf.get('all', [])
-
-
-def list_enabled_platforms():
- """List all platforms enabled for testing."""
- platforms = get(c_util.read_conf(PLATFORM_CONF), 'platforms')
- return [k for k, v in platforms.items() if enabled(v)]
-
-
-def list_enabled_distros(platforms):
- """List all distros enabled for testing on specified platforms."""
- def platform_has_enabled(config):
- """List if platform is enabled."""
- return any(enabled(merge_config(get(config, 'default'),
- get(config, platform)))
- for platform in platforms)
-
- releases = get(c_util.read_conf(RELEASES_CONF), 'releases')
- return [k for k, v in releases.items() if platform_has_enabled(v)]
-
-
-def list_test_configs():
- """List all available test config files by abspath."""
- return [os.path.abspath(f) for f in
- glob.glob(os.sep.join((TEST_CONF_DIR, '*', '*.yaml')))]
-
-
-ENABLED_PLATFORMS = sorted(list_enabled_platforms())
-ENABLED_DISTROS = sorted(list_enabled_distros(ENABLED_PLATFORMS))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/manage.py b/tests/cloud_tests/manage.py
deleted file mode 100644
index 5f0cfd23..00000000
--- a/tests/cloud_tests/manage.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Create test cases automatically given a user_data script."""
-
-import os
-import textwrap
-
-from cloudinit import util as c_util
-from tests.cloud_tests.config import VERIFY_EXT
-from tests.cloud_tests import (config, util)
-from tests.cloud_tests import TESTCASES_DIR
-
-
-_verifier_fmt = textwrap.dedent(
- """
- \"\"\"cloud-init Integration Test Verify Script\"\"\"
- from tests.cloud_tests.testcases import base
-
-
- class {test_class}(base.CloudTestCase):
- \"\"\"
- Name: {test_name}
- Category: {test_category}
- Description: {test_description}
- \"\"\"
- pass
- """
-).lstrip()
-_config_fmt = textwrap.dedent(
- """
- #
- # Name: {test_name}
- # Category: {test_category}
- # Description: {test_description}
- #
- {config}
- """
-).strip()
-
-
-def write_testcase_config(args, fmt_args, testcase_file):
- """Write the testcase config file."""
- testcase_config = {'enabled': args.enable, 'collect_scripts': {}}
- if args.config:
- testcase_config['cloud_config'] = args.config
- fmt_args['config'] = util.yaml_format(testcase_config)
- c_util.write_file(testcase_file, _config_fmt.format(**fmt_args), omode='w')
-
-
-def write_verifier(args, fmt_args, verifier_file):
- """Write the verifier script."""
- fmt_args['test_class'] = 'Test{}'.format(
- config.name_sanitize(fmt_args['test_name']).title())
- c_util.write_file(verifier_file,
- _verifier_fmt.format(**fmt_args), omode='w')
-
-
-def create(args):
- """Create a new testcase."""
- (test_category, test_name) = args.name.split('/')
- fmt_args = {'test_name': test_name, 'test_category': test_category,
- 'test_description': str(args.description)}
-
- testcase_file = config.name_to_path(args.name)
- verifier_file = os.path.join(
- TESTCASES_DIR, test_category,
- config.name_sanitize(test_name) + VERIFY_EXT)
-
- write_testcase_config(args, fmt_args, testcase_file)
- write_verifier(args, fmt_args, verifier_file)
-
- return 0
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
deleted file mode 100644
index eaaa0a71..00000000
--- a/tests/cloud_tests/platforms.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-# ============================= Platform Config ===============================
-default_platform_config:
- # all disabled by default
- enabled: false
- # maximum time to retrieve image
- get_image_timeout: 300
- # maximum time to create instance (before waiting for cloud-init)
- create_instance_timeout: 60
- private_key: cloud_init_rsa
- public_key: cloud_init_rsa.pub
-platforms:
- ec2:
- enabled: true
- instance-type: t2.micro
- tag: cii
- lxd:
- enabled: true
- # overrides for image templates
- template_overrides:
- /var/lib/cloud/seed/nocloud-net/meta-data:
- when:
- - create
- - copy
- template: cloud-init-meta.tpl
- /var/lib/cloud/seed/nocloud-net/network-config:
- when:
- - create
- - copy
- template: cloud-init-network.tpl
- /var/lib/cloud/seed/nocloud-net/user-data:
- when:
- - create
- - copy
- template: cloud-init-user.tpl
- properties:
- default: |
- #cloud-config
- {}
- /var/lib/cloud/seed/nocloud-net/vendor-data:
- when:
- - create
- - copy
- template: cloud-init-vendor.tpl
- properties:
- default: |
- #cloud-config
- {}
- # overrides image template files
- template_files:
- cloud-init-meta.tpl: |
- #cloud-config
- instance-id: {{ container.name }}
- local-hostname: {{ container.name }}
- {{ config_get("user.meta-data", "") }}
- cloud-init-network.tpl: |
- {% if config_get("user.network-config", "") == "" %}version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: {% if config_get("user.network_mode", "") == "link-local" %}manual{% else %}dhcp{% endif %}
- control: auto{% else %}{{ config_get("user.network-config", "") }}{% endif %}
- cloud-init-user.tpl: |
- {{ config_get("user.user-data", properties.default) }}
- cloud-init-vendor.tpl: |
- {{ config_get("user.vendor-data", properties.default) }}
- nocloud-kvm:
- enabled: true
- cache_mode: cache=none,aio=native
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
deleted file mode 100644
index e506baa0..00000000
--- a/tests/cloud_tests/platforms/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-from .ec2 import platform as ec2
-from .lxd import platform as lxd
-from .nocloudkvm import platform as nocloudkvm
-from .azurecloud import platform as azurecloud
-from ..util import emit_dots_on_travis
-
-PLATFORMS = {
- 'ec2': ec2.EC2Platform,
- 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform,
- 'lxd': lxd.LXDPlatform,
- 'azurecloud': azurecloud.AzureCloudPlatform,
-}
-
-
-def get_image(platform, config):
- """Get image from platform object using os_name."""
- with emit_dots_on_travis():
- return platform.get_image(config)
-
-
-def get_instance(snapshot, *args, **kwargs):
- """Get instance from snapshot."""
- return snapshot.launch(*args, **kwargs)
-
-
-def get_platform(platform_name, config):
- """Get the platform object for 'platform_name' and init."""
- platform_cls = PLATFORMS.get(platform_name)
- if not platform_cls:
- raise ValueError('invalid platform name: {}'.format(platform_name))
- return platform_cls(config)
-
-
-def get_snapshot(image):
- """Get snapshot from image."""
- return image.snapshot()
-
-
-# vi: ts=4 expandtab
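The PLATFORMS registry above mapped platform names to their classes, and get_platform() instantiated the matching class or rejected unknown names. A self-contained illustration of that lookup pattern, using a stand-in class rather than any of the removed ec2/lxd/nocloud-kvm/azurecloud implementations:

class DummyPlatform:
    """Stand-in platform class for illustration only."""
    def __init__(self, config):
        self.config = config

PLATFORMS = {'dummy': DummyPlatform}

def get_platform(platform_name, config):
    platform_cls = PLATFORMS.get(platform_name)
    if not platform_cls:
        raise ValueError('invalid platform name: {}'.format(platform_name))
    return platform_cls(config)

platform = get_platform('dummy', {'enabled': True})
print(type(platform).__name__, platform.config)  # DummyPlatform {'enabled': True}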
diff --git a/tests/cloud_tests/platforms/azurecloud/image.py b/tests/cloud_tests/platforms/azurecloud/image.py
deleted file mode 100644
index aad2bca1..00000000
--- a/tests/cloud_tests/platforms/azurecloud/image.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Azure Cloud image Base class."""
-
-from tests.cloud_tests import LOG
-
-from ..images import Image
-from .snapshot import AzureCloudSnapshot
-
-
-class AzureCloudImage(Image):
- """Azure Cloud backed image."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, config, image_id):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_id: image id used to boot instance
- """
- super(AzureCloudImage, self).__init__(platform, config)
- self._img_instance = None
- self.image_id = image_id
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('Deleting backing instance %s',
- self._img_instance.vm_name)
- delete_vm = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self._img_instance.vm_name)
- delete_vm.wait()
-
- super(AzureCloudImage, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- LOG.debug('executing commands on image')
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- LOG.debug('pushing file to image')
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- LOG.debug('running script on image')
- self._instance.start()
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """ Create snapshot (image) of instance, wait until done.
-
- If no instance has been booted, base image is returned.
- Otherwise runs the clean script, deallocates, generalizes
- and creates custom image from instance.
- """
- LOG.debug('creating snapshot of image')
- if not self._img_instance:
- LOG.debug('No existing image, snapshotting base image')
- return AzureCloudSnapshot(self.platform, self.properties,
- self.config, self.features,
- self._instance.vm_name,
- delete_on_destroy=False)
-
- LOG.debug('creating snapshot from instance: %s', self._img_instance)
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- LOG.debug('deallocating instance %s', self._instance.vm_name)
- deallocate = self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self._instance.vm_name)
- deallocate.wait()
-
- LOG.debug('generalizing instance %s', self._instance.vm_name)
- self.platform.compute_client.virtual_machines.generalize(
- self.platform.resource_group.name, self._instance.vm_name)
-
- image_params = {
- "location": self.platform.location,
- "properties": {
- "sourceVirtualMachine": {
- "id": self._img_instance.instance.id
- }
- }
- }
- LOG.debug('updating resource group image %s', self._instance.vm_name)
- self.platform.compute_client.images.create_or_update(
- self.platform.resource_group.name, self._instance.vm_name,
- image_params)
-
- LOG.debug('destroying self')
- self.destroy()
-
- LOG.debug('snapshot complete')
- return AzureCloudSnapshot(self.platform, self.properties, self.config,
- self.features, self._instance.vm_name)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py
deleted file mode 100644
index eedbaae8..00000000
--- a/tests/cloud_tests/platforms/azurecloud/instance.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud instance."""
-
-from datetime import datetime, timedelta
-from urllib.parse import urlparse
-from time import sleep
-import traceback
-import os
-
-
-# pylint: disable=no-name-in-module
-from azure.storage.blob import BlockBlobService, BlobPermissions
-from msrestazure.azure_exceptions import CloudError
-
-from tests.cloud_tests import LOG
-
-from ..instances import Instance
-
-
-class AzureCloudInstance(Instance):
- """Azure Cloud backed instance."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config,
- features, image_id, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_id: image to find and/or use
- @param user_data: test user-data to pass to instance
- """
- super(AzureCloudInstance, self).__init__(
- platform, image_id, properties, config, features)
-
- self.ssh_port = 22
- self.ssh_ip = None
- self.instance = None
- self.image_id = image_id
- self.vm_name = 'ci-azure-i-%s' % self.platform.tag
- self.user_data = user_data
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
- self.blob_client, self.container, self.blob = None, None, None
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance with the platforms NIC."""
- if self.instance:
- return
- data = self.image_id.split('-')
- release, support = data[2].replace('_', '.'), data[3]
- sku = '%s-%s' % (release, support) if support == 'LTS' else release
- image_resource_id = '/subscriptions/%s' \
- '/resourceGroups/%s' \
- '/providers/Microsoft.Compute/images/%s' % (
- self.platform.subscription_id,
- self.platform.resource_group.name,
- self.image_id)
- storage_uri = "http://%s.blob.core.windows.net" \
- % self.platform.storage.name
- with open(self.ssh_pubkey_file, 'r') as key:
- ssh_pub_keydata = key.read()
-
- image_exists = False
- try:
- LOG.debug('finding image in resource group using image_id')
- self.platform.compute_client.images.get(
- self.platform.resource_group.name,
- self.image_id
- )
- image_exists = True
- LOG.debug('image found, launching instance, image_id=%s',
- self.image_id)
- except CloudError:
- LOG.debug(('image not found, launching instance with base image, '
- 'image_id=%s'), self.image_id)
-
- vm_params = {
- 'name': self.vm_name,
- 'location': self.platform.location,
- 'os_profile': {
- 'computer_name': 'CI-%s' % self.platform.tag,
- 'admin_username': self.ssh_username,
- "customData": self.user_data,
- "linuxConfiguration": {
- "disable_password_authentication": True,
- "ssh": {
- "public_keys": [{
- "path": "/home/%s/.ssh/authorized_keys" %
- self.ssh_username,
- "keyData": ssh_pub_keydata
- }]
- }
- }
- },
- "diagnosticsProfile": {
- "bootDiagnostics": {
- "storageUri": storage_uri,
- "enabled": True
- }
- },
- 'hardware_profile': {
- 'vm_size': self.platform.vm_size
- },
- 'storage_profile': {
- 'image_reference': {
- 'id': image_resource_id
- } if image_exists else {
- 'publisher': 'Canonical',
- 'offer': 'UbuntuServer',
- 'sku': sku,
- 'version': 'latest'
- }
- },
- 'network_profile': {
- 'network_interfaces': [{
- 'id': self.platform.nic.id
- }]
- },
- 'tags': {
- 'Name': self.platform.tag,
- }
- }
-
- try:
- self.instance = self.platform.compute_client.virtual_machines.\
- create_or_update(self.platform.resource_group.name,
- self.vm_name, vm_params)
- LOG.debug('creating instance %s from image_id=%s', self.vm_name,
- self.image_id)
- except CloudError as e:
- raise RuntimeError(
- 'failed creating instance:\n{}'.format(traceback.format_exc())
- ) from e
-
- if wait:
- self.instance.wait()
- self.ssh_ip = self.platform.network_client.\
- public_ip_addresses.get(
- self.platform.resource_group.name,
- self.platform.public_ip.name
- ).ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- self.instance = self.instance.result()
- self.blob_client, self.container, self.blob =\
- self._get_blob_client()
-
- def shutdown(self, wait=True):
- """Finds console log then stopping/deallocates VM"""
- LOG.debug('waiting on console log before stopping')
- attempts, exists = 5, False
- while not exists and attempts:
- try:
- attempts -= 1
- exists = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- LOG.debug('found console log')
- except Exception as e:
- if attempts:
- LOG.debug('Unable to find console log, '
- '%s attempts remaining', attempts)
- sleep(15)
- else:
- LOG.warning('Could not find console log: %s', e)
-
- LOG.debug('stopping instance %s', self.image_id)
- vm_deallocate = \
- self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self.image_id)
- if wait:
- vm_deallocate.wait()
-
- def destroy(self):
- """Delete VM and close all connections"""
- if self.instance:
- LOG.debug('destroying instance: %s', self.image_id)
- vm_delete = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self.image_id)
- vm_delete.wait()
-
- self._ssh_close()
-
- super(AzureCloudInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def _get_blob_client(self):
- """
-        Use VM details to retrieve the container and blob name,
-        then create a blob service client with a SAS token to
-        retrieve the console log.
-
- :return: blob service, container name, blob name
- """
- LOG.debug('creating blob service for console log')
- storage = self.platform.storage_client.storage_accounts.get_properties(
- self.platform.resource_group.name, self.platform.storage.name)
-
- keys = self.platform.storage_client.storage_accounts.list_keys(
- self.platform.resource_group.name, self.platform.storage.name
- ).keys[0].value
-
- virtual_machine = self.platform.compute_client.virtual_machines.get(
- self.platform.resource_group.name, self.instance.name,
- expand='instanceView')
-
- blob_uri = virtual_machine.instance_view.boot_diagnostics.\
- serial_console_log_blob_uri
-
- container, blob = urlparse(blob_uri).path.split('/')[-2:]
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- account_key=keys)
-
- sas = blob_client.generate_blob_shared_access_signature(
- container_name=container, blob_name=blob, protocol='https',
- expiry=datetime.utcnow() + timedelta(hours=1),
- permission=BlobPermissions.READ)
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- sas_token=sas)
-
- return blob_client, container, blob
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- boot_diagnostics = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- return boot_diagnostics.content
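Both the Azure and EC2 _execute() implementations prefix remote commands with sudo and optional env assignments. A small standalone sketch of that argument-list construction, with the comprehension written out in full; the command and environment values are placeholders:

def build_remote_command(command, env=None):
    """Prefix a command with sudo and optional env VAR=value assignments."""
    env_args = []
    if env:
        env_args = ['env'] + ['%s=%s' % (k, v) for k, v in env.items()]
    return ['sudo'] + env_args + list(command)

print(build_remote_command(['cloud-init', 'status'], {'LANG': 'C'}))
# ['sudo', 'env', 'LANG=C', 'cloud-init', 'status']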
diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py
deleted file mode 100644
index a664f612..00000000
--- a/tests/cloud_tests/platforms/azurecloud/platform.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud class."""
-
-import os
-import base64
-import traceback
-from datetime import datetime
-from tests.cloud_tests import LOG
-
-# pylint: disable=no-name-in-module
-from azure.common.credentials import ServicePrincipalCredentials
-# pylint: disable=no-name-in-module
-from azure.mgmt.resource import ResourceManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.network import NetworkManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.compute import ComputeManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.storage import StorageManagementClient
-from msrestazure.azure_exceptions import CloudError
-
-from .image import AzureCloudImage
-from .instance import AzureCloudInstance
-from ..platforms import Platform
-
-from cloudinit import util as c_util
-
-
-class AzureCloudPlatform(Platform):
- """Azure Cloud test platforms."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, config):
- """Set up platform."""
- super(AzureCloudPlatform, self).__init__(config)
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.storage_sku = config['storage_sku']
- self.vm_size = config['vm_size']
- self.location = config['region']
-
- try:
- self.credentials, self.subscription_id = self._get_credentials()
-
- self.resource_client = ResourceManagementClient(
- self.credentials, self.subscription_id)
- self.compute_client = ComputeManagementClient(
- self.credentials, self.subscription_id)
- self.network_client = NetworkManagementClient(
- self.credentials, self.subscription_id)
- self.storage_client = StorageManagementClient(
- self.credentials, self.subscription_id)
-
- self.resource_group = self._create_resource_group()
- self.public_ip = self._create_public_ip_address()
- self.storage = self._create_storage_account(config)
- self.vnet = self._create_vnet()
- self.subnet = self._create_subnet()
- self.nic = self._create_nic()
- except CloudError as e:
- raise RuntimeError(
- 'failed creating a resource:\n{}'.format(
- traceback.format_exc()
- )
- ) from e
-
- def create_instance(self, properties, config, features,
- image_id, user_data=None):
- """Create an instance
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_id: string of image id
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- if user_data is not None:
- user_data = str(base64.b64encode(
- user_data.encode('utf-8')), 'utf-8')
-
- return AzureCloudInstance(self, properties, config, features,
- image_id, user_data)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- ss_region = self.azure_location_to_simplestreams_region()
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://management.core.windows.net/',
- 'region=%s' % ss_region,
- 'release=%s' % img_conf['release']
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_id = image['id']
- LOG.debug('found image: %s', image_id)
- if image_id.find('__') > 0:
- image_id = image_id.split('__')[1]
- LOG.debug('image_id shortened to %s', image_id)
- except KeyError as e:
- raise RuntimeError(
- 'no images found for %s' % img_conf['release']
- ) from e
-
- return AzureCloudImage(self, img_conf, image_id)
-
- def destroy(self):
- """Delete all resources in resource group."""
- LOG.debug("Deleting resource group: %s", self.resource_group.name)
- delete = self.resource_client.resource_groups.delete(
- self.resource_group.name)
- delete.wait()
-
- def azure_location_to_simplestreams_region(self):
- """Convert location to simplestreams region"""
- location = self.location.lower().replace(' ', '')
- LOG.debug('finding location %s using simple streams', location)
- regions_file = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'regions.json')
- region_simplestreams_map = c_util.load_json(
- c_util.load_file(regions_file))
- return region_simplestreams_map.get(location, location)
-
- def _get_credentials(self):
- """Get credentials from environment"""
- LOG.debug('getting credentials from environment')
- cred_file = os.path.expanduser('~/.azure/credentials.json')
- try:
- azure_creds = c_util.load_json(
- c_util.load_file(cred_file))
- subscription_id = azure_creds['subscriptionId']
- credentials = ServicePrincipalCredentials(
- client_id=azure_creds['clientId'],
- secret=azure_creds['clientSecret'],
- tenant=azure_creds['tenantId'])
- return credentials, subscription_id
- except KeyError as e:
- raise RuntimeError(
- 'Please configure Azure service principal'
- ' credentials in %s' % cred_file
- ) from e
-
- def _create_resource_group(self):
- """Create resource group"""
- LOG.debug('creating resource group')
- resource_group_name = self.tag
- resource_group_params = {
- 'location': self.location
- }
- resource_group = self.resource_client.resource_groups.create_or_update(
- resource_group_name, resource_group_params)
- return resource_group
-
- def _create_storage_account(self, config):
- LOG.debug('creating storage account')
- storage_account_name = 'storage%s' % datetime.now().\
- strftime('%Y%m%d%H%M%S')
- storage_params = {
- 'sku': {
- 'name': config['storage_sku']
- },
- 'kind': "Storage",
- 'location': self.location
- }
- storage_account = self.storage_client.storage_accounts.create(
- self.resource_group.name, storage_account_name, storage_params)
- return storage_account.result()
-
- def _create_public_ip_address(self):
- """Create public ip address"""
- LOG.debug('creating public ip address')
- public_ip_name = '%s-ip' % self.resource_group.name
- public_ip_params = {
- 'location': self.location,
- 'public_ip_allocation_method': 'Dynamic'
- }
- ip = self.network_client.public_ip_addresses.create_or_update(
- self.resource_group.name, public_ip_name, public_ip_params)
- return ip.result()
-
- def _create_vnet(self):
- """create virtual network"""
- LOG.debug('creating vnet')
- vnet_name = '%s-vnet' % self.resource_group.name
- vnet_params = {
- 'location': self.location,
- 'address_space': {
- 'address_prefixes': ['10.0.0.0/16']
- }
- }
- vnet = self.network_client.virtual_networks.create_or_update(
- self.resource_group.name, vnet_name, vnet_params)
- return vnet.result()
-
- def _create_subnet(self):
- """create sub-network"""
- LOG.debug('creating subnet')
- subnet_name = '%s-subnet' % self.resource_group.name
- subnet_params = {
- 'address_prefix': '10.0.0.0/24'
- }
- subnet = self.network_client.subnets.create_or_update(
- self.resource_group.name, self.vnet.name,
- subnet_name, subnet_params)
- return subnet.result()
-
- def _create_nic(self):
- """Create network interface controller"""
- LOG.debug('creating nic')
- nic_name = '%s-nic' % self.resource_group.name
- nic_params = {
- 'location': self.location,
- 'ip_configurations': [{
- 'name': 'ipconfig',
- 'subnet': {
- 'id': self.subnet.id
- },
- 'publicIpAddress': {
- 'id': "/subscriptions/%s"
- "/resourceGroups/%s/providers/Microsoft.Network"
- "/publicIPAddresses/%s" % (
- self.subscription_id, self.resource_group.name,
- self.public_ip.name),
- }
- }]
- }
- nic = self.network_client.network_interfaces.create_or_update(
- self.resource_group.name, nic_name, nic_params)
- return nic.result()
diff --git a/tests/cloud_tests/platforms/azurecloud/regions.json b/tests/cloud_tests/platforms/azurecloud/regions.json
deleted file mode 100644
index c1b4da20..00000000
--- a/tests/cloud_tests/platforms/azurecloud/regions.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "eastasia": "East Asia",
- "southeastasia": "Southeast Asia",
- "centralus": "Central US",
- "eastus": "East US",
- "eastus2": "East US 2",
- "westus": "West US",
- "northcentralus": "North Central US",
- "southcentralus": "South Central US",
- "northeurope": "North Europe",
- "westeurope": "West Europe",
- "japanwest": "Japan West",
- "japaneast": "Japan East",
- "brazilsouth": "Brazil South",
- "australiaeast": "Australia East",
- "australiasoutheast": "Australia Southeast",
- "southindia": "South India",
- "centralindia": "Central India",
- "westindia": "West India",
- "canadacentral": "Canada Central",
- "canadaeast": "Canada East",
- "uksouth": "UK South",
- "ukwest": "UK West",
- "westcentralus": "West Central US",
- "westus2": "West US 2",
- "koreacentral": "Korea Central",
- "koreasouth": "Korea South",
- "francecentral": "France Central",
- "francesouth": "France South",
- "australiacentral": "Australia Central",
- "australiacentral2": "Australia Central 2",
- "uaecentral": "UAE Central",
- "uaenorth": "UAE North",
- "southafricanorth": "South Africa North",
- "southafricawest": "South Africa West",
- "switzerlandnorth": "Switzerland North",
- "switzerlandwest": "Switzerland West",
- "germanynorth": "Germany North",
- "germanywestcentral": "Germany West Central",
- "norwaywest": "Norway West",
- "norwayeast": "Norway East"
-}
diff --git a/tests/cloud_tests/platforms/azurecloud/snapshot.py b/tests/cloud_tests/platforms/azurecloud/snapshot.py
deleted file mode 100644
index 580cc596..00000000
--- a/tests/cloud_tests/platforms/azurecloud/snapshot.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud snapshot."""
-
-from ..snapshots import Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class AzureCloudSnapshot(Snapshot):
- """Azure Cloud image copy backed snapshot."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config, features, image_id,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- super(AzureCloudSnapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_id = image_id
- self.delete_on_destroy = delete_on_destroy
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Azure Cloud tests")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data)
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- LOG.debug('destroying image %s', self.image_id)
- if self.delete_on_destroy:
- self.platform.compute_client.images.delete(
- self.platform.resource_group.name,
- self.image_id)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py
deleted file mode 100644
index d7b2c908..00000000
--- a/tests/cloud_tests/platforms/ec2/image.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""EC2 Image Base Class."""
-
-from ..images import Image
-from .snapshot import EC2Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class EC2Image(Image):
- """EC2 backed image."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, config, image_ami):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_ami: string of image ami ID
- """
- super(EC2Image, self).__init__(platform, config)
- self._img_instance = None
- self.image_ami = image_ami
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('terminating backing instance %s',
- self._img_instance.instance.instance_id)
- self._img_instance.instance.terminate()
- self._img_instance.instance.wait_until_terminated()
-
- super(EC2Image, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- self._instance.start(wait=True)
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- self._instance.start(wait=True)
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done.
-
-        Will return the base image_ami if no instance has been booted;
-        otherwise it will run the clean script, shut down the instance,
-        create a custom AMI, and use that AMI once available.
- """
- if not self._img_instance:
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, self.image_ami,
- delete_on_destroy=False)
-
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- self._img_instance.shutdown(wait=True)
-
- LOG.debug('creating custom ami from instance %s',
- self._img_instance.instance.instance_id)
- response = self.platform.ec2_client.create_image(
- Name='%s-%s' % (self.platform.tag, self.image_ami),
- InstanceId=self._img_instance.instance.instance_id
- )
- image_ami_edited = response['ImageId']
-
- # Create image and wait until it is in the 'available' state
- image = self.platform.ec2_resource.Image(image_ami_edited)
- image.wait_until_exists()
- waiter = self.platform.ec2_client.get_waiter('image_available')
- waiter.wait(ImageIds=[image.id])
- image.reload()
-
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, image_ami_edited)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
deleted file mode 100644
index d2e84047..00000000
--- a/tests/cloud_tests/platforms/ec2/instance.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 instance."""
-import os
-
-import botocore
-
-from ..instances import Instance
-from tests.cloud_tests import LOG, util
-
-
-class EC2Instance(Instance):
- """EC2 backed instance."""
-
- platform_name = "ec2"
- _ssh_client = None
-
- def __init__(self, platform, properties, config, features,
- image_ami, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_ami: AWS AMI ID for image to use
- @param user_data: test user-data to pass to instance
- """
- super(EC2Instance, self).__init__(
- platform, image_ami, properties, config, features)
-
- self.image_ami = image_ami
- self.instance = None
- self.user_data = user_data
- self.ssh_ip = None
- self.ssh_port = 22
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- def console_log(self):
- """Collect console log from instance.
-
- The console log is buffered and not always present, therefore
- may return empty string.
- """
- try:
- # OutputBytes comes from platform._decode_console_output_as_bytes
- response = self.instance.console_output()
- return response['OutputBytes']
- except KeyError as e:
- if 'Output' in response:
- msg = ("'OutputBytes' did not exist in console_output() but "
- "'Output' did: %s..." % response['Output'][0:128])
- raise util.PlatformError('console_log', msg) from e
- return ('No Console Output [%s]' % self.instance).encode()
-
- def destroy(self):
- """Clean up instance."""
- if self.instance:
- LOG.debug('destroying instance %s', self.instance.id)
- self.instance.terminate()
- self.instance.wait_until_terminated()
-
- self._ssh_close()
-
- super(EC2Instance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance on EC2 with the platfrom's VPC."""
- if self.instance:
- if self.instance.state['Name'] == 'running':
- return
-
- LOG.debug('starting instance %s', self.instance.id)
- self.instance.start()
- else:
- LOG.debug('launching instance')
-
- args = {
- 'ImageId': self.image_ami,
- 'InstanceType': self.platform.instance_type,
- 'KeyName': self.platform.key_name,
- 'MaxCount': 1,
- 'MinCount': 1,
- 'SecurityGroupIds': [self.platform.security_group.id],
- 'SubnetId': self.platform.subnet.id,
- 'TagSpecifications': [{
- 'ResourceType': 'instance',
- 'Tags': [{
- 'Key': 'Name', 'Value': self.platform.tag
- }]
- }],
- }
-
- if self.user_data:
- args['UserData'] = self.user_data
-
- try:
- instances = self.platform.ec2_resource.create_instances(**args)
- except botocore.exceptions.ClientError as error:
- error_msg = error.response['Error']['Message']
-                raise util.PlatformError('start', error_msg) from error
-
- self.instance = instances[0]
-
- LOG.debug('instance id: %s', self.instance.id)
- if wait:
- self.instance.wait_until_running()
- self.instance.reload()
- self.ssh_ip = self.instance.public_ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- LOG.debug('stopping instance %s', self.instance.id)
- self.instance.stop()
-
- if wait:
- self.instance.wait_until_stopped()
- self.instance.reload()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
deleted file mode 100644
index b61a2ffb..00000000
--- a/tests/cloud_tests/platforms/ec2/platform.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 platform."""
-from datetime import datetime
-import os
-
-import boto3
-import botocore
-from botocore import session, handlers
-import base64
-
-from ..platforms import Platform
-from .image import EC2Image
-from .instance import EC2Instance
-from tests.cloud_tests import LOG
-
-
-class EC2Platform(Platform):
- """EC2 test platform."""
-
- platform_name = 'ec2'
- ipv4_cidr = '192.168.1.0/20'
-
- def __init__(self, config):
- """Set up platform."""
- super(EC2Platform, self).__init__(config)
- # Used for unique VPC, SSH key, and custom AMI generation naming
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.instance_type = config['instance-type']
-
- try:
- b3session = get_session()
- self.ec2_client = b3session.client('ec2')
- self.ec2_resource = b3session.resource('ec2')
- self.ec2_region = b3session.region_name
- self.key_name = self._upload_public_key(config)
- except botocore.exceptions.NoRegionError as e:
- raise RuntimeError(
- 'Please configure default region in $HOME/.aws/config'
- ) from e
- except botocore.exceptions.NoCredentialsError as e:
- raise RuntimeError(
- 'Please configure ec2 credentials in $HOME/.aws/credentials'
- ) from e
-
- self.vpc = self._create_vpc()
- self.internet_gateway = self._create_internet_gateway()
- self.subnet = self._create_subnet()
- self.routing_table = self._create_routing_table()
- self.security_group = self._create_security_group()
-
- def create_instance(self, properties, config, features,
- image_ami, user_data=None):
- """Create an instance
-
- @param src_img_path: image path to launch from
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_ami: string of image ami ID
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- return EC2Instance(self, properties, config, features,
- image_ami, user_data)
-
- def destroy(self):
- """Delete SSH keys, terminate all instances, and delete VPC."""
- for instance in self.vpc.instances.all():
- LOG.debug('waiting for instance %s termination', instance.id)
- instance.terminate()
- instance.wait_until_terminated()
-
- if self.key_name:
- LOG.debug('deleting SSH key %s', self.key_name)
- self.ec2_client.delete_key_pair(KeyName=self.key_name)
-
- if self.security_group:
- LOG.debug('deleting security group %s', self.security_group.id)
- self.security_group.delete()
-
- if self.subnet:
- LOG.debug('deleting subnet %s', self.subnet.id)
- self.subnet.delete()
-
- if self.routing_table:
- LOG.debug('deleting routing table %s', self.routing_table.id)
- self.routing_table.delete()
-
- if self.internet_gateway:
- LOG.debug('deleting internet gateway %s', self.internet_gateway.id)
- self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id)
- self.internet_gateway.delete()
-
- if self.vpc:
- LOG.debug('deleting vpc %s', self.vpc.id)
- self.vpc.delete()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- Hard coded for 'amd64' based images.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- if img_conf['root-store'] == 'ebs':
- root_store = 'ssd'
- elif img_conf['root-store'] == 'instance-store':
- root_store = 'instance'
- else:
- raise RuntimeError('Unknown root-store type: %s' %
- (img_conf['root-store']))
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
- 'region=%s' % self.ec2_region,
- 'release=%s' % img_conf['release'],
- 'root_store=%s' % root_store,
- 'virt=hvm',
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_ami = image['id']
- except KeyError as e:
- raise RuntimeError(
- 'No images found for %s!' % img_conf['release']
- ) from e
-
- LOG.debug('found image: %s', image_ami)
- image = EC2Image(self, img_conf, image_ami)
- return image
-
- def _create_internet_gateway(self):
- """Create Internet Gateway and assign to VPC."""
- LOG.debug('creating internet gateway')
- # pylint: disable=no-member
- internet_gateway = self.ec2_resource.create_internet_gateway()
- internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
- self._tag_resource(internet_gateway)
-
- return internet_gateway
-
- def _create_routing_table(self):
- """Update default routing table with internet gateway.
-
-        This sets up internet access for the VPC via the internet gateway
- by configuring routing tables for IPv4 and IPv6.
- """
- LOG.debug('creating routing table')
- route_table = self.vpc.create_route_table()
- route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
- GatewayId=self.internet_gateway.id)
- route_table.create_route(DestinationIpv6CidrBlock='::/0',
- GatewayId=self.internet_gateway.id)
- route_table.associate_with_subnet(SubnetId=self.subnet.id)
- self._tag_resource(route_table)
-
- return route_table
-
- def _create_security_group(self):
- """Enables ingress to default VPC security group."""
- LOG.debug('creating security group')
- security_group = self.vpc.create_security_group(
- GroupName=self.tag, Description='integration test security group')
- security_group.authorize_ingress(
- IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
- self._tag_resource(security_group)
-
- return security_group
-
- def _create_subnet(self):
- """Generate IPv4 and IPv6 subnets for use."""
- ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][
- 'Ipv6CidrBlock'][:-2] + '64'
-
- LOG.debug('creating subnet with following ranges:')
- LOG.debug('ipv4: %s', self.ipv4_cidr)
- LOG.debug('ipv6: %s', ipv6_cidr)
- subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr,
- Ipv6CidrBlock=ipv6_cidr)
- modify_subnet = subnet.meta.client.modify_subnet_attribute
- modify_subnet(SubnetId=subnet.id,
- MapPublicIpOnLaunch={'Value': True})
- self._tag_resource(subnet)
-
- return subnet
-
- def _create_vpc(self):
- """Setup AWS EC2 VPC or return existing VPC."""
- LOG.debug('creating new vpc')
- try:
- vpc = self.ec2_resource.create_vpc( # pylint: disable=no-member
- CidrBlock=self.ipv4_cidr,
- AmazonProvidedIpv6CidrBlock=True)
- except botocore.exceptions.ClientError as e:
- raise RuntimeError(e) from e
-
- vpc.wait_until_available()
- self._tag_resource(vpc)
-
- return vpc
-
- def _tag_resource(self, resource):
- """Tag a resource with the specified tag.
-
-        This makes resources specific to this test run much easier to
-        find and delete.
-
- @param resource: resource to tag
- """
- tag = {
- 'Key': 'Name',
- 'Value': self.tag
- }
- resource.create_tags(Tags=[tag])
-
- def _upload_public_key(self, config):
- """Generate random name and upload SSH key with that name.
-
- @param config: platform config
- @return: string of ssh key name
- """
- key_file = os.path.join(config['data_dir'], config['public_key'])
- with open(key_file, 'r') as file:
- public_key = file.read().strip('\n')
-
- LOG.debug('uploading SSH key %s', self.tag)
- self.ec2_client.import_key_pair(KeyName=self.tag,
- PublicKeyMaterial=public_key)
-
- return self.tag
-
-
-def _decode_console_output_as_bytes(parsed, **kwargs):
- """Provide console output as bytes in OutputBytes.
-
- For this to be useful, the session has to have had the
- decode_console_output handler unregistered already.
-
- https://github.com/boto/botocore/issues/1351 ."""
- if 'Output' not in parsed:
- return
- orig = parsed['Output']
- handlers.decode_console_output(parsed, **kwargs)
- parsed['OutputBytes'] = base64.b64decode(orig)
-
-
-def get_session():
- mysess = session.get_session()
- mysess.unregister('after-call.ec2.GetConsoleOutput',
- handlers.decode_console_output)
- mysess.register('after-call.ec2.GetConsoleOutput',
- _decode_console_output_as_bytes)
- return boto3.Session(botocore_session=mysess)
-
-
-# vi: ts=4 expandtab
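get_session() above swaps botocore's default GetConsoleOutput handler for _decode_console_output_as_bytes so the raw console bytes are available. A hedged usage sketch, assuming the two helpers defined above are importable and AWS credentials and a default region are configured; the instance id is a placeholder:

b3session = get_session()
ec2 = b3session.client('ec2')
response = ec2.get_console_output(InstanceId='i-0123456789abcdef0')  # placeholder id
console_bytes = response.get('OutputBytes', b'')  # raw bytes added by the custom handler
print(len(console_bytes))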
diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py
deleted file mode 100644
index 2c48cb54..00000000
--- a/tests/cloud_tests/platforms/ec2/snapshot.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 snapshot."""
-
-from ..snapshots import Snapshot
-from tests.cloud_tests import LOG
-
-
-class EC2Snapshot(Snapshot):
- """EC2 image copy backed snapshot."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, properties, config, features, image_ami,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_ami: string of image ami ID
- @param delete_on_destroy: boolean to delete on destroy
- """
- super(EC2Snapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_ami = image_ami
- self.delete_on_destroy = delete_on_destroy
-
- def destroy(self):
- """Deregister the backing AMI."""
- if self.delete_on_destroy:
- image = self.platform.ec2_resource.Image(self.image_ami)
- snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
-
- LOG.debug('removing custom ami %s', self.image_ami)
- self.platform.ec2_client.deregister_image(ImageId=self.image_ami)
-
- LOG.debug('removing custom snapshot %s', snapshot_id)
- self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: string of test name
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Ec2")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data)
-
- if start:
- instance.start()
-
- return instance
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py
deleted file mode 100644
index f047de2e..00000000
--- a/tests/cloud_tests/platforms/images.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base class for images."""
-
-from ..util import TargetBase
-
-
-class Image(TargetBase):
- """Base class for images."""
-
- platform_name = None
-
- def __init__(self, platform, config):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- """
- self.platform = platform
- self.config = config
-
- def __str__(self):
- """A brief description of the image."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- return {k: self.config[k]
- for k in ('arch', 'os', 'release', 'version')}
-
- @property
- def features(self):
- """Feature flags supported by this image.
-
- @return_value: list of feature names
- """
- return [k for k, v in self.config.get('features', {}).items() if v]
-
- @property
- def setup_overrides(self):
- """Setup options that need to be overridden for the image.
-
- @return_value: dictionary to update args with
- """
-        # NOTE: more sophisticated options may be required at some point
- return self.config.get('setup_overrides', {})
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up data associated with image."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
deleted file mode 100644
index efc35c7f..00000000
--- a/tests/cloud_tests/platforms/instances.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base instance."""
-import time
-
-import paramiko
-from paramiko.ssh_exception import (
- BadHostKeyException, AuthenticationException, SSHException)
-
-from ..util import TargetBase
-from tests.cloud_tests import LOG, util
-
-
-class Instance(TargetBase):
- """Base instance object."""
-
- platform_name = None
- _ssh_client = None
-
- def __init__(self, platform, name, properties, config, features):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.name = name
- self.properties = properties
- self.config = config
- self.features = features
- self._tmp_count = 0
-
- self.ssh_ip = None
- self.ssh_port = None
- self.ssh_key_file = None
- self.ssh_username = 'ubuntu'
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- raise NotImplementedError
-
- def reboot(self, wait=True):
- """Reboot instance."""
- raise NotImplementedError
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- raise NotImplementedError
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up instance."""
- self._ssh_close()
-
- def _ssh(self, command, stdin=None):
- """Run a command via SSH."""
- client = self._ssh_connect()
-
- cmd = util.shell_pack(command)
- fp_in, fp_out, fp_err = client.exec_command(cmd)
- channel = fp_in.channel
-
- if stdin is not None:
- fp_in.write(stdin)
- fp_in.close()
-
- channel.shutdown_write()
- rc = channel.recv_exit_status()
-
- return (fp_out.read(), fp_err.read(), rc)
-
- def _ssh_close(self):
- if self._ssh_client:
- try:
- self._ssh_client.close()
- except SSHException:
- LOG.warning('Failed to close SSH connection.')
- self._ssh_client = None
-
- def _ssh_connect(self):
- """Connect via SSH.
-
- Attempt to SSH to the client on the specific IP and port. If it
- fails in some manner, then retry 2 more times for a total of 3
- attempts; sleeping a few seconds between attempts.
- """
- if self._ssh_client:
- return self._ssh_client
-
- if not self.ssh_ip or not self.ssh_port:
- raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
- (self.ssh_ip, self.ssh_port))
-
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
-
- retries = 3
- while retries:
- try:
- client.connect(username=self.ssh_username,
- hostname=self.ssh_ip, port=self.ssh_port,
- pkey=private_key)
- self._ssh_client = client
- return client
- except (ConnectionRefusedError, AuthenticationException,
- BadHostKeyException, ConnectionResetError, SSHException,
- OSError):
- retries -= 1
- LOG.debug('Retrying ssh connection on connect failure')
- time.sleep(3)
-
- ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
- self.ssh_username, self.ssh_ip, self.ssh_port
- )
- raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
-
- def _wait_for_system(self, wait_for_cloud_init):
- """Wait until system has fully booted and cloud-init has finished.
-
-        @param wait_for_cloud_init: whether to also wait for cloud-init
-        @return_value: None, may raise PlatformError if boot_timeout exceeded
- """
- def clean_test(test):
- """Clean formatting for system ready test testcase."""
- return ' '.join(line for line in test.strip().splitlines()
- if not line.lstrip().startswith('#'))
-
- boot_timeout = self.config['boot_timeout']
- tests = [self.config['system_ready_script']]
- if wait_for_cloud_init:
- tests.append(self.config['cloud_init_ready_script'])
-
- formatted_tests = ' && '.join(clean_test(t) for t in tests)
- cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
- 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
- test=formatted_tests)
-
- end_time = time.time() + boot_timeout
- while True:
- try:
- return_code = self.execute(
- cmd, rcs=(0, 1), description='wait for instance start'
- )[-1]
- if return_code == 0:
- break
- except util.InTargetExecuteError:
- LOG.warning("failed to connect via SSH")
-
- if time.time() < end_time:
- time.sleep(3)
- else:
- raise util.PlatformError('ssh', 'after %ss instance is not '
- 'reachable' % boot_timeout)
-
-# vi: ts=4 expandtab
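_wait_for_system() above polls the instance with a single shell one-liner built from boot_timeout and the configured readiness scripts. A worked example of the string it produces; the timeout value and readiness test are illustrative only:

boot_timeout = 120  # illustrative value
formatted_tests = '[ -f /run/cloud-init/result.json ]'  # illustrative readiness test
cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
       'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
                                               test=formatted_tests)
print(cmd)
# i=0; while [ $i -lt 120 ] && i=$(($i+1)); do [ -f /run/cloud-init/result.json ] && exit 0; sleep 1; done; exit 1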
diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py
deleted file mode 100644
index a88b47f3..00000000
--- a/tests/cloud_tests/platforms/lxd/image.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""LXD Image Base Class."""
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import LXDSnapshot
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class LXDImage(Image):
- """LXD backed image."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, config, pylxd_image):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- """
- self.modified = False
- self._img_instance = None
- self._pylxd_image = None
- self.pylxd_image = pylxd_image
- super(LXDImage, self).__init__(platform, config)
-
- @property
- def pylxd_image(self):
- """Property function."""
- if self._pylxd_image:
- self._pylxd_image.sync()
- return self._pylxd_image
-
- @pylxd_image.setter
- def pylxd_image(self, pylxd_image):
- if self._img_instance:
- self._instance.destroy()
- self._img_instance = None
- if (self._pylxd_image and
- (self._pylxd_image is not pylxd_image) and
- (not self.config.get('cache_base_image') or self.modified)):
- self._pylxd_image.delete(wait=True)
- self.modified = False
- self._pylxd_image = pylxd_image
-
- @property
- def _instance(self):
- """Internal use only, returns a instance
-
- This starts an lxc instance from the image, so it is "dirty".
- Better would be some way to modify this "at rest".
- lxc-pstart would be an option."""
- if not self._img_instance:
- self._img_instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- use_desc='image-modification', image_desc=str(self),
- image=self.pylxd_image.fingerprint)
- self._img_instance.start()
- return self._img_instance
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- properties = self.pylxd_image.properties
- return {
- 'arch': properties.get('architecture'),
- 'os': properties.get('os'),
- 'version': properties.get('version'),
- 'release': properties.get('release'),
- }
-
- def export_image(self, output_dir):
- """Export image from lxd image store to disk.
-
- @param output_dir: dir to store the exported image in
- @return_value: tuple of path to metadata tarball and rootfs
-
- Only the "split" image format with separate rootfs and metadata
- files is supported, e.g:
-
- 71f171df[...]cd31.squashfs (could also be: .tar.xz or .tar.gz)
- meta-71f171df[...]cd31.tar.xz
-
- Combined images made by a single tarball are not supported.
- """
- # pylxd's image export feature doesn't do split exports, so use cmdline
- fp = self.pylxd_image.fingerprint
- subp.subp(['lxc', 'image', 'export', fp, output_dir], capture=True)
- image_files = [p for p in os.listdir(output_dir) if fp in p]
-
- if len(image_files) != 2:
- raise NotImplementedError(
- "Image %s has unsupported format. "
- "Expected 2 files, found %d: %s."
- % (fp, len(image_files), ', '.join(image_files)))
-
- metadata = os.path.join(
- output_dir,
- next(p for p in image_files if p.startswith('meta-')))
- rootfs = os.path.join(
- output_dir,
- next(p for p in image_files if not p.startswith('meta-')))
- return (metadata, rootfs)
-
- def import_image(self, metadata, rootfs):
- """Import image to lxd image store from (split) tarball on disk.
-
- Note, this will replace and delete the current pylxd_image
-
- @param metadata: metadata tarball
- @param rootfs: rootfs tarball
- @return_value: imported image fingerprint
- """
- alias = util.gen_instance_name(
- image_desc=str(self), use_desc='update-metadata')
- subp.subp(['lxc', 'image', 'import', metadata, rootfs,
- '--alias', alias], capture=True)
- self.pylxd_image = self.platform.query_image_by_alias(alias)
- return self.pylxd_image.fingerprint
-
- def update_templates(self, template_config, template_data):
- """Update the image's template configuration.
-
- Note, this will replace and delete the current pylxd_image
-
- @param template_config: config overrides for template metadata
- @param template_data: template data to place into templates/
- """
- # set up tmp files
- export_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- new_metadata = os.path.join(export_dir, 'new-meta.tar.xz')
- metadata_yaml = os.path.join(extract_dir, 'metadata.yaml')
- template_dir = os.path.join(extract_dir, 'templates')
-
- try:
- # extract old data
- (metadata, rootfs) = self.export_image(export_dir)
- shutil.unpack_archive(metadata, extract_dir)
-
- # update metadata
- metadata = c_util.read_conf(metadata_yaml)
- templates = metadata.get('templates', {})
- templates.update(template_config)
- metadata['templates'] = templates
- util.yaml_dump(metadata, metadata_yaml)
-
- # write out template files
- for name, content in template_data.items():
- path = os.path.join(template_dir, name)
- c_util.write_file(path, content)
-
- # store new data, mark new image as modified
- util.flat_tar(new_metadata, extract_dir)
- self.import_image(new_metadata, rootfs)
- self.modified = True
-
- finally:
- # remove tmpfiles
- shutil.rmtree(export_dir)
- shutil.rmtree(extract_dir)
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- # get empty user data to pass in to instance
- # if overrides for user data provided, use them
- empty_userdata = util.update_user_data(
- {}, self.config.get('user_data_overrides', {}))
- conf = {'user.user-data': empty_userdata}
- # clone current instance
- instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- container=self._instance.name, image_desc=str(self),
- use_desc='snapshot', container_config=conf)
- # wait for cloud-init before boot_clean_script is run to ensure
- # /var/lib/cloud is removed cleanly
- instance.start(wait=True, wait_for_cloud_init=True)
- if self.config.get('boot_clean_script'):
- instance.run_script(self.config.get('boot_clean_script'))
- # freeze current instance and return snapshot
- instance.freeze()
- return LXDSnapshot(self.platform, self.properties, self.config,
- self.features, instance)
-
- def destroy(self):
- """Clean up data associated with image."""
- self.pylxd_image = None
- super(LXDImage, self).destroy()
-
-# vi: ts=4 expandtab
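export_image() above depends on the split image format: a metadata tarball named meta-<fingerprint>.tar.xz plus a separate rootfs file carrying the same fingerprint. A tiny standalone sketch of separating the two from a directory listing; the fingerprint and filenames are made up:

fingerprint = '71f171df'  # made-up fingerprint
listing = ['meta-71f171df.tar.xz', '71f171df.squashfs', 'lxd.log']
image_files = [name for name in listing if fingerprint in name]
metadata = next(name for name in image_files if name.startswith('meta-'))
rootfs = next(name for name in image_files if not name.startswith('meta-'))
print(metadata, rootfs)  # meta-71f171df.tar.xz 71f171df.squashfs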
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
deleted file mode 100644
index 2b973a08..00000000
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD instance."""
-
-import os
-import shutil
-import time
-from tempfile import mkdtemp
-
-from cloudinit.subp import subp, ProcessExecutionError, which
-from cloudinit.util import load_yaml
-from tests.cloud_tests import LOG
-from tests.cloud_tests.util import PlatformError
-
-from ..instances import Instance
-
-from pylxd import exceptions as pylxd_exc
-
-
-class LXDInstance(Instance):
- """LXD container backed instance."""
-
- platform_name = "lxd"
- _console_log_method = None
- _console_log_file = None
-
- def __init__(self, platform, name, properties, config, features,
- pylxd_container):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- if not pylxd_container:
- raise ValueError("Invalid value pylxd_container: %s" %
- pylxd_container)
- self._pylxd_container = pylxd_container
- super(LXDInstance, self).__init__(
- platform, name, properties, config, features)
- self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
- self.name = name
- self._setup_console_log()
-
- @property
- def pylxd_container(self):
- """Property function."""
- if self._pylxd_container is None:
- raise RuntimeError(
- "%s: Attempted use of pylxd_container after deletion." % self)
- self._pylxd_container.sync()
- return self._pylxd_container
-
- def __str__(self):
- return (
- '%s(name=%s) status=%s' %
- (self.__class__.__name__, self.name,
- ("deleted" if self._pylxd_container is None else
- self.pylxd_container.status)))
-
- def _execute(self, command, stdin=None, env=None):
- if env is None:
- env = {}
-
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- # ensure instance is running and execute the command
- self.start()
-
- # Use cmdline client due to https://github.com/lxc/pylxd/issues/268
- exit_code = 0
- try:
- stdout, stderr = subp(
- ['lxc', 'exec', self.name, '--'] + env_args + list(command),
- data=stdin, decode=False)
- except ProcessExecutionError as e:
- exit_code = e.exit_code
- stdout = e.stdout
- stderr = e.stderr
-
- return stdout, stderr, exit_code
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- data = self.pylxd_container.files.get(remote_path)
- return data.decode() if decode else data
-
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- self.pylxd_container.files.put(remote_path, data)
-
- @property
- def console_log_method(self):
- if self._console_log_method is not None:
- return self._console_log_method
-
- client = which('lxc')
- if not client:
- raise PlatformError("No 'lxc' client.")
-
- elif _has_proper_console_support():
- self._console_log_method = 'show-log'
- elif client.startswith("/snap"):
- self._console_log_method = 'logfile-snap'
- else:
- self._console_log_method = 'logfile-tmp'
-
- LOG.debug("Set console log method to %s", self._console_log_method)
- return self._console_log_method
-
- def _setup_console_log(self):
- method = self.console_log_method
- if not method.startswith("logfile-"):
- return
-
- if method == "logfile-snap":
- log_dir = "/var/snap/lxd/common/consoles"
- if not os.path.exists(log_dir):
- raise PlatformError(
- "Unable to log with snap lxc. Please run:\n"
- " sudo mkdir --mode=1777 -p %s" % log_dir)
- elif method == "logfile-tmp":
- log_dir = "/tmp"
- else:
- raise PlatformError(
- "Unexpected value for console method: %s" % method)
-
- # doing this ensures we can read it. Otherwise it ends up root:root.
- log_file = os.path.join(log_dir, self.name)
- with open(log_file, "w") as fp:
- fp.write("# %s\n" % self.name)
-
- cfg = "lxc.console.logfile=%s" % log_file
- orig = self._pylxd_container.config.get('raw.lxc', "")
- if orig:
- orig += "\n"
- self._pylxd_container.config['raw.lxc'] = orig + cfg
- self._pylxd_container.save()
- self._console_log_file = log_file
-
- def console_log(self):
- """Console log.
-
- @return_value: bytes of this instance's console
- """
-
- if self._console_log_file:
- if not os.path.exists(self._console_log_file):
-                raise NotImplementedError(
-                    "Console log '%s' does not exist. If this is a remote "
-                    "lxc, then this is really a NotImplementedError. If it "
-                    "is a local lxc, then this is a RuntimeError. See "
-                    "https://github.com/lxc/lxd/issues/1129"
-                    % self._console_log_file)
- with open(self._console_log_file, "rb") as fp:
- return fp.read()
-
- try:
- return subp(['lxc', 'console', '--show-log', self.name],
- decode=False)[0]
- except ProcessExecutionError as e:
- raise PlatformError(
- "console log",
- "Console log failed [%d]: stdout=%s stderr=%s" % (
- e.exit_code, e.stdout, e.stderr)
- ) from e
-
- def reboot(self, wait=True):
- """Reboot instance."""
- self.shutdown(wait=wait)
- self.start(wait=wait)
-
- def shutdown(self, wait=True, retry=1):
- """Shutdown instance."""
- if self.pylxd_container.status == 'Stopped':
- return
-
- try:
- LOG.debug("%s: shutting down (wait=%s)", self, wait)
- self.pylxd_container.stop(wait=wait)
- except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
- # An exception happens here sometimes (LP: #1783198)
- # LOG it, and try again.
- LOG.warning(
- ("%s: shutdown(retry=%d) caught %s in shutdown "
- "(response=%s): %s"),
- self, retry, e.__class__.__name__, e.response, e)
- if isinstance(e, pylxd_exc.NotFound):
- LOG.debug("container_exists(%s) == %s",
- self.name, self.platform.container_exists(self.name))
- if retry == 0:
- raise e
- return self.shutdown(wait=wait, retry=retry - 1)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- if self.pylxd_container.status != 'Running':
- self.pylxd_container.start(wait=wait)
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def freeze(self):
- """Freeze instance."""
- if self.pylxd_container.status != 'Frozen':
- self.pylxd_container.freeze(wait=True)
-
- def unfreeze(self):
- """Unfreeze instance."""
- if self.pylxd_container.status == 'Frozen':
- self.pylxd_container.unfreeze(wait=True)
-
- def destroy(self):
- """Clean up instance."""
- LOG.debug("%s: deleting container.", self)
- self.unfreeze()
- self.shutdown()
- retries = [1] * 5
- for attempt, wait in enumerate(retries):
- try:
- self.pylxd_container.delete(wait=True)
- break
- except Exception:
- if attempt + 1 >= len(retries):
- raise
- LOG.debug('Failed to delete container %s (%s/%s) retrying...',
- self, attempt + 1, len(retries))
- time.sleep(wait)
-
- self._pylxd_container = None
-
- if self.platform.container_exists(self.name):
- raise OSError('%s: container was not properly removed' % self)
- if self._console_log_file and os.path.exists(self._console_log_file):
- os.unlink(self._console_log_file)
- shutil.rmtree(self.tmpd)
- super(LXDInstance, self).destroy()
-
-
-def _has_proper_console_support():
- stdout, _ = subp(['lxc', 'info'])
- info = load_yaml(stdout)
- reason = None
- if 'console' not in info.get('api_extensions', []):
- reason = "LXD server does not support console api extension"
- else:
- dver = str(info.get('environment', {}).get('driver_version', ""))
- if dver.startswith("2.") or dver.startswith("1."):
- reason = "LXD Driver version not 3.x+ (%s)" % dver
- else:
- try:
- stdout = subp(['lxc', 'console', '--help'], decode=False)[0]
- if not (b'console' in stdout and b'log' in stdout):
- reason = "no '--log' in lxc console --help"
- except ProcessExecutionError:
- reason = "no 'console' command in lxc client"
-
- if reason:
- LOG.debug("no console-support: %s", reason)
- return False
- else:
- LOG.debug("console-support looks good")
- return True
-
-
-# vi: ts=4 expandtab
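For context, the _execute() helper removed above prefixes environment variables with env(1) before handing the command to `lxc exec`. A minimal standalone sketch of that pattern, assuming only a local `lxc` client and cloudinit.subp; the function name and container argument here are placeholders, not part of the framework:

    from cloudinit.subp import ProcessExecutionError, subp

    def lxc_exec(container, command, env=None, stdin=None):
        """Run 'command' in 'container' via the lxc CLI, like _execute above."""
        env_args = []
        if env:
            # build an env(1) prefix such as: env FOO=bar BAZ=qux
            env_args = ["env"] + ["%s=%s" % (k, v) for k, v in env.items()]
        try:
            stdout, stderr = subp(
                ["lxc", "exec", container, "--"] + env_args + list(command),
                data=stdin, decode=False)
            return stdout, stderr, 0
        except ProcessExecutionError as e:
            return e.stdout, e.stderr, e.exit_code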
diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py
deleted file mode 100644
index f7251a07..00000000
--- a/tests/cloud_tests/platforms/lxd/platform.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD platform."""
-
-from pylxd import (Client, exceptions)
-
-from ..platforms import Platform
-from .image import LXDImage
-from .instance import LXDInstance
-from tests.cloud_tests import util
-
-DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443"
-
-
-class LXDPlatform(Platform):
- """LXD test platform."""
-
- platform_name = 'lxd'
-
- def __init__(self, config):
- """Set up platform."""
- super(LXDPlatform, self).__init__(config)
- # TODO: allow configuration of remote lxd host via env variables
- # set up lxd connection
- self.client = Client()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- pylxd_image = self.client.images.create_from_simplestreams(
- img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
- img_conf['alias'])
- image = LXDImage(self, img_conf, pylxd_image)
- if img_conf.get('override_templates', False):
- image.update_templates(self.config.get('template_overrides', {}),
- self.config.get('template_files', {}))
- return image
-
- def launch_container(self, properties, config, features,
- image=None, container=None, ephemeral=False,
- container_config=None, block=True, image_desc=None,
- use_desc=None):
- """Launch a container.
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image: image fingerprint to launch from
- @param container: container to copy
- @param ephemeral: delete image after first shutdown
- @param container_config: config options for instance as dict
- @param block: wait until container created
- @param image_desc: description of image being launched
- @param use_desc: description of container's use
- @return_value: cloud_tests.instances instance
- """
- if not (image or container):
- raise ValueError("either image or container must be specified")
- container = self.client.containers.create({
- 'name': util.gen_instance_name(image_desc=image_desc,
- use_desc=use_desc,
- used_list=self.list_containers()),
- 'ephemeral': bool(ephemeral),
- 'config': (container_config
- if isinstance(container_config, dict) else {}),
- 'source': ({'type': 'image', 'fingerprint': image} if image else
- {'type': 'copy', 'source': container})
- }, wait=block)
- return LXDInstance(self, container.name, properties, config, features,
- container)
-
- def container_exists(self, container_name):
- """Check if container with name 'container_name' exists.
-
- @return_value: True if exists else False
- """
- res = True
- try:
- self.client.containers.get(container_name)
- except exceptions.LXDAPIException as e:
- res = False
- if e.response.status_code != 404:
- raise
- return res
-
- def list_containers(self):
- """List names of all containers.
-
- @return_value: list of names
- """
- return [container.name for container in self.client.containers.all()]
-
- def query_image_by_alias(self, alias):
- """Get image by alias in local image store.
-
- @param alias: alias of image
- @return_value: pylxd image (not cloud_tests.images instance)
- """
- return self.client.images.get_by_alias(alias)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/snapshot.py b/tests/cloud_tests/platforms/lxd/snapshot.py
deleted file mode 100644
index b524644f..00000000
--- a/tests/cloud_tests/platforms/lxd/snapshot.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD snapshot."""
-
-from ..snapshots import Snapshot
-
-
-class LXDSnapshot(Snapshot):
- """LXD image copy backed snapshot."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, properties, config, features,
- pylxd_frozen_instance):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.pylxd_frozen_instance = pylxd_frozen_instance
- super(LXDSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- inst_config = {'user.user-data': user_data}
- if meta_data:
- inst_config['user.meta-data'] = meta_data
- instance = self.platform.launch_container(
- self.properties, self.config, self.features, block=block,
- image_desc=str(self), container=self.pylxd_frozen_instance.name,
- use_desc=use_desc, container_config=inst_config)
- if start:
- instance.start()
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- self.pylxd_frozen_instance.destroy()
- super(LXDSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/__init__.py b/tests/cloud_tests/platforms/nocloudkvm/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/__init__.py
+++ /dev/null
diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
deleted file mode 100644
index ff5b6ad7..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/image.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""NoCloud KVM Image Base Class."""
-
-from cloudinit import subp
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import NoCloudKVMSnapshot
-
-
-class NoCloudKVMImage(Image):
- """NoCloud KVM backed image."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, config, orig_img_path):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
-        @param orig_img_path: path to the original image
- """
- self.modified = False
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMImage')
- self._orig_img_path = orig_img_path
- self._img_path = os.path.join(self._workd,
- os.path.basename(self._orig_img_path))
-
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', orig_img_path, self._img_path])
-
- super(NoCloudKVMImage, self).__init__(platform, config)
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command in image, modifying image."""
- return self.mount_image_callback(command, stdin=stdin, env=env)
-
- def mount_image_callback(self, command, stdin=None, env=None):
- """Run mount-image-callback."""
-
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts',
- '--system-resolvconf', self._img_path,
- '--', 'chroot', '_MOUNTPOINT_']
- try:
- out, err = subp.subp(mic_chroot + env_args + list(command),
- data=stdin, decode=False)
- return (out, err, 0)
- except subp.ProcessExecutionError as e:
- return (e.stdout, e.stderr, e.exit_code)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- if not self._img_path:
-            raise RuntimeError("no image path: image has been destroyed")
-
- return NoCloudKVMSnapshot(self.platform, self.properties, self.config,
- self.features, self._img_path)
-
- def destroy(self):
- """Unset path to signal image is no longer used.
-
- The removal of the images and all other items is handled by the
- framework. In some cases we want to keep the images, so let the
- framework decide whether to keep or destroy everything.
- """
- self._img_path = None
- shutil.rmtree(self._workd)
-
- super(NoCloudKVMImage, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
deleted file mode 100644
index 5140a11c..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM instance."""
-
-import copy
-import os
-import socket
-import subprocess
-import time
-import uuid
-
-from ..instances import Instance
-from cloudinit.atomic_helper import write_json
-from cloudinit import subp
-from tests.cloud_tests import LOG, util
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-class NoCloudKVMInstance(Instance):
- """NoCloud KVM backed instance."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, name, image_path, properties, config,
- features, user_data, meta_data):
- """Set up instance.
-
- @param platform: platform object
- @param name: image path
- @param image_path: path to disk image to boot.
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- """
- super(NoCloudKVMInstance, self).__init__(
- platform, name, properties, config, features
- )
-
- self.user_data = user_data
- if meta_data:
- meta_data = copy.deepcopy(meta_data)
- else:
- meta_data = {}
-
- if 'instance-id' in meta_data:
- iid = meta_data['instance-id']
- else:
- iid = str(uuid.uuid1())
- meta_data['instance-id'] = iid
-
- self.instance_id = iid
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- self.ssh_pubkey = None
- if self.ssh_pubkey_file:
- with open(self.ssh_pubkey_file, "r") as fp:
- self.ssh_pubkey = fp.read().rstrip('\n')
-
- if not meta_data.get('public-keys'):
- meta_data['public-keys'] = []
- meta_data['public-keys'].append(self.ssh_pubkey)
-
- self.ssh_ip = '127.0.0.1'
- self.ssh_port = None
- self.pid = None
- self.pid_file = None
- self.console_file = None
- self.disk = image_path
- self.cache_mode = platform.config.get('cache_mode',
- 'cache=none,aio=native')
- self.meta_data = meta_data
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
-
- if self.pid:
- # This relies on _execute which uses sudo over ssh. The ssh
- # connection would get killed before sudo exited, so ignore errors.
- cmd = ['shutdown', 'now']
- try:
- self._execute(cmd)
- except util.InTargetExecuteError:
- pass
- self._ssh_close()
-
- if wait:
- LOG.debug("Executed shutdown. waiting on pid %s to end",
- self.pid)
- time_for_shutdown = 120
- give_up_at = time.time() + time_for_shutdown
- pid_file_path = '/proc/%s' % self.pid
- msg = ("pid %s did not exit in %s seconds after shutdown." %
- (self.pid, time_for_shutdown))
-            while True:
-                if not os.path.exists(pid_file_path):
-                    break
-                if time.time() > give_up_at:
-                    raise util.PlatformError("shutdown", msg)
-                time.sleep(1)
- self.pid = None
-
- def destroy(self):
- """Clean up instance."""
- if self.pid:
- try:
- subp.subp(['kill', '-9', self.pid])
- except subp.ProcessExecutionError:
- pass
-
- if self.pid_file:
- try:
- os.remove(self.pid_file)
- except Exception:
- pass
-
- self.pid = None
- self._ssh_close()
-
- super(NoCloudKVMInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def generate_seed(self, tmpdir):
- """Generate nocloud seed from user-data"""
- seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name)
- user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name)
- meta_data_file = os.path.join(tmpdir, '%s_meta_data' % self.name)
-
- with open(user_data_file, "w") as ud_file:
- ud_file.write(self.user_data)
-
- # meta-data can be yaml, but more easily pretty printed with json
- write_json(meta_data_file, self.meta_data)
- subp.subp(['cloud-localds', seed_file, user_data_file,
- meta_data_file])
-
- return seed_file
-
- def get_free_port(self):
- """Get a free port assigned by the kernel."""
- s = socket.socket()
- s.bind(('', 0))
- num = s.getsockname()[1]
- s.close()
- return num
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- tmpdir = self.platform.config['data_dir']
- seed = self.generate_seed(tmpdir)
- self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name)
- self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name)
- self.ssh_port = self.get_free_port()
-
- cmd = ['./tools/xkvm',
- '--disk', '%s,%s' % (self.disk, self.cache_mode),
- '--disk', '%s' % seed,
- '--netdev', ','.join(['user',
- 'hostfwd=tcp::%s-:22' % self.ssh_port,
- 'dnssearch=%s' % CI_DOMAIN]),
- '--', '-pidfile', self.pid_file, '-vnc', 'none',
- '-m', '2G', '-smp', '2', '-nographic', '-name', self.name,
- '-serial', 'file:' + self.console_file]
- subprocess.Popen(cmd,
- close_fds=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
- while not os.path.exists(self.pid_file):
- time.sleep(1)
-
- with open(self.pid_file, 'r') as pid_f:
- self.pid = pid_f.readlines()[0].strip()
-
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def console_log(self):
- if not self.console_file:
- return b''
- with open(self.console_file, "rb") as fp:
- return fp.read()
-
-# vi: ts=4 expandtab
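The start()/shutdown() methods removed above coordinate with QEMU purely through the pid file and /proc. A rough, self-contained sketch of that polling pattern; the timeouts and error types are illustrative, not the framework's:

    import os
    import time

    def wait_for_pidfile(pid_file, timeout=60):
        """Poll until 'pid_file' exists, then return the pid it contains."""
        give_up_at = time.time() + timeout
        while not os.path.exists(pid_file):
            if time.time() > give_up_at:
                raise RuntimeError("pid file %s never appeared" % pid_file)
            time.sleep(1)
        with open(pid_file, "r") as fp:
            return fp.readline().strip()

    def wait_for_exit(pid, timeout=120):
        """Poll /proc until process 'pid' has exited or 'timeout' passes."""
        give_up_at = time.time() + timeout
        while os.path.exists("/proc/%s" % pid):
            if time.time() > give_up_at:
                raise RuntimeError(
                    "pid %s did not exit within %s seconds" % (pid, timeout))
            time.sleep(1)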
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
deleted file mode 100644
index 53c8ebf2..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM platform."""
-import glob
-import os
-
-from simplestreams import filters
-from simplestreams import mirrors
-from simplestreams import objectstores
-from simplestreams import util as s_util
-
-from ..platforms import Platform
-from .image import NoCloudKVMImage
-from .instance import NoCloudKVMInstance
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class NoCloudKVMPlatform(Platform):
- """NoCloud KVM test platform."""
-
- platform_name = 'nocloud-kvm'
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
-
- filter = filters.get_filters(
- [
- 'arch=%s' % c_util.get_dpkg_architecture(),
- 'release=%s' % img_conf['release'],
- 'ftype=disk1.img',
- ]
- )
- mirror_config = {'filters': filter,
- 'keep_items': False,
- 'max_items': 1,
- 'checksumming_reader': True,
- 'item_download': True
- }
-
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
- tstore = objectstores.FileStore(img_conf['mirror_dir'])
- tmirror = mirrors.ObjectFilterMirror(config=mirror_config,
- objectstore=tstore)
- tmirror.sync(smirror, path)
-
- search_d = os.path.join(img_conf['mirror_dir'], '**',
- img_conf['release'], '**', '*.img')
-
- images = []
- for fname in glob.iglob(search_d, recursive=True):
- images.append(fname)
-
- if len(images) < 1:
- raise RuntimeError("No images found under '%s'" % search_d)
- if len(images) > 1:
- raise RuntimeError(
- "Multiple images found in '%s': %s" % (search_d,
- ' '.join(images)))
-
- image = NoCloudKVMImage(self, img_conf, images[0])
- return image
-
- def create_instance(self, properties, config, features,
- src_img_path, image_desc=None, use_desc=None,
- user_data=None, meta_data=None):
- """Create an instance
-
- @param src_img_path: image path to launch from
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_desc: description of image being launched
- @param use_desc: description of container's use
- @return_value: cloud_tests.instances instance
- """
- name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
- img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', src_img_path, img_path])
-
- return NoCloudKVMInstance(self, name, img_path, properties, config,
- features, user_data, meta_data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
deleted file mode 100644
index 2dae3590..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM snapshot."""
-import os
-import shutil
-import tempfile
-
-from ..snapshots import Snapshot
-
-
-class NoCloudKVMSnapshot(Snapshot):
- """NoCloud KVM image copy backed snapshot."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, properties, config, features, image_path):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_path: image file to snapshot.
- """
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMSnapshot')
- snapshot = os.path.join(self._workd, 'snapshot')
- shutil.copyfile(image_path, snapshot)
- self._image_path = snapshot
-
- super(NoCloudKVMSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self._image_path, image_desc=str(self), use_desc=use_desc,
- user_data=user_data, meta_data=meta_data)
-
- if start:
- instance.start()
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- shutil.rmtree(self._workd)
- super(NoCloudKVMSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
deleted file mode 100644
index ac3b6563..00000000
--- a/tests/cloud_tests/platforms/platforms.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base platform class."""
-import os
-import shutil
-
-from simplestreams import filters, mirrors
-from simplestreams import util as s_util
-
-from cloudinit import subp
-from cloudinit import util as c_util
-
-from tests.cloud_tests import util
-
-
-class Platform(object):
- """Base class for platforms."""
-
- platform_name = None
-
- def __init__(self, config):
- """Set up platform."""
- self.config = config
- self.tmpdir = util.mkdtemp()
- if 'data_dir' in config:
- self.data_dir = config['data_dir']
- else:
- self.data_dir = os.path.join(self.tmpdir, "data_dir")
- os.mkdir(self.data_dir)
-
- self._generate_ssh_keys(self.data_dir)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up platform data."""
- shutil.rmtree(self.tmpdir)
-
- def _generate_ssh_keys(self, data_dir):
- """Generate SSH keys to be used with image."""
- filename = os.path.join(data_dir, self.config['private_key'])
-
- if os.path.exists(filename):
- c_util.del_file(filename)
-
- subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
-
- @staticmethod
- def _query_streams(img_conf, img_filter):
- """Query streams for latest image given a specific filter.
-
- @param img_conf: configuration for image
-        @param img_filter: list of filters as strings, format 'key=value'
- @return: dictionary with latest image information or empty
- """
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
-
- config = {'max_items': 1, 'filters': filters.get_filters(img_filter)}
- tmirror = FilterMirror(config)
- tmirror.sync(smirror, path)
-
- try:
- return tmirror.json_entries[0]
- except IndexError as e:
- raise RuntimeError(
- 'no images found with filter: %s' % img_filter
- ) from e
-
-
-class FilterMirror(mirrors.BasicMirrorWriter):
- """Taken from sstream-query to return query result as json array."""
-
- def __init__(self, config=None):
- super(FilterMirror, self).__init__(config=config)
- if config is None:
- config = {}
- self.config = config
- self.filters = config.get('filters', [])
- self.json_entries = []
-
- def load_products(self, path=None, content_id=None):
- return {'content_id': content_id, 'products': {}}
-
- def filter_item(self, data, src, target, pedigree):
- return filters.filter_item(self.filters, data, src, pedigree)
-
- def insert_item(self, data, src, target, pedigree, contentsource):
- # src and target are top level products:1.0
- # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]]
- # contentsource is a ContentSource if 'path' exists in data or None
- data = s_util.products_exdata(src, pedigree)
- if 'path' in data:
- data.update({'item_url': contentsource.url})
- self.json_entries.append(data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/snapshots.py b/tests/cloud_tests/platforms/snapshots.py
deleted file mode 100644
index 0f5f8bb6..00000000
--- a/tests/cloud_tests/platforms/snapshots.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base snapshot."""
-
-
-class Snapshot(object):
- """Base class for snapshots."""
-
- platform_name = None
-
- def __init__(self, platform, properties, config, features):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.properties = properties
- self.config = config
- self.features = features
-
- def __str__(self):
- """A brief description of the snapshot."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up snapshot data."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
deleted file mode 100644
index 6249efc5..00000000
--- a/tests/cloud_tests/releases.yaml
+++ /dev/null
@@ -1,364 +0,0 @@
-# ============================= Release Config ================================
-default_release_config:
- # global default configuration options
- default:
- # all are disabled by default
- enabled: false
- # timeout for booting image and running cloud init
- boot_timeout: 120
- # a script to run after a boot that is used to modify an image, before
- # making a snapshot of the image. may be useful for removing data left
- # behind from cloud-init booting, such as logs, to ensure that data
- # from snapshot.launch() will not include a cloud-init.log from a boot
- # used to create the snapshot, if cloud-init has not run
- boot_clean_script: |
- #!/bin/bash
- rm -rf /var/log/cloud-init.log /var/log/cloud-init-output.log \
- /var/lib/cloud/ /run/cloud-init/ /var/log/syslog
- # test script to determine if system is booted fully
- system_ready_script: |
- # permit running or degraded state as both indicate complete boot
- [ $(systemctl is-system-running) = 'running' -o
- $(systemctl is-system-running) = 'degraded' ]
- # test script to determine if cloud-init has finished
- cloud_init_ready_script: |
- [ -f '/run/cloud-init/result.json' ]
- # currently used features and their uses are:
- # features groups and additional feature settings
- feature_groups: []
- features: {}
- mirror_url: https://cloud-images.ubuntu.com/daily
- mirror_dir: '/srv/citest/images'
- keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- # The OS version formatted as Major.Minor is used to compare releases.
- # Each release needs to define this, for example "16.04". Quoting is
- # necessary to ensure the version is treated as a string.
- version: null
-
- ec2:
- # Choose from: [ebs, instance-store]
- root-store: ebs
- boot_timeout: 300
- nocloud-kvm:
- setup_overrides: null
- override_templates: false
- # lxd specific default configuration options
- lxd:
- # default sstreams server to use for lxd image retrieval
- sstreams_server: https://us.images.linuxcontainers.org:8443
- # keep base image, avoids downloading again next run
- cache_base_image: true
- # lxd images from linuxcontainers.org do not have the nocloud seed
- # templates in place, so the image metadata must be modified
- override_templates: true
- # arg overrides to set image up
- setup_overrides:
- # lxd images from linuxcontainers.org do not come with
- # cloud-init, so must pull cloud-init in from repo using
- # setup_image.upgrade
- upgrade: true
- azurecloud:
- boot_timeout: 300
-
-features:
- # all currently supported feature flags
- all:
- - apt # image supports apt package manager
- - byobu # byobu is available in repositories
- - landscape # landscape-client available in repos
- - lxd # lxd is available in the image
- - ppa # image supports ppas
- - rpm # image supports rpms
- - snap # supports snapd
- # NOTE: the following feature flags are to work around bugs in the
- # images, and can be removed when no longer needed
- - hostname # setting system hostname works
- # NOTE: the following feature flags are to work around issues in the
- # testcases, and can be removed when no longer needed
- - apt_src_cont # default contents and format of sources.list matches
- # ubuntu sources.list
- - apt_hist_fmt # apt command history entries use full paths to apt
- # executable rather than relative paths
- - daylight_time # timezones are daylight not standard time
- - apt_up_out # 'Calculating upgrade..' present in log output from
- # apt-get dist-upgrade output
- - engb_locale # locale en_GB.UTF-8 is available
- - locale_gen # the /etc/locale.gen file exists
- - no_ntpdate # 'ntpdate' is not installed by default
- - no_file_fmt_e # the 'file' utility does not have a formatting error
- - ppa_file_name # the name of the source file added to sources.list.d has
- # the expected format for newer ubuntu releases
- - sshd # requires ssh server to be installed by default
- - ssh_key_fmt # ssh auth keys printed to console have expected format
- - syslog # test case requires syslog to be written by default
- - ubuntu_ntp # expect ubuntu.pool.ntp.org to be used as ntp server
-    - ubuntu_repos # test case requires ubuntu repositories to be used
- - ubuntu_user # test case needs user with the name 'ubuntu' to exist
- # NOTE: the following feature flags are to work around issues that may
- # be considered bugs in cloud-init
- - lsb_release # image has lsb_release installed, maybe should install
- # if missing by default
- - sudo # image has sudo installed, should not be required
- # feature flag groups
- groups:
- base:
- hostname: true
- no_file_fmt_e: true
- ubuntu_specific:
- apt_src_cont: true
- apt_hist_fmt: true
- byobu: true
- daylight_time: true
- engb_locale: true
- landscape: true
- locale_gen: true
- lsb_release: true
- lxd: true
- ppa: true
- ppa_file_name: true
- snap: true
- sshd: true
- ssh_key_fmt: true
- sudo: true
- syslog: true
- ubuntu_ntp: true
- ubuntu_repos: true
- ubuntu_user: true
- debian_base:
- apt: true
- apt_up_out: true
- no_ntpdate: true
- rhel_base:
- rpm: true
-
-releases:
- # UBUNTU =================================================================
- hirsute:
- # EOL: Jan 2022
- default:
- enabled: true
- release: hirsute
- version: "21.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: hirsute
- setup_overrides: null
- override_templates: false
- groovy:
- # EOL: Jul 2021
- default:
- enabled: true
- release: groovy
- version: "20.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: groovy
- setup_overrides: null
- override_templates: false
- focal:
- # EOL: Apr 2025
- default:
- enabled: true
- release: focal
- version: "20.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: focal
- setup_overrides: null
- override_templates: false
- eoan:
- # EOL: Jul 2020
- default:
- enabled: true
- release: eoan
- version: "19.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: eoan
- setup_overrides: null
- override_templates: false
- disco:
- # EOL: Jan 2020
- default:
- enabled: true
- release: disco
- version: "19.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: disco
- setup_overrides: null
- override_templates: false
- cosmic:
- # EOL: Jul 2019
- default:
- enabled: true
- release: cosmic
- version: "18.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: cosmic
- setup_overrides: null
- override_templates: false
- bionic:
- # EOL: Apr 2023
- default:
- enabled: true
- release: bionic
- version: "18.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: bionic
- setup_overrides: null
- override_templates: false
- artful:
- # EOL: Jul 2018
- default:
- enabled: true
- release: artful
- version: "17.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: artful
- setup_overrides: null
- override_templates: false
- xenial:
- # EOL: Apr 2021
- default:
- enabled: true
- release: xenial
- version: "16.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: xenial
- setup_overrides: null
- override_templates: false
- trusty:
- # EOL: Apr 2019
- default:
- enabled: true
- release: trusty
- version: "14.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- features:
- apt_up_out: false
- locale_gen: false
- lxd: false
- ppa_file_name: false
- snap: false
- ssh_key_fmt: false
- no_ntpdate: false
- no_file_fmt_e: false
- system_ready_script: |
- #!/bin/bash
- # upstart based, so use old style runlevels
- [ $(runlevel | awk '{print $2}') = '2' ]
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: trusty
- setup_overrides: null
- override_templates: false
- # DEBIAN =================================================================
- stretch:
- # EOL: Not yet released
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/stretch/default
- jessie:
- # EOL: Jun 2020
- # NOTE: the cloud-init version shipped with jessie is out of date
- # tests work if an up to date deb is used
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/jessie/default
- # CENTOS =================================================================
- centos70:
- # EOL: Jun 2024 (2020 - end of full updates)
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/7/default
- centos66:
- # EOL: Nov 2020
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
- # still supported, but only bugfixes after may 2017
- system_ready_script: |
- #!/bin/bash
- [ $(runlevel | awk '{print $2}') = '3' ]
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/6/default
-
-# vi: ts=4 expandtab
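Since releases.yaml drives most of the per-release behaviour above, here is a small illustrative snippet showing how one release entry can be read back. It uses plain PyYAML rather than the framework's read_conf helper, and shows the raw per-release values before default_release_config is merged in:

    import yaml

    with open("tests/cloud_tests/releases.yaml") as fp:
        releases = yaml.safe_load(fp)["releases"]

    focal = releases["focal"]["default"]
    print(focal["os"], focal["version"])  # ubuntu 20.04
    print(focal["feature_groups"])        # ['base', 'debian_base', 'ubuntu_specific']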
diff --git a/tests/cloud_tests/run_funcs.py b/tests/cloud_tests/run_funcs.py
deleted file mode 100644
index 8ae91120..00000000
--- a/tests/cloud_tests/run_funcs.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Run functions."""
-
-import os
-
-from tests.cloud_tests import bddeb, collect, util, verify
-
-
-def tree_collect(args):
- """Collect data using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def tree_run(args):
- """Run test suite using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def run(args):
- """Run test suite.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- try:
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
deleted file mode 100644
index 69e66e3f..00000000
--- a/tests/cloud_tests/setup_image.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Setup image for testing."""
-
-from functools import partial
-import os
-import yaml
-
-from tests.cloud_tests import LOG
-from tests.cloud_tests import stage, util
-
-
-def installed_package_version(image, package, ensure_installed=True):
- """Get installed version of package.
-
- @param image: cloud_tests.images instance to operate on
- @param package: name of package
- @param ensure_installed: raise error if not installed
- @return_value: cloud-init version string
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ['dpkg-query', '-W', "--showformat=${Version}", package]
- elif os_family == 'redhat':
- cmd = ['rpm', '-q', '--queryformat', "'%{VERSION}'", package]
- else:
- raise NotImplementedError
-
- return image.execute(
- cmd, description='query version for package: {}'.format(package),
- rcs=(0,) if ensure_installed else range(0, 256))[0].strip()
-
-
-def install_deb(args, image):
- """Install deb into image.
-
- @param args: cmdline arguments, must contain --deb
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- # ensure system is compatible with package format
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'debian':
- raise NotImplementedError('install deb: {} not supported on os '
- 'family: {}'.format(args.deb, os_family))
-
- # install deb
- msg = 'install deb: "{}" into target'.format(args.deb)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.deb))
- image.push_file(args.deb, remote_path)
- image.execute(
- ['apt-get', 'install', '--allow-downgrades', '--assume-yes',
- remote_path], description=msg)
- # check installed deb version matches package
- fmt = ['-W', "--showformat=${Version}"]
- out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0]
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install deb version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version: %s', args.deb,
- found_version)
-
-
-def install_rpm(args, image):
- """Install rpm into image.
-
- @param args: cmdline arguments, must contain --rpm
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'redhat':
- raise NotImplementedError('install rpm: {} not supported on os '
- 'family: {}'.format(args.rpm, os_family))
-
- # install rpm
- msg = 'install rpm: "{}" into target'.format(args.rpm)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
- image.push_file(args.rpm, remote_path)
- image.execute(['rpm', '-U', remote_path], description=msg)
-
- fmt = ['--queryformat', '"%{VERSION}"']
- (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install rpm version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version %s', args.rpm,
- found_version)
-
-
-def upgrade(args, image):
- """Upgrade or install cloud-init from repo.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get install cloud-init --yes'
- elif os_family == 'redhat':
- cmd = 'sleep 10 && yum install cloud-init --assumeyes'
- else:
- raise NotImplementedError
-
- msg = 'upgrading cloud-init'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def upgrade_full(args, image):
- """Run the system's full upgrade command.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get upgrade --yes'
- elif os_family == 'redhat':
- cmd = 'yum upgrade --assumeyes'
- else:
- raise NotImplementedError('upgrade command not configured for distro '
- 'from family: {}'.format(os_family))
-
- msg = 'full system upgrade'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def run_script(args, image):
- """Run a script in the target image.
-
- @param args: cmdline arguments, must contain --script
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- msg = 'run setup image script in target image'
- LOG.debug(msg)
- image.run_script(args.script, description=msg)
-
-
-def enable_ppa(args, image):
- """Enable a ppa in the target image.
-
- @param args: cmdline arguments, must contain --ppa
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # ppa only supported on ubuntu (maybe debian?)
- if image.properties['os'].lower() != 'ubuntu':
- raise NotImplementedError('enabling a ppa is only available on ubuntu')
-
- # add ppa with add-apt-repository and update
- ppa = 'ppa:{}'.format(args.ppa)
- msg = 'enable ppa: "{}" in target'.format(ppa)
- LOG.debug(msg)
- cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
- image.execute(cmd, description=msg)
-
-
-def enable_repo(args, image):
- """Enable a repository in the target image.
-
- @param args: cmdline arguments, must contain --repo
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # find enable repo command for the distro
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
- '&& apt-get update')
-    elif os_family == 'redhat':
- cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
- else:
- raise NotImplementedError('enable repo command not configured for '
- 'distro from family: {}'.format(os_family))
-
- msg = 'enable repo: "{}" in target'.format(args.repo)
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def setup_image(args, image):
- """Set up image as specified in args.
-
- @param args: cmdline arguments
- @param image: cloud_tests.image instance to operate on
- @return_value: tuple of results and fail count
- """
- # update the args if necessary for this image
- overrides = image.setup_overrides
- LOG.debug('updating args for setup with: %s', overrides)
- args = util.update_args(args, overrides, preserve_old=True)
-
- # mapping of setup cmdline arg name to setup function
-    # represented as a tuple rather than a dict or odict, as lookup by name is
-    # not needed and order is important: --script and --upgrade go at the end
- handlers = (
- # arg handler description
- ('deb', install_deb, 'setup func for --deb, install deb'),
- ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
- ('repo', enable_repo, 'setup func for --repo, enable repo'),
- ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
- ('script', run_script, 'setup func for --script, run script'),
- ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
- ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
- )
-
- # determine which setup functions needed
- calls = [partial(stage.run_single, desc, partial(func, args, image))
- for name, func, desc in handlers if getattr(args, name, None)]
-
- try:
- data = yaml.safe_load(
- image.read_data("/etc/cloud/build.info", decode=True))
- info = ' '.join(["%s=%s" % (k, data.get(k))
- for k in ("build_name", "serial") if k in data])
- except Exception as e:
- info = "N/A (%s)" % e
-
- LOG.info('setting up image %s (info %s)', image, info)
- res = stage.run_stage(
- 'set up for {}'.format(image), calls, continue_after_error=False)
- return res
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
deleted file mode 100644
index d64a1dcc..00000000
--- a/tests/cloud_tests/stage.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Stage a run."""
-
-import sys
-import time
-import traceback
-
-from tests.cloud_tests import LOG
-
-
-class PlatformComponent(object):
- """Context manager to safely handle platform components."""
-
- def __init__(self, get_func, preserve_instance=False):
- """Store get_<platform component> function as partial with no args.
-
- @param get_func: Callable returning an instance from the platform.
- @param preserve_instance: Boolean, when True, do not destroy instance
- after test. Used for test development.
- """
- self.get_func = get_func
- self.preserve_instance = preserve_instance
-
- def __enter__(self):
- """Create instance of platform component."""
- self.instance = self.get_func()
- return self.instance
-
- def __exit__(self, etype, value, trace):
- """Destroy instance."""
- if self.instance is not None:
- if self.preserve_instance:
- LOG.info('Preserving test instance %s', self.instance.name)
- else:
- self.instance.destroy()
-
-
-def run_single(name, call):
- """Run a single function, keeping track of results and time.
-
- @param name: name of part
- @param call: call to make
- @return_value: a tuple of result and fail count
- """
- res = {
- 'name': name,
- 'time': 0,
- 'errors': [],
- 'success': False
- }
- failed = 0
- start_time = time.time()
-
- try:
- call()
- except Exception as e:
- failed += 1
- res['errors'].append(str(e))
- LOG.error('stage part: %s encountered error: %s', name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- res['time'] = time.time() - start_time
- if failed == 0:
- res['success'] = True
-
- return res, failed
-
-
-def run_stage(parent_name, calls, continue_after_error=True):
- """Run a stage of collection, keeping track of results and failures.
-
- @param parent_name: name of stage calls are under
-    @param calls: list of function calls taking no params. each must return
-                  a tuple of results and failures, and may raise exceptions
- @param continue_after_error: whether or not to proceed to the next call
- after catching an exception or recording a
- failure
- @return_value: a tuple of results and failures, with result containing
- results from the function call under 'stages', and a list
- of errors (if any on this level), and elapsed time
- running stage, and the name
- """
- res = {
- 'name': parent_name,
- 'time': 0,
- 'errors': [],
- 'stages': [],
- 'success': False,
- }
- failed = 0
- start_time = time.time()
-
- for call in calls:
- try:
- (call_res, call_failed) = call()
- res['stages'].append(call_res)
- except Exception as e:
- call_failed = 1
- res['errors'].append(str(e))
- LOG.error('stage: %s encountered error: %s', parent_name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- failed += call_failed
- if call_failed and not continue_after_error:
- break
-
- res['time'] = time.time() - start_time
- if not failed:
- res['success'] = True
-
- return (res, failed)
-
-# vi: ts=4 expandtab
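For reference, run_single() and run_stage() above were composed by their callers (for example setup_image.setup_image) roughly as follows; the stage name and the failing call below are made-up examples, not part of the test suite:

    from functools import partial

    from tests.cloud_tests import stage

    def failing_part():
        raise RuntimeError("boom")

    calls = [
        partial(stage.run_single, "ok part", lambda: None),
        partial(stage.run_single, "bad part", failing_part),
    ]
    res, failed = stage.run_stage(
        "example stage", calls, continue_after_error=True)
    # res['stages'] holds one result dict per part; 'failed' counts failures (1 here).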
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
deleted file mode 100644
index fb9a5d27..00000000
--- a/tests/cloud_tests/testcases.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# ============================= Base Test Config ==============================
-base_test_data:
- script_timeout: 20
- enabled: True
- required_features: []
- cloud_config: |
- #cloud-config
- collect_scripts:
- cloud-init.log: |
- #!/bin/sh
- cat /var/log/cloud-init.log
- cloud-init-output.log: |
- #!/bin/sh
- cat /var/log/cloud-init-output.log
- instance-id: |
- #!/bin/sh
- cat /run/cloud-init/.instance-id
- instance-data.json: |
- #!/bin/sh
- cat /run/cloud-init/instance-data.json
- result.json: |
- #!/bin/sh
- cat /run/cloud-init/result.json
- status.json: |
- #!/bin/sh
- cat /run/cloud-init/status.json
- package-versions: |
- #!/bin/sh
- dpkg-query --show
- build.info: |
- #!/bin/sh
- binfo=/etc/cloud/build.info
- [ -f "$binfo" ] && cat "$binfo" || echo "N/A"
- system.journal.gz: |
- #!/bin/sh
- [ -d /run/systemd ] || { echo "not systemd."; exit 0; }
- fail() { echo "ERROR:" "$@" 1>&2; exit 1; }
- journal=""
- for d in /run/log/journal /var/log/journal; do
- for f in $d/*/system.journal; do
- [ -f "$f" ] || continue
- [ -z "$journal" ] ||
- fail "multiple journal found: $f $journal."
- journal="$f"
- done
- done
- [ -f "$journal" ] || fail "no journal file found."
- gzip --to-stdout "$journal"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
deleted file mode 100644
index bb9785d3..00000000
--- a/tests/cloud_tests/testcases/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import importlib
-import inspect
-import unittest
-
-from cloudinit.util import read_conf
-
-from tests.cloud_tests import config
-from tests.cloud_tests.testcases.base import CloudTestCase as base_test
-
-
-def discover_test(test_name):
- """Discover tests in test file for 'testname'.
-
- @return_value: list of test classes
- """
- testmod_name = 'tests.cloud_tests.testcases.{}'.format(
- config.name_sanitize(test_name))
- try:
- testmod = importlib.import_module(testmod_name)
-    except ImportError as e:
- raise ValueError(
- 'no test verifier found at: {}'.format(testmod_name)
- ) from e
-
- found = [mod for name, mod in inspect.getmembers(testmod)
- if (inspect.isclass(mod)
- and base_test in inspect.getmro(mod)
- and getattr(mod, '__test__', True))]
- if len(found) != 1:
- raise RuntimeError(
- "Unexpected situation, multiple tests for %s: %s" % (
- test_name, found))
-
- return found
-
-
-def get_test_class(test_name, test_data, test_conf):
- test_class = discover_test(test_name)[0]
-
- class DynamicTestSubclass(test_class):
-
- _realclass = test_class
- data = test_data
- conf = test_conf
- release_conf = read_conf(config.RELEASES_CONF)['releases']
-
- def __str__(self):
- return "%s (%s)" % (self._testMethodName,
- unittest.util.strclass(self._realclass))
-
- @classmethod
- def setUpClass(cls):
- cls.maybeSkipTest()
-
- return DynamicTestSubclass
-
-
-def get_suite(test_name, data, conf):
- """Get test suite with all tests for 'testname'.
-
- @return_value: a test suite
- """
- suite = unittest.TestSuite()
- suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(
- get_test_class(test_name, data, conf)))
- return suite
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
deleted file mode 100644
index 4448e0b5..00000000
--- a/tests/cloud_tests/testcases/base.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base test case module."""
-
-import crypt
-import json
-import re
-import unittest
-
-
-from cloudinit import util as c_util
-
-SkipTest = unittest.SkipTest
-
-
-class CloudTestCase(unittest.TestCase):
- """Base test class for verifiers."""
-
- # data gets populated in get_suite.setUpClass
- data = {}
- conf = None
- _cloud_config = None
- release_conf = {} # The platform's os release configuration
-
- expected_warnings = () # Subclasses set to ignore expected WARN logs
-
- @property
- def os_cfg(self):
- return self.release_conf[self.os_name]['default']
-
- def is_distro(self, distro_name):
- return self.os_cfg['os'] == distro_name
-
- @classmethod
- def maybeSkipTest(cls):
- """Present to allow subclasses to override and raise a skipTest."""
-
- def assertPackageInstalled(self, name, version=None):
- """Check dpkg-query --show output for matching package name.
-
- @param name: package base name
- @param version: string representing a package version or part of a
- version.
- """
- pkg_out = self.get_data_file('package-versions')
- pkg_match = re.search(
- '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE)
- if pkg_match:
- installed_version = pkg_match.group('version')
- if not version:
- return # Success
- if installed_version.startswith(version):
- return # Success
- raise AssertionError(
- 'Expected package version %s-%s not found. Found %s' %
- (name, version, installed_version))
- raise AssertionError('Package not installed: %s' % name)
-
- def os_version_cmp(self, cmp_version):
- """Compare the version of the test to comparison_version.
-
- @param: cmp_version: Either a float or a string representing
- a release os from releases.yaml (e.g. centos66)
-
- @return: -1 when version < cmp_version, 0 when version=cmp_version and
- 1 when version > cmp_version.
- """
- version = self.release_conf[self.os_name]['default']['version']
- if isinstance(cmp_version, str):
- cmp_version = self.release_conf[cmp_version]['default']['version']
- if version < cmp_version:
- return -1
- elif version == cmp_version:
- return 0
- else:
- return 1
-
- @property
- def os_name(self):
- return self.data.get('os_name', 'UNKNOWN')
-
- @property
- def platform(self):
- return self.data.get('platform', 'UNKNOWN')
-
- @property
- def cloud_config(self):
- """Get the cloud-config used by the test."""
- if not self._cloud_config:
- self._cloud_config = c_util.load_yaml(self.conf)
- return self._cloud_config
-
- def get_config_entry(self, name):
- """Get a config entry from cloud-config ensuring that it is present."""
- if name not in self.cloud_config:
- raise AssertionError('Key "{}" not in cloud config'.format(name))
- return self.cloud_config[name]
-
- def get_data_file(self, name, decode=True):
- """Get data file failing test if it is not present."""
- if name not in self.data:
- raise AssertionError('File "{}" missing from collect data'
- .format(name))
- if not decode:
- return self.data[name]
- return self.data[name].decode('utf-8')
-
- def get_instance_id(self):
- """Get recorded instance id."""
- return self.get_data_file('instance-id').strip()
-
- def get_status_data(self, data, version=None):
- """Parse result.json and status.json like data files.
-
- @param data: data to load
- @param version: cloud-init output version, defaults to 'v1'
- @return_value: dict of data or None if missing
- """
- if not version:
- version = 'v1'
- data = json.loads(data)
- return data.get(version)
-
- def get_datasource(self):
- """Get datasource name."""
- data = self.get_status_data(self.get_data_file('result.json'))
- return data.get('datasource')
-
- def test_no_stages_errors(self):
- """Ensure that there were no errors in any stage."""
- status = self.get_status_data(self.get_data_file('status.json'))
- for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
- self.assertIn(stage, status)
- self.assertEqual(len(status[stage]['errors']), 0,
- 'errors {} were encountered in stage {}'
- .format(status[stage]['errors'], stage))
- result = self.get_status_data(self.get_data_file('result.json'))
- self.assertEqual(len(result['errors']), 0)
-
- def test_no_warnings_in_log(self):
- """Unexpected warnings should not be found in the log."""
- warnings = [
- line for line in self.get_data_file('cloud-init.log').splitlines()
- if 'WARN' in line]
- joined_warnings = '\n'.join(warnings)
- for expected_warning in self.expected_warnings:
- self.assertIn(
- expected_warning, joined_warnings,
- msg="Did not find %s in cloud-init.log" % expected_warning)
- # Prune expected from discovered warnings
- warnings = [w for w in warnings if expected_warning not in w]
- self.assertEqual(
- [], warnings, msg="'WARN' found inside cloud-init.log")
-
- def test_instance_data_json_ec2(self):
- """Validate instance-data.json content by ec2 platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'ec2':
- raise SkipTest(
- 'Skipping ec2 instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- self.assertCountEqual(['merged_cfg'], instance_data['sensitive_keys'])
- ds = instance_data.get('ds', {})
- v1_data = instance_data.get('v1', {})
- metadata = ds.get('meta-data', {})
- macs = metadata.get(
- 'network', {}).get('interfaces', {}).get('macs', {})
- if not macs:
- raise AssertionError('No network data from EC2 meta-data')
- # Check meta-data items we depend on
- expected_net_keys = [
- 'public-ipv4s', 'ipv4-associations', 'local-hostname',
- 'public-hostname']
- for mac_data in macs.values():
- for key in expected_net_keys:
- self.assertIn(key, mac_data)
- self.assertIsNotNone(
- metadata.get('placement', {}).get('availability-zone'),
- 'Could not determine EC2 Availability zone placement')
- self.assertIsNotNone(
- v1_data['availability_zone'], 'expected ec2 availability_zone')
- self.assertEqual('aws', v1_data['cloud_name'])
- self.assertEqual('ec2', v1_data['platform'])
- self.assertEqual(
- 'metadata (http://169.254.169.254)', v1_data['subplatform'])
- self.assertIn('i-', v1_data['instance_id'])
- self.assertIn('ip-', v1_data['local_hostname'])
- self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
- re.match(r'3.\d\.\d', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_lxd(self):
- """Validate instance-data.json content by lxd platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'lxd':
- raise SkipTest(
- 'Skipping lxd instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], sorted(instance_data['base64_encoded_keys']))
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('lxd', v1_data['platform'])
- self.assertEqual(
- 'seed-dir (/var/lib/cloud/seed/nocloud-net)',
- v1_data['subplatform'])
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected lxd availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIn('cloud-test', v1_data['instance_id'])
- self.assertIn('cloud-test', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
- 'found unexpected lxd region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
- re.match(r'3.\d\.\d', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_kvm(self):
- """Validate instance-data.json content by nocloud-kvm platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'nocloud-kvm':
- raise SkipTest(
- 'Skipping nocloud-kvm instance-data.json on %s' %
- self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], instance_data['base64_encoded_keys'])
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('nocloud', v1_data['platform'])
- subplatform = v1_data['subplatform']
- self.assertIsNotNone(
- re.match(r'config-disk \(\/dev\/[a-z]{3}\)', subplatform),
- 'kvm subplatform "%s" != "config-disk (/dev/...)"' % subplatform)
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected kvm availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIsNotNone(
- re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
- v1_data['instance_id']),
- 'kvm instance_id is not a UUID: %s' % v1_data['instance_id'])
- self.assertIn('ubuntu', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
- 'found unexpected kvm region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
- re.match(r'3.\d\.\d', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
-
-class PasswordListTest(CloudTestCase):
- """Base password test case class."""
-
- def test_shadow_passwords(self):
- """Test shadow passwords."""
- shadow = self.get_data_file('shadow')
- users = {}
- dupes = []
- for line in shadow.splitlines():
- user, encpw = line.split(":")[0:2]
- if user in users:
- dupes.append(user)
- users[user] = encpw
-
- jane_enc = "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- self.assertEqual([], dupes)
- self.assertEqual(jane_enc, users['jane'])
-
- mikey_enc = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89"
- self.assertEqual(mikey_enc, users['mikey'])
-
- # shadow entry is $N$salt$, so we encrypt with the same format
- # and salt and expect the result.
- tom = "mypassword123!"
- fmtsalt = users['tom'][0:users['tom'].rfind("$") + 1]
- tom_enc = crypt.crypt(tom, fmtsalt)
- self.assertEqual(tom_enc, users['tom'])
-
- harry_enc = ("$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsG"
- "JEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/")
- dick_enc = "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
-
- # these should have been changed to random values.
- self.assertNotEqual(harry_enc, users['harry'])
- self.assertTrue(users['harry'].startswith("$"))
- self.assertNotEqual(dick_enc, users['dick'])
- self.assertTrue(users['dick'].startswith("$"))
-
- self.assertNotEqual(users['harry'], users['dick'])
-
- def test_shadow_expected_users(self):
- """Test every tom, dick, and harry user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('tom:', out)
- self.assertIn('dick:', out)
- self.assertIn('harry:', out)
- self.assertIn('jane:', out)
- self.assertIn('mikey:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
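The per-test verifier modules deleted below all subclass this CloudTestCase. As an illustrative sketch only (the class name and checks are hypothetical, and self.data is populated only by the removed harness), a verifier consumed collected artifacts through get_data_file() and get_status_data():

    from tests.cloud_tests.testcases import base


    class TestDatasourceReported(base.CloudTestCase):
        """Sketch: result.json names a datasource and records no errors."""

        def test_datasource_present(self):
            # result.json is collected on the instance; parse its 'v1' payload.
            result = self.get_status_data(self.get_data_file("result.json")) or {}
            self.assertTrue(result.get("datasource"), "no datasource recorded")
            self.assertEqual(0, len(result.get("errors", [])))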
diff --git a/tests/cloud_tests/testcases/bugs/README.md b/tests/cloud_tests/testcases/bugs/README.md
deleted file mode 100644
index 09ce0765..00000000
--- a/tests/cloud_tests/testcases/bugs/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Bug Test Configs
-
-## purpose
-Configs that reproduce bugs filed against cloud-init. Having test configs for
-cloud-init bugs ensures that the fixes do not break in the future, and makes it
-easy to see how many systems and platforms are affected by a new bug.
-
-## structure
-Should have one test config for most bugs filed. The name of the test should
-contain ``lp`` followed by the bug number. It may also be useful to add a
-comment to each bug config with a summary copied from the bug report.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.py b/tests/cloud_tests/testcases/bugs/lp1511485.py
deleted file mode 100644
index 670d3aff..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1511485(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_final_message(self):
- """Test final message exists."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Final message from cloud-config', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.yaml b/tests/cloud_tests/testcases/bugs/lp1511485.yaml
deleted file mode 100644
index ebf9763f..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# LP Bug 1511485: final_message is silent on ubuntu-12.04.5 / cloud-init 0.6.3
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: "Final message from cloud-config"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1611074.yaml b/tests/cloud_tests/testcases/bugs/lp1611074.yaml
deleted file mode 100644
index 960679d5..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1611074.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# LP Bug 1611074: Reformatting of ephemeral drive fails on resize of Azure VM
-#
-# 2016-11-18: Disabled until test written
-#
-enabled: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.py b/tests/cloud_tests/testcases/bugs/lp1628337.py
deleted file mode 100644
index a2c90481..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1628337(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_fetch_indices(self):
- """Verify no apt errors."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('W: Failed to fetch', out)
- self.assertNotIn('W: Some index files failed to download. '
- 'They have been ignored, or old ones used instead.',
- out)
-
- def test_ntp(self):
- """Verify can find ntp and install it."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('E: Unable to locate package ntp', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.yaml b/tests/cloud_tests/testcases/bugs/lp1628337.yaml
deleted file mode 100644
index e39b3cd8..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# LP Bug 1628337: cloud-init tries to install NTP before even configuring the archives
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- servers: ['ntp.ubuntu.com']
- apt:
- primary:
- - arches: [default]
- uri: http://us.archive.ubuntu.com/ubuntu/
-collect_scripts:
- ntp.conf: |
- #!/bin/bash
- cat /etc/ntp.conf
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/README.md b/tests/cloud_tests/testcases/examples/README.md
deleted file mode 100644
index 110a223b..00000000
--- a/tests/cloud_tests/testcases/examples/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Example Test Configs
-
-## Purpose
-This folder contains example cloud configs found on
-[cloudinit.readthedocs.io](https://cloudinit.readthedocs.io/en/latest/topics/examples.html).
-Examples covered by other tests, like modules, are excluded from tests here
-to prevent duplication and reduce test time.
-
-## Structure
-One test per example test config on cloudinit.readthedocs.io
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md
deleted file mode 100644
index 8db0e98e..00000000
--- a/tests/cloud_tests/testcases/examples/TODO.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Missing Examples
-
-Below lists each of the missing examples and why it has not yet been added.
-
- - Chef (takes > 60 seconds to run)
- - Puppet (takes > 60 seconds to run)
- - Manage resolv.conf (lxd backend overrides changes)
- - Adding a yum repository (need centos system)
- - Register RedHat Subscription (need centos system + subscription)
- - Adjust mount points mounted (need multiple disks)
- - Call a url when finished (need end point)
- - Reboot/poweroff when finished (how to test)
- - Disk setup (need multiple disks)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.py b/tests/cloud_tests/testcases/examples/add_apt_repositories.py
deleted file mode 100644
index 71eede97..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('ubuntu.sources.list')
- self.assertEqual(0, int(out))
-
- def test_gatech_sources(self):
- """Test GaTech entires exist."""
- out = self.get_data_file('gatech.sources.list')
- self.assertEqual(20, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml b/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
deleted file mode 100644
index 4b8575f7..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- ubuntu.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep archive.ubuntu.com | wc -l
- gatech.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep gtlib.gatech.edu | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.py b/tests/cloud_tests/testcases/examples/alter_completion_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml b/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
deleted file mode 100644
index 9e154f80..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
deleted file mode 100644
index 38540eb8..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTrustedCA(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count_ca(self):
- """Test correct count of CAs in .crt."""
- out = self.get_data_file('cert_count_ca')
- self.assertIn('7 /etc/ssl/certs/ca-certificates.crt', out)
-
- def test_cert_count_cloudinit(self):
- """Test correct count of CAs in .pem."""
- out = self.get_data_file('cert_count_cloudinit')
- self.assertIn('7 /etc/ssl/certs/cloud-init-ca-certs.pem', out)
-
- def test_cloudinit_certs(self):
- """Test text of cert."""
- out = self.get_data_file('cloudinit_certs')
- self.assertIn('-----BEGIN CERTIFICATE-----', out)
- self.assertIn('YOUR-ORGS-TRUSTED-CA-CERT-HERE', out)
- self.assertIn('-----END CERTIFICATE-----', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
deleted file mode 100644
index ad32b088..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ca-certs:
- # If present and set to True, the 'remove-defaults' parameter will remove
- # all the default trusted CA certificates that are normally shipped with
- # Ubuntu.
- # This is mainly for paranoid admins - most users will not need this
- # functionality.
- remove-defaults: true
-
- # If present, the 'trusted' parameter should contain a certificate (or list
- # of certificates) to add to the system as trusted CA certificates.
- # Pay close attention to the YAML multiline list syntax. The example shown
- # here is for a list of multiline certificates.
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
-collect_scripts:
- cloudinit_certs: |
- #!/bin/bash
- cat /etc/ssl/certs/cloud-init-ca-certs.pem
- cert_count_ca: |
- #!/bin/bash
- wc -l /etc/ssl/certs/ca-certificates.crt
- cert_count_cloudinit: |
- #!/bin/bash
- wc -l /etc/ssl/certs/cloud-init-ca-certs.pem
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
deleted file mode 100644
index 691a316b..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSSHKeys(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count(self):
- """Test cert count."""
- out = self.get_data_file('cert_count')
- self.assertEqual(20, int(out))
-
- def test_dsa_public(self):
- """Test DSA key has ending."""
- out = self.get_data_file('dsa_public')
- self.assertIn('ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost', out)
-
- def test_rsa_public(self):
- """Test RSA key has specific ending."""
- out = self.get_data_file('rsa_public')
- self.assertIn('PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost', out)
-
- def test_auth_keys(self):
- """Test authorized keys has specific ending."""
- out = self.get_data_file('auth_keys')
- self.assertIn('QPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host', out)
- self.assertIn('Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
deleted file mode 100644
index f3eaf3ce..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-
- # Send pre-generated ssh private keys to the server
- # If these are present, they will be written to /etc/ssh and
- # new random keys will not be generated
- # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x
- 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb
- 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo
- PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg
- L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W
- p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w
- ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9
- luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO
- W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP
- REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE
- -----END RSA PRIVATE KEY-----
-
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost
-
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT
- pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX
- DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR
- 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa
- LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY
- d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH
- bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3
- 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC
- /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv
- 99iziAH0KBMVbxy03Trz
- -----END DSA PRIVATE KEY-----
-
- dsa_public: ssh-dsa AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-collect_scripts:
- cert_count: |
- #!/bin/bash
- ls | wc -l
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- auth_keys: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.yaml b/tests/cloud_tests/testcases/examples/including_user_groups.yaml
deleted file mode 100644
index 86e392dd..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
deleted file mode 100644
index df133844..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestInstall(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_htop(self):
- """Verify htop installed."""
- out = self.get_data_file('htop')
- self.assertEqual(1, int(out))
-
- def test_tree(self):
- """Verify tree installed."""
- out = self.get_data_file('treeutils')
- self.assertEqual(1, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
deleted file mode 100644
index d3980228..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- packages:
- - htop
- - tree
-collect_scripts:
- htop: |
- #!/bin/bash
- dpkg -l | grep htop | wc -l
- treeutils: |
- #!/bin/bash
- dpkg -l | grep tree | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
deleted file mode 100644
index 4ec26b8f..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestChefExample(base.CloudTestCase):
- """Test chef module."""
-
- def test_chef_basic(self):
- """Test chef installed."""
- out = self.get_data_file('chef_installed')
- self.assertIn('install ok', out)
-
- # FIXME: Add more tests, and/or replace with comprehensive module tests
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
deleted file mode 100644
index 68ca95b5..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2017-03-31: Disabled as depends on third party apt repository
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Key from https://packages.chef.io/chef.asc
- apt:
- sources:
- source1:
- source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
-
- chef:
-
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
- apache:
- prefork:
- maxclients: 100
- keepalive: "off"
-
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.opscode.com/chef/install.sh"
-
-
- # Capture all subprocess output into a logfile
- # Useful for troubleshooting cloud-init issues
- output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-collect_scripts:
- chef_installed: |
- #!/bin/sh
- dpkg-query -W -f '${Status}\n' chef
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py b/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
deleted file mode 100644
index 744e49cb..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUpgrade(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_upgrade(self):
- """Test upgrade exists in apt history."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn(
- '[CLOUDINIT] util.py[DEBUG]: apt-upgrade '
- '[eatmydata apt-get --option=Dpkg::Options::=--force-confold '
- '--option=Dpkg::options::=--force-unsafe-io --assume-yes --quiet '
- 'dist-upgrade] took', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml b/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
deleted file mode 100644
index 2b7eae4c..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- package_upgrade: true
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.py b/tests/cloud_tests/testcases/examples/run_commands.py
deleted file mode 100644
index 01d5d4fc..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.yaml b/tests/cloud_tests/testcases/examples/run_commands.yaml
deleted file mode 100644
index f80eb8ce..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py b/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
deleted file mode 100644
index 3f3d8f84..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_bootcmd_host(self):
- """Test boot command worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml b/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
deleted file mode 100644
index 7bd803db..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
deleted file mode 100644
index e366c042..00000000
--- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as test suite fails this long running test currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
- # The puppetmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
deleted file mode 100644
index 7bd520f6..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary')
- self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
deleted file mode 100644
index 6f78f994..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- file /root/file_binary
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/README.md b/tests/cloud_tests/testcases/main/README.md
deleted file mode 100644
index 60346063..00000000
--- a/tests/cloud_tests/testcases/main/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Main Functionality Test Configs
-
-## purpose
-Test main features and config options of cloud-init such as logging, output
-redirection, early init and integration with init system
-
-## structure
-Should have one or more test configs for all main cloud-init output and logging
-options, and basic functionality test cases
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py
deleted file mode 100644
index 80a2c8d7..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCommandOutputSimple(base.CloudTestCase):
- """Test functionality of simple output redirection."""
-
- expected_warnings = ('Stdout, stderr changing to',)
-
- def test_output_file(self):
- """Ensure that the output file is not empty and has all stages."""
- data = self.get_data_file('cloud-init-test-output')
- self.assertNotEqual(len(data), 0, "specified log empty")
- self.assertEqual(self.get_config_entry('final_message'),
- data.splitlines()[-1].strip())
- # TODO: need to test that all stages redirected here
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.yaml b/tests/cloud_tests/testcases/main/command_output_simple.yaml
deleted file mode 100644
index 08ca8940..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Test functionality of simple output redirection
-#
-cloud_config: |
- #cloud-config
- output: { all: "| tee -a /var/log/cloud-init-test-output" }
- final_message: "should be last line in cloud-init-test-output file"
-collect_scripts:
- cloud-init-test-output: |
- #!/bin/bash
- cat /var/log/cloud-init-test-output
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/README.md b/tests/cloud_tests/testcases/modules/README.md
deleted file mode 100644
index d66101f2..00000000
--- a/tests/cloud_tests/testcases/modules/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Module Test Configs
-
-## Purpose
-Test functionality of cloud config modules. See
-[here](https://cloudinit.readthedocs.io/en/latest/topics/modules.html) for
-a full list.
-
-## Structure
-Should have one or more test configs for each module in cloudinit/config/. The
-name of the test should indicate which module the config is verifying.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/TODO.md b/tests/cloud_tests/testcases/modules/TODO.md
deleted file mode 100644
index 9513cb2d..00000000
--- a/tests/cloud_tests/testcases/modules/TODO.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# TODO
-
-The following lists completely or partially missing modules. If a module is
-listed with nothing below it, no work has been completed on that
-module. If there is a list below the module name, that is the remaining
-identified work.
-
-## apt_configure
-
- * apt_get_wrapper
- * What does this do? How to use it?
- * apt_get_command
- * To specify a different 'apt-get' command, set 'apt_get_command'.
- This must be a list, and the subcommand (update, upgrade) is appended to it.
- * Modify default and verify the options got passed correctly.
- * preserve sources
- * TBD
-
-## chef
-2016-11-17: Tests took > 60 seconds and test framework times out currently.
-
-## disable EC2 metadata
-
-## disk setup
-
-## emit upstart
-
-## fan
-
-## growpart
-
-## grub dpkg
-
-## landscape
-2016-11-17: Module is not working
-
-## lxd
-2016-11-17: Need a zfs backed test written
-
-## mcollective
-
-## migrator
-
-## mounts
-
-## phone home
-
-## power state change
-
-## puppet
-2016-11-17: Tests took > 60 seconds and test framework times out currently.
-
-## resizefs
-
-## resolv conf
-2016-11-17: Issues with changing resolv.conf and lxc backend.
-
-## redhat subscription
-2016-11-17: Need RH support in test framework.
-
-## rightscale userdata
-2016-11-17: Specific to the RightScale cloud environment.
-
-## rsyslog
-
-## scripts per boot
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts per instance
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts per once
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts user
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts vendor
-Not applicable to write a test for this as it specifies when something should be run.
-
-## snap
-2019-12-19: Need to investigate
-
-## spacewalk
-
-## ssh authkey fingerprints
-The authkey_hash key does not appear to work. The default claims to be md5, but syslog only shows sha256.
-
-## update etc hosts
-2016-11-17: Issues with changing /etc/hosts and lxc backend.
-
-## yum add repo
-2016-11-17: Need RH support in test framework.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.py b/tests/cloud_tests/testcases/modules/apt_configure_conf.py
deleted file mode 100644
index 3bf93447..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureConf(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_conf_assumeyes(self):
- """Test config assumes true."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Assume-Yes "true";', out)
-
- def test_apt_conf_fixbroken(self):
- """Test config fixes broken."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Fix-Broken "true";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml b/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
deleted file mode 100644
index de453000..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Provide a configuration for APT
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
- }
- }
-collect_scripts:
- 94cloud-init-config: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/94cloud-init-config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
deleted file mode 100644
index eabe4607..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureDisableSuites(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_empty_sourcelist(self):
- """Test source list is empty."""
- out = self.get_data_file('sources.list')
- self.assertEqual('', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
deleted file mode 100644
index 98800673..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Disables everything in sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- disable_suites:
- - $RELEASE
- - $RELEASE-updates
- - $RELEASE-backports
- - $RELEASE-security
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d'
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.py b/tests/cloud_tests/testcases/modules/apt_configure_primary.py
deleted file mode 100644
index 4950a2ef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('sources.list')
- ubuntu_source_count = len(
- [line for line in out.split('\n') if 'archive.ubuntu.com' in line])
- self.assertEqual(0, ubuntu_source_count)
-
- def test_gatech_sources(self):
- """Test GaTech entries exist."""
- out = self.get_data_file('sources.list')
- gatech_source_count = len(
- [line for line in out.split('\n') if 'gtlib.gatech.edu' in line])
- self.assertGreater(gatech_source_count, 0)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml b/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
deleted file mode 100644
index cc067d4f..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set up a custom primary sources.list
-#
-required_features:
- - apt
- - apt_src_cont
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches:
- - default
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py b/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
deleted file mode 100644
index 0c61b6cc..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureProxy(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_proxy_config(self):
- """Test proxy options added to apt config."""
- out = self.get_data_file('90cloud-init-aptproxy')
- self.assertIn(
- 'Acquire::http::Proxy "http://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::http::Proxy "http://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::ftp::Proxy "ftp://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::https::Proxy "https://squid.internal:3128";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml b/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
deleted file mode 100644
index be6c6f81..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Set apt proxy
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- proxy: "http://squid.internal:3128"
- http_proxy: "http://squid.internal:3128"
- ftp_proxy: "ftp://squid.internal:3128"
- https_proxy: "https://squid.internal:3128"
-collect_scripts:
- 90cloud-init-aptproxy: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-aptproxy
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.py b/tests/cloud_tests/testcases/modules/apt_configure_security.py
deleted file mode 100644
index 7d7e2585..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSecurity(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_security_mirror(self):
- """Test security lines added and uncommented in source.list."""
- out = self.get_data_file('sources.list')
- self.assertEqual(6, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml b/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
deleted file mode 100644
index 83dd51df..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Add security to sources.list
-#
-required_features:
- - apt
- - ubuntu_repos
-cloud_config: |
- #cloud-config
- apt:
- security:
- - arches:
- - default
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -c security.ubuntu.com /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
deleted file mode 100644
index d9061f3c..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKey(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test key list updated."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
deleted file mode 100644
index bde9398a..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Add a sources.list entry with a given key (Debian Jessie)
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.1.6
- Comment: Hostname: keyserver.ubuntu.com
-
- mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
- lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
- RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
- M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
- +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
- b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
- N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
- V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
- xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
- WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
- UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
- CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
- o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
- vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
- yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
- C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
- arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
- uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
- zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
- ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
- cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
- pb0uBy+g0oxJQg15
- =uy53
- -----END PGP PUBLIC KEY BLOCK-----
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
deleted file mode 100644
index ddc86174..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKeyserver(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test specific key added."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
deleted file mode 100644
index 25088135..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Add a sources.list entry with a key from a keyserver
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 1FF0D8535EF7E719E5C81B9C083D06FBE4D304DF
- keyserver: keyserver.ubuntu.com
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
deleted file mode 100644
index cf84e056..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesList(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_sources_list(self):
- """Test sources.list includes sources."""
- out = self.get_data_file('sources.list')
-
-        # Verify we have 6 entries
- self.assertEqual(6, len(out.rstrip().split('\n')))
-
- # Verify the keys generated the list correctly
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
- self.assertRegex(out, r'deb-src http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
deleted file mode 100644
index 87e470c1..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Generate a sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-collect_scripts:
- sources.list: |
-    #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
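
To make the link between the sources_list template above and the six-entry assertion in the verify script concrete, here is a minimal sketch that expands the $MIRROR/$PRIMARY/$SECURITY/$RELEASE variables with the values the yaml declares; the release codename is an assumed example, everything else is taken from the config.

    from string import Template

    # Entries copied from the sources_list block in the yaml above.
    template_lines = [
        "deb $MIRROR $RELEASE main restricted",
        "deb-src $MIRROR $RELEASE main restricted",
        "deb $PRIMARY $RELEASE universe restricted",
        "deb-src $PRIMARY $RELEASE universe restricted",
        "deb $SECURITY $RELEASE-security multiverse",
        "deb-src $SECURITY $RELEASE-security multiverse",
    ]

    subs = {
        "MIRROR": "http://archive.ubuntu.com/ubuntu",     # primary uri in the yaml
        "PRIMARY": "http://archive.ubuntu.com/ubuntu",
        "SECURITY": "http://security.ubuntu.com/ubuntu",  # security uri in the yaml
        "RELEASE": "focal",                               # assumed codename for the example
    }

    rendered = [Template(line).substitute(subs) for line in template_lines]
    print("\n".join(rendered))
    print(len(rendered))  # 6, the count the verify script asserts
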
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
deleted file mode 100644
index dfbdeadf..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesPPA(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ppa(self):
- """Test specific ppa added."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
- def test_ppa_key(self):
- """Test ppa key added."""
- out = self.get_data_file('apt-key')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
deleted file mode 100644
index b997bcfb..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Add a PPA to sources.list
-#
-# NOTE: on older ubuntu releases the sources file added is named
-# 'cloud-init-dev-test-archive-trusty', without 'ubuntu' in the middle (see the sketch after this file)
-required_features:
- - apt
- - ppa
- - ppa_file_name
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 0165013E
- keyserver: keyserver.ubuntu.com
- source: "ppa:cloud-init-dev/test-archive"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/cloud-init-dev-ubuntu-test-archive-*.list
- apt-key: |
- #!/bin/bash
- apt-key finger
- sources_full: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
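
The naming convention described by the NOTE in the yaml above can be sketched as follows; the format is an assumption inferred from that NOTE and from the glob in the collect script, not taken from cloud-init's code.

    def ppa_sources_filename(owner, archive, release, include_distro=True):
        """Assumed name of the sources file written for a PPA."""
        parts = [owner, "ubuntu", archive] if include_distro else [owner, archive]
        return "-".join(parts) + "-%s.list" % release

    # Newer releases include 'ubuntu' in the middle of the file name...
    print(ppa_sources_filename("cloud-init-dev", "test-archive", "focal"))
    # ...older releases (e.g. trusty) did not.
    print(ppa_sources_filename("cloud-init-dev", "test-archive", "trusty", include_distro=False))
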
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
deleted file mode 100644
index c98eedef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningDisable(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_disable_pipelining(self):
- """Test pipelining disabled."""
- out = self.get_data_file('90cloud-init-pipelining')
- self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
deleted file mode 100644
index 22a31dc4..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable apt pipelining value
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: false
-collect_scripts:
- 90cloud-init-pipelining: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-pipelining
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
deleted file mode 100644
index 2b940a66..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningOS(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_os_pipelining(self):
-        """Test 'os' setting does not write apt config file."""
- out = self.get_data_file('90cloud-init-pipelining_not_written')
- self.assertEqual(0, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
deleted file mode 100644
index 86d5220b..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Set apt pipelining value to OS, no conf written
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: os
-collect_scripts:
- 90cloud-init-pipelining_not_written: |
- #!/bin/bash
- ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.py b/tests/cloud_tests/testcases/modules/bootcmd.py
deleted file mode 100644
index f5b86b03..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Test bootcmd module."""
-
- def test_bootcmd_host(self):
- """Test boot cmd worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.yaml b/tests/cloud_tests/testcases/modules/bootcmd.yaml
deleted file mode 100644
index 3a73994e..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Early boot command
-#
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
deleted file mode 100644
index 74d0529a..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestByobu(base.CloudTestCase):
- """Test Byobu module."""
-
- def test_byobu_installed(self):
- """Test byobu installed."""
- self.assertPackageInstalled('byobu')
-
- def test_byobu_profile_enabled(self):
- """Test byobu profile.d file exists."""
- out = self.get_data_file('byobu_profile_enabled')
- self.assertIn('/etc/profile.d/Z97-byobu.sh', out)
-
- def test_byobu_launch_exists(self):
- """Test byobu-launch exists."""
- out = self.get_data_file('byobu_launch_exists')
- self.assertIn('/usr/bin/byobu-launch', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml
deleted file mode 100644
index d002a611..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Install and enable byobu system wide and default user
-#
-required_features:
- - byobu
-cloud_config: |
- #cloud-config
- byobu_by_default: enable
-collect_scripts:
- byobu_profile_enabled: |
- #!/bin/bash
- ls /etc/profile.d/Z97-byobu.sh
- byobu_launch_exists: |
- #!/bin/bash
- which /usr/bin/byobu-launch
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
deleted file mode 100644
index 6b56f639..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCaCerts(base.CloudTestCase):
- """Test ca certs module."""
-
- def test_certs_updated(self):
- """Test certs have been updated in /etc/ssl/certs."""
- out = self.get_data_file('cert_links')
-        # Bionic update-ca-certificates creates fewer links (Debian bug #895075)
- unlinked_files = []
- links = {}
- for cert_line in out.splitlines():
- if '->' in cert_line:
- fname, _sep, link = cert_line.split()
- links[fname] = link
- else:
- unlinked_files.append(cert_line)
- self.assertEqual(['ca-certificates.crt'], unlinked_files)
- self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0'])
- self.assertEqual(
- '/usr/share/ca-certificates/cloud-init-ca-certs.crt',
- links['cloud-init-ca-certs.pem'])
-
- def test_cert_installed(self):
- """Test line from our cert exists."""
- out = self.get_data_file('cert')
- self.assertIn('a36c744454555024e7f82edc420fd2c8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml
deleted file mode 100644
index 2cd91551..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Remove existing ca_certs and install custom ca-cert
-#
-cloud_config: |
- #cloud-config
- ca-certs:
- remove-defaults: true
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
- DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
- d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
- bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
- E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
- MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
- d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
- MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
- 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
- k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
- yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
- RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
- q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
- uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
- vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
- 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
- Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
- z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
- SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
- Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
- TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
- ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
- DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
- mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
- PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
- 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
- slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
- ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
- Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
- RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
- CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
- hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
- DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
- -----END CERTIFICATE-----
-collect_scripts:
- cert_links: |
- #!/bin/bash
- # links printed <filename> -> <link target>
- # non-links printed <filename>
- for file in `ls /etc/ssl/certs`; do
- [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file;
- done
- cert: |
- #!/bin/bash
- md5sum /etc/ssl/certs/ca-certificates.crt
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.py b/tests/cloud_tests/testcases/modules/debug_disable.py
deleted file mode 100644
index e40e4b89..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugDisable(base.CloudTestCase):
- """Disable debug messages."""
-
- def test_debug_disable(self):
- """Test verbose output missing from logs."""
- out = self.get_data_file('cloud-init.log')
-        self.assertNotRegex(
-            out, r'Skipping module named [a-z].* verbose printing disabled')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.yaml b/tests/cloud_tests/testcases/modules/debug_disable.yaml
deleted file mode 100644
index 63218b18..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Do not run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.py b/tests/cloud_tests/testcases/modules/debug_enable.py
deleted file mode 100644
index 28d26062..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugEnable(base.CloudTestCase):
- """Test debug messages."""
-
- def test_debug_enable(self):
- """Test debug messages in cloud-init log."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('[DEBUG]', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.yaml b/tests/cloud_tests/testcases/modules/debug_enable.yaml
deleted file mode 100644
index d44147db..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.py b/tests/cloud_tests/testcases/modules/final_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.yaml b/tests/cloud_tests/testcases/modules/final_message.yaml
deleted file mode 100644
index c9ed6118..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Print a final message with various predefined variables
-#
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
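
The per-line substitution that the verify script checks can be pictured with Python's string.Template; the values below are invented placeholders, since the real ones are filled in by cloud-init itself at boot.

    from string import Template

    final_message = "\n".join([
        "This is my final message!",
        "$version",
        "$timestamp",
        "$datasource",
        "$uptime",
    ])

    # Placeholder values for illustration only.
    rendered = Template(final_message).substitute(
        version="22.1",
        timestamp="Mon, 01 Jan 2024 00:00:00 +0000",
        datasource="DataSourceNoCloud",
        uptime="12.34",
    )
    print(rendered)
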
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.py b/tests/cloud_tests/testcases/modules/keys_to_console.py
deleted file mode 100644
index 07f38112..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestKeysToConsole(base.CloudTestCase):
- """Test proper keys are included and excluded to console."""
-
- def test_excluded_keys(self):
- """Test excluded keys missing."""
- out = self.get_data_file('syslog')
- self.assertNotIn('(DSA)', out)
- self.assertNotIn('(ECDSA)', out)
-
- def test_expected_keys(self):
- """Test expected keys exist."""
- out = self.get_data_file('syslog')
- self.assertIn('(ED25519)', out)
- self.assertIn('(RSA)', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.yaml b/tests/cloud_tests/testcases/modules/keys_to_console.yaml
deleted file mode 100644
index 5d86e739..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Hide printing of ssh key and fingerprints for specific keys
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
- ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/landscape.yaml b/tests/cloud_tests/testcases/modules/landscape.yaml
deleted file mode 100644
index ed2c37c4..00000000
--- a/tests/cloud_tests/testcases/modules/landscape.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Setup landscape client settings
-#
-# 2016-11-17: Disabled due to this not working
-#
-enabled: false
-required_features:
- - landscape
-cloud_config: |
-  #cloud-config
- landscape:
- client:
- log_level: "info"
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- https_proxy: "https://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: "footitle"
- registration_key: "fookey"
- account_name: "fooaccount"
-collect_scripts:
- client.conf: |
- #!/bin/bash
- cat /etc/landscape/client.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.py b/tests/cloud_tests/testcases/modules/locale.py
deleted file mode 100644
index cb9e1dce..00000000
--- a/tests/cloud_tests/testcases/modules/locale.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-from cloudinit import util
-
-
-class TestLocale(base.CloudTestCase):
- """Test locale is set properly."""
-
- def test_locale(self):
- """Test locale is set properly."""
- data = util.load_shell_content(self.get_data_file('locale_default'))
- self.assertIn("LANG", data)
- self.assertEqual('en_GB.UTF-8', data['LANG'])
-
- def test_locale_a(self):
- """Test locale -a has both options."""
- out = self.get_data_file('locale_a')
- self.assertIn('en_GB.utf8', out)
- self.assertIn('en_US.utf8', out)
-
- def test_locale_gen(self):
-        """Test locale.gen file has all entries."""
- out = self.get_data_file('locale_gen')
- self.assertIn('en_GB.UTF-8', out)
- self.assertIn('en_US.UTF-8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.yaml b/tests/cloud_tests/testcases/modules/locale.yaml
deleted file mode 100644
index e01518a1..00000000
--- a/tests/cloud_tests/testcases/modules/locale.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Set locale to non-default option and verify
-#
-required_features:
- - engb_locale
- - locale_gen
-cloud_config: |
- #cloud-config
- locale: en_GB.UTF-8
- locale_configfile: /etc/default/locale
-collect_scripts:
- locale_default: |
- #!/bin/bash
- cat /etc/default/locale
- locale_a: |
- #!/bin/bash
- locale -a
- locale_gen: |
- #!/bin/bash
- cat /etc/locale.gen | grep -v '^#' | uniq
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
deleted file mode 100644
index ea545e0a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdBridge(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
- def test_bridge(self):
- """Test bridge config."""
- out = self.get_data_file('lxc-bridge')
- self.assertIn('lxdbr0', out)
- self.assertIn('10.100.100.1/24', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml b/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
deleted file mode 100644
index e6b7e76a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# LXD configured with directory backend and IPv4 bridge
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
- bridge:
- mode: new
- name: lxdbr0
- ipv4_address: 10.100.100.1
- ipv4_netmask: 24
- ipv4_dhcp_first: 10.100.100.100
- ipv4_dhcp_last: 10.100.100.200
- ipv4_nat: true
- domain: lxd
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
- lxc-bridge: |
- #!/bin/bash
- ip addr show lxdbr0
- cat /etc/default/lxd-bridge 2>/dev/null | grep -v ^# | sort -u
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
deleted file mode 100644
index 797bafed..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdDir(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.yaml b/tests/cloud_tests/testcases/modules/lxd_dir.yaml
deleted file mode 100644
index f93a3fa7..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# LXD configured with directory backend
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
deleted file mode 100644
index c63cc15e..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- self.assertPackageInstalled('ntp')
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pool_list')
- self.assertIn('pool.ntp.org iburst', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml
deleted file mode 100644
index 7ea0707d..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Empty NTP config to set up using defaults
-#
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools: []
- servers: []
-collect_scripts:
- ntp_installed: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_empty: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pool_list: |
- #!/bin/bash
- grep 'pool.ntp.org' /etc/ntp.conf | grep -v ^#
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
deleted file mode 100644
index 7d341773..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-import unittest
-
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpChrony(base.CloudTestCase):
- """Test ntp module with chrony client"""
-
- def setUp(self):
- """Skip this suite of tests on lxd and artful or older."""
- if self.platform == 'lxd':
- if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
- raise unittest.SkipTest(
- 'No support for chrony on containers <= artful.'
- ' LP: #1589780')
- return super(TestNtpChrony, self).setUp()
-
- def test_chrony_entries(self):
- """Test chrony config entries"""
- out = self.get_data_file('chrony_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
deleted file mode 100644
index 120735e2..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# ntp enabled, chrony selected, check conf file
-# as chrony won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: chrony
-collect_scripts:
- chrony_conf: |
- #!/bin/sh
- set -- /etc/chrony.conf /etc/chrony/chrony.conf
- for p in "$@"; do
- [ -e "$p" ] && { cat "$p"; exit; }
- done
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.py b/tests/cloud_tests/testcases/modules/ntp_pools.py
deleted file mode 100644
index 152fd3f1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpPools(base.CloudTestCase):
- """Test ntp module."""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_pools')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_pools')
- self.assertEqual(0, int(out))
-
-    def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pools')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn('pool %s iburst' % pool, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn(pool, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
deleted file mode 100644
index 60fa0fd1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# NTP config using specific pools
-#
-# NOTE: lsb_release is listed here because, with (LP: 1628337) resolved in
-# recent cloud-init debs, cloud-init will attempt to configure archives.
-# This fails without lsb_release, as UNAVAILABLE is used for $RELEASE.
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools:
- - 0.cloud-init.mypool
- - 1.cloud-init.mypool
- - 172.16.15.14
-collect_scripts:
- ntp_installed_pools: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_pools: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pools: |
- #!/bin/bash
- grep '^pool' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py
deleted file mode 100644
index 8d2a68b3..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpServers(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config server entries"""
- out = self.get_data_file('ntp_conf_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn('server %s iburst' % server, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn(server, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
deleted file mode 100644
index ee636679..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# NTP config using specific servers
-#
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- servers:
- - 172.16.15.14
- - 172.16.17.18
-collect_scripts:
- ntp_installed_servers: |
- #!/bin/sh
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_servers: |
- #!/bin/sh
- cat /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/sh
- grep '^server' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
deleted file mode 100644
index eca750bc..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpTimesyncd(base.CloudTestCase):
- """Test ntp module with systemd-timesyncd client"""
-
- def test_timesyncd_entries(self):
- """Test timesyncd config entries"""
- out = self.get_data_file('timesyncd_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
deleted file mode 100644
index ee47a741..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# ntp enabled, systemd-timesyncd selected, check conf file
-# as systemd-timesyncd won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: systemd-timesyncd
-collect_scripts:
- timesyncd_conf: |
- #!/bin/sh
- cat /etc/systemd/timesyncd.conf.d/cloud-init.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
deleted file mode 100644
index fecad768..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
- """Test package install update upgrade module."""
-
- def test_installed_sl(self):
- """Test sl got installed."""
- self.assertPackageInstalled('sl')
-
- def test_installed_tree(self):
- """Test tree got installed."""
- self.assertPackageInstalled('tree')
-
- def test_apt_history(self):
- """Test apt history for update command."""
- out = self.get_data_file('apt_history_cmdline')
- self.assertIn(
- 'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
- '::=--force-confold --option=Dpkg::options::=--force-unsafe-io '
- '--assume-yes --quiet install sl tree', out)
-
- def test_cloud_init_output(self):
- """Test cloud-init-output for install & upgrade stuff."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Setting up tree (', out)
- self.assertIn('Setting up sl (', out)
- self.assertIn('Reading package lists...', out)
- self.assertIn('Building dependency tree...', out)
- self.assertIn('Reading state information...', out)
- self.assertIn('Calculating upgrade...', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
deleted file mode 100644
index dd79e438..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Update/upgrade via apt and then install a pair of packages
-#
-# NOTE: this should not require the apt feature; use 'which' rather than 'dpkg -l'
-# NOTE: the testcase for this looks for the command in history.log as
-# /usr/bin/apt-get..., which is not how it always appears. It should
-# instead look for just apt-get... (see the sketch after this file)
-# NOTE: this testcase should not require 'apt_up_out', and should look for a
-# call to 'apt-get upgrade' or 'apt-get dist-upgrade' in cloud-init.log
-# rather than 'Calculating upgrade...' in output
-required_features:
- - apt
- - apt_hist_fmt
- - apt_up_out
-cloud_config: |
- #cloud-config
- packages:
- - sl
- - tree
- package_update: true
- package_upgrade: true
-collect_scripts:
- apt_history_cmdline: |
- #!/bin/bash
- grep ^Commandline: /var/log/apt/history.log
- dpkg_show: |
- #!/bin/bash
- dpkg-query --show
-
-# vi: ts=4 expandtab
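
A sketch of the more lenient check the NOTE above asks for: match any apt-get install of the expected packages in apt's history.log instead of requiring the full /usr/bin/apt-get command line. The log path and package names are the ones used in the testcase; the helper itself is only illustrative.

    def apt_installed_packages(history_log_text, packages=("sl", "tree")):
        """Return True if some Commandline entry installed all packages via apt-get."""
        for line in history_log_text.splitlines():
            if not line.startswith("Commandline:"):
                continue
            tokens = line.split()
            if "install" in tokens and any("apt-get" in tok for tok in tokens):
                if all(pkg in tokens for pkg in packages):
                    return True
        return False

    with open("/var/log/apt/history.log") as fp:  # same file the collect script greps
        print(apt_installed_packages(fp.read()))
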
diff --git a/tests/cloud_tests/testcases/modules/runcmd.py b/tests/cloud_tests/testcases/modules/runcmd.py
deleted file mode 100644
index 9fce3062..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Test runcmd module."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.yaml b/tests/cloud_tests/testcases/modules/runcmd.yaml
deleted file mode 100644
index 8309a883..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Run a simple command
-#
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_command.yaml b/tests/cloud_tests/testcases/modules/seed_random_command.yaml
deleted file mode 100644
index 6a9157eb..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_command.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Use uuid to create a random string
-#
-# 2016-11-15 Disabled as this is not working currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- random_seed:
- command: ["cat", "/proc/sys/kernel/random/uuid"]
- command_required: true
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.py b/tests/cloud_tests/testcases/modules/seed_random_data.py
deleted file mode 100644
index db433d26..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSeedRandom(base.CloudTestCase):
- """Test seed random module."""
-
- def test_random_seed_data(self):
- """Test random data passed in exists."""
- out = self.get_data_file('seed_data')
- self.assertIn('MYUb34023nD:LFDK10913jk;dfnk:Df', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.yaml b/tests/cloud_tests/testcases/modules/seed_random_data.yaml
deleted file mode 100644
index a9b2c885..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Push in random raw string to set as seed
-#
-cloud_config: |
- #cloud-config
- random_seed:
- data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
- encoding: raw
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.py b/tests/cloud_tests/testcases/modules/set_hostname.py
deleted file mode 100644
index 1dbe64c2..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestHostname(base.CloudTestCase):
- """Test hostname module."""
-
- ex_hostname = "cloudinit2"
-
- def test_hostname(self):
- """Test hostname command shows correct output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.yaml b/tests/cloud_tests/testcases/modules/set_hostname.yaml
deleted file mode 100644
index 071fb220..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- hostname: cloudinit2
-
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
deleted file mode 100644
index a405b30b..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests import CI_DOMAIN
-from tests.cloud_tests.testcases import base
-
-
-class TestHostnameFqdn(base.CloudTestCase):
- """Test Hostname module."""
-
- ex_hostname = "cloudinit1"
- ex_fqdn = "cloudinit2." + CI_DOMAIN
-
- def test_hostname(self):
- """Test hostname output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
- def test_hostname_fqdn(self):
- """Test hostname fqdn output."""
- out = self.get_data_file('fqdn')
- self.assertIn(self.ex_fqdn, out)
-
- def test_hosts(self):
- """Test /etc/hosts file."""
- out = self.get_data_file('hosts')
- self.assertIn('127.0.1.1 %s %s' % (self.ex_fqdn, self.ex_hostname),
- out)
- self.assertIn('127.0.0.1 localhost', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
deleted file mode 100644
index a85ee79e..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- manage_etc_hosts: true
- hostname: cloudinit1
-  # this needs changing if CI_DOMAIN is updated.
- fqdn: cloudinit2.i9n.cloud-init.io
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password.py b/tests/cloud_tests/testcases/modules/set_password.py
deleted file mode 100644
index a29b2261..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPassword(base.CloudTestCase):
- """Test password module."""
-
-    # TODO: add a test that the password is actually "password" (see the sketch after this file)
-
- def test_shadow(self):
- """Test ubuntu user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('ubuntu:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
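
The TODO above could be closed along these lines: re-hash the candidate password with the salt stored in the collected shadow output and compare. This is only a sketch using the stdlib crypt module (Unix-only), not something the test framework provides.

    import crypt

    def password_matches(shadow_text, user="ubuntu", password="password"):
        """Check whether the stored shadow hash for user corresponds to password."""
        for line in shadow_text.splitlines():
            fields = line.split(":")
            if fields and fields[0] == user:
                stored = fields[1]
                # crypt reuses the $id$salt$ prefix of the stored hash as its salt.
                return crypt.crypt(password, stored) == stored
        return False
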
diff --git a/tests/cloud_tests/testcases/modules/set_password.yaml b/tests/cloud_tests/testcases/modules/set_password.yaml
deleted file mode 100644
index 04d7c58a..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set password of default user
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- password: password
- chpasswd: { expire: False }
- ssh_pwauth: True
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.py b/tests/cloud_tests/testcases/modules/set_password_expire.py
deleted file mode 100644
index 967aca7b..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordExpire(base.CloudTestCase):
- """Test password module."""
-
- def test_shadow(self):
- """Test user frozen in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('harry:!:', out)
- self.assertIn('dick:!:', out)
- self.assertIn('tom:!:', out)
-        self.assertIn('jane:!:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.yaml b/tests/cloud_tests/testcases/modules/set_password_expire.yaml
deleted file mode 100644
index ba6344b9..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Expire password for all users
-#
-required_features:
- - sshd
-cloud_config: |
- #cloud-config
- chpasswd: { expire: True }
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: dick
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: harry
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: jane
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.py b/tests/cloud_tests/testcases/modules/set_password_list.py
deleted file mode 100644
index 375cd27d..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordList(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via list in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.yaml b/tests/cloud_tests/testcases/modules/set_password_list.yaml
deleted file mode 100644
index fd3e1e44..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list:
- - tom:mypassword123!
- - dick:RANDOM
- - harry:RANDOM
- - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.py b/tests/cloud_tests/testcases/modules/set_password_list_string.py
deleted file mode 100644
index 8c2634c5..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordListString(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via string in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml b/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
deleted file mode 100644
index e9fe54b0..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users as a string
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list: |
- tom:mypassword123!
- dick:RANDOM
- harry:RANDOM
- mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
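The two chpasswd test configs above seed users with pre-hashed 'passwd' values ("$1$" md5, "$5$" sha256, "$6$" sha512 prefixes). A minimal sketch of how such hashes can be produced, assuming a Unix Python with the standard-library crypt module (deprecated since Python 3.11); the plaintexts mirror the comments in the configs:

import crypt

# Each call returns a salted crypt(3)-style hash usable in a 'passwd' field.
print(crypt.crypt("gotomgo", crypt.mksalt(crypt.METHOD_MD5)))       # "$1$..."
print(crypt.crypt("gojanego", crypt.mksalt(crypt.METHOD_SHA256)))   # "$5$..."
print(crypt.crypt("goharrygo", crypt.mksalt(crypt.METHOD_SHA512)))  # "$6$..."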
diff --git a/tests/cloud_tests/testcases/modules/snap.py b/tests/cloud_tests/testcases/modules/snap.py
deleted file mode 100644
index ff68abbe..00000000
--- a/tests/cloud_tests/testcases/modules/snap.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestSnap(base.CloudTestCase):
- """Test snap module"""
-
- def test_snappy_version(self):
- """Expect hello-world and core snaps are installed."""
- out = self.get_data_file('snaplist')
- self.assertIn('core', out)
- self.assertIn('hello-world', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml
deleted file mode 100644
index 322199c3..00000000
--- a/tests/cloud_tests/testcases/modules/snap.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Install snappy
-#
-# Aug 23, 2018: Disabled due to requiring a proxy for testing
-# tests do not handle the proxy well at this time.
-enabled: False
-required_features:
- - snap
-cloud_config: |
- #cloud-config
- package_update: true
- snap:
- squashfuse_in_container: true
- commands:
- - snap install hello-world
-collect_scripts:
- snaplist: |
- #!/bin/bash
- snap list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
deleted file mode 100644
index 02935447..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsDisable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_cloud_init_log(self):
- """Verify disabled."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('Skipping module named ssh-authkey-fingerprints, '
- 'logging of SSH fingerprints disabled', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
deleted file mode 100644
index d93893e2..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable fingerprint printing
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- no_ssh_fingerprints: true
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
deleted file mode 100644
index 3510e75a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsEnable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_syslog(self):
- """Verify output of syslog."""
- out = self.get_data_file('syslog')
- self.assertRegex(out, r'256 SHA256:.*(ECDSA)')
- self.assertRegex(out, r'256 SHA256:.*(ED25519)')
- self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
- self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
deleted file mode 100644
index 9f5dc34a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Print auth keys with different hash than md5
-#
-# NOTE: the testcase checks for '256 SHA256:.*(ECDSA)' in the output;
-# on trusty this fails because the output line reads '256:.*(ECDSA)'
-required_features:
- - syslog
- - ssh_key_fmt
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.py b/tests/cloud_tests/testcases/modules/ssh_import_id.py
deleted file mode 100644
index ef156f47..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshImportId(base.CloudTestCase):
- """Test ssh import id module."""
-
- def test_authorized_keys(self):
- """Test that ssh keys were imported."""
- out = self.get_data_file('auth_keys_ubuntu')
-
- self.assertIn('# ssh-import-id gh:powersj', out)
- self.assertIn('# ssh-import-id lp:smoser', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml b/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
deleted file mode 100644
index b62d3f69..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Import a user's ssh key via gh or lp
-#
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- ssh_import_id:
- - gh:powersj
- - lp:smoser
-collect_scripts:
- auth_keys_ubuntu: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
deleted file mode 100644
index b68f5565..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysGenerate(base.CloudTestCase):
- """Test ssh keys module."""
-
- # TODO: Check cloud-init-output for the correct keys being generated
-
- def test_dsa_public(self):
- """Test dsa public key not generated."""
- out = self.get_data_file('dsa_public')
- self.assertEqual('', out)
-
- def test_dsa_private(self):
- """Test dsa private key not generated."""
- out = self.get_data_file('dsa_private')
- self.assertEqual('', out)
-
- def test_rsa_public(self):
- """Test rsa public key not generated."""
- out = self.get_data_file('rsa_public')
- self.assertEqual('', out)
-
- def test_rsa_private(self):
-        """Test rsa private key not generated."""
- out = self.get_data_file('rsa_private')
- self.assertEqual('', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key generated."""
- out = self.get_data_file('ecdsa_public')
- self.assertIsNotNone(out)
-
- def test_ecdsa_private(self):
-        """Test ecdsa private key generated."""
- out = self.get_data_file('ecdsa_private')
- self.assertIsNotNone(out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key generated."""
- out = self.get_data_file('ed25519_public')
- self.assertIsNotNone(out)
-
- def test_ed25519_private(self):
-        """Test ed25519 private key generated."""
- out = self.get_data_file('ed25519_private')
- self.assertIsNotNone(out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
deleted file mode 100644
index 0a7adf62..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# SSH keys generated using cloud-init
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- authkey_hash: sha512
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
deleted file mode 100644
index add3f469..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysProvided(base.CloudTestCase):
- """Test ssh keys module."""
-
- def test_dsa_public(self):
- """Test dsa public key passed in."""
- out = self.get_data_file('dsa_public')
- self.assertIn('AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8c'
- 'NM4ZpeuE5UB/Nnr6OSU/nmbO8LuM', out)
-
- def test_dsa_private(self):
- """Test dsa private key passed in."""
- out = self.get_data_file('dsa_private')
- self.assertIn('MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr'
- 'hOVAfzZ6+jklP', out)
-
- def test_rsa_public(self):
- """Test rsa public key passed in."""
- out = self.get_data_file('rsa_public')
- self.assertIn('AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT'
- 'LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4', out)
-
- def test_rsa_private(self):
-        """Test rsa private key passed in."""
- out = self.get_data_file('rsa_private')
- self.assertIn('4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un'
- 'RQvLZpMRdywBm', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key passed in."""
- out = self.get_data_file('ecdsa_public')
- self.assertIn('AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB'
- 'BBFsS5Tvky/IC/dXhE/afxxU', out)
-
- def test_ecdsa_private(self):
-        """Test ecdsa private key passed in."""
- out = self.get_data_file('ecdsa_private')
- self.assertIn('AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY'
- '5mpZqxgX4vcgb', out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key passed in."""
- out = self.get_data_file('ed25519_public')
- self.assertIn('AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6'
- 'G15dqjQ2XkNVOEnb5', out)
-
- def test_ed25519_private(self):
-        """Test ed25519 private key passed in."""
- out = self.get_data_file('ed25519_private')
- self.assertIn('XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT'
- 'OhteXao0Nl5DVThJ2+Q', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
deleted file mode 100644
index 41f63550..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-#
-# SSH keys provided via cloud config
-#
-enabled: False
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- disable_root: false
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAtPx6PqN3iSEsnTtibyIEy52Tra8T5fn0ryXyg46Di2NBwdnj
- o8trNv9jenfV/UhmePl58lXjT43wV8OCMl6KsYXyBdegM35NNtono4I4mLLKFMR9
- 9TOtDn6iYcaNenVhF3ZCj9Z2nNOlTrdc0uchHqKMrxLjCRCUrL91Uf+xioTF901Y
- RM+ZqC5lT92yAL76F4qPF+Lq1QtUfNfUIwwvOp5ccDZLPxij0YvyBzubYye9hJHu
- yjbJv78R4JHV+L2WhzSoX3W/6WrxVzeXqFGqH894ccOaC/7tnqSP6V8lIQ6fE2+c
- DurJcpM3CJRgkndGHjtU55Y71YkcdLksSMvezQIDAQABAoIBAQCrU4IJP8dNeaj5
- IpkY6NQvR/jfZqfogYi+MKb1IHin/4rlDfUvPcY9pt8ttLlObjYK+OcWn3Vx/sRw
- 4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2unRQvLZpMRdywBm
- lq95OrCghnG03aUsFJUZPpi5ydnwbA12ma+KHkG0EzaVlhA7X9N6z0K6U+zue2gl
- goMLt/MH0rsYawkHrwiwXaIFQeyV4MJP0vmrZLbFk1bycu9X/xPtTYotWyWo4eKA
- cb05uu04qwexkKHDM0KXtT0JecbTo2rOefFo8Uuab6uJY+fEHNocZ+v1vLA4aOxJ
- ovp1JuXlAoGBAOWYNgKrlTfy5n0sKsNk+1RuL2jHJZJ3HMd0EIt7/fFQN3Fi08Hu
- jtntqD30Wj+DJK8b8Lrt66FruxyEJm5VhVmwkukrLR5ige2f6ftZnoFCmdyy+0zP
- dnPZSUe2H5ZPHa+qthJgHLn+al2P04tGh+1fGHC2PbP+e0Co+/ZRIOxrAoGBAMnN
- IEen9/FRsqvnDd36I8XnJGskVRTZNjylxBmbKcuMWm+gNhOI7gsCAcqzD4BYZjjW
- pLhrt/u9p+l4MOJy6OUUdM/okg12SnJEGryysOcVBcXyrvOfklWnANG4EAH5jt1N
- ftTb1XTxzvWVuR/WJK0B5MZNYM71cumBdUDtPi+nAoGAYmoIXMSnxb+8xNL10aOr
- h9ljQQp8NHgSQfyiSufvRk0YNuYh1vMnEIsqnsPrG2Zfhx/25GmvoxXGssaCorDN
- 5FAn6QK06F1ZTD5L0Y3sv4OI6G1gAuC66ZWuL6sFhyyKkQ4f1WiVZ7SCa3CHQSAO
- i9VDaKz1bf4bXvAQcNj9v9kCgYACSOZCqW4vN0OUmqsXhkt9ZB6Pb/veno70pNPR
- jmYsvcwQU3oJQpWfXkhy6RAV3epaXmPDCsUsfns2M3wqNC7a2R5xdCqjKGGzZX4A
- AO3rz9se4J6Gd5oKijeCKFlWDGNHsibrdgm2pz42nZlY+O21X74dWKbt8O16I1MW
- hxkbJQKBgAXfuen/srVkJgPuqywUYag90VWCpHsuxdn+fZJa50SyZADr+RbiDfH2
- vek8Uo8ap8AEsv4Rfs9opUcUZevLp3g2741eOaidHVLm0l4iLIVl03otGOqvSzs+
- A3tFPEOxauXpzCt8f8eXsz0WQXAgIKW2h8zu5QHjomioU3i27mtE
- -----END RSA PRIVATE KEY-----
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97N root@xenial-lxd
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXrhOVAfzZ6+jklP
- 55mzvC7jO53PWWC31hq10xBoWdev0WtcNF9Tv+4bAa1263y51Rqo4GI7xx+xic1d
- mLqqfYijBT9k48J/1tV0cs1Wjs6FP/IJTD/kYVC930JjYQMi722lBnUxsQIVAL7i
- z3fTGKTvSzvW0wQlwnYpS2QFAoGANp+KdyS9V93HgxGQEN1rlj/TSv/a3EVdCKtE
- nQf55aPHxDAVDVw5JtRh4pZbbRV4oGRPc9KOdjo5BU28vSM3Lmhkb+UaaDXwHkgI
- nK193o74DKjADWZxuLyyiKHiMOhxozoxDfjWxs8nz6uqvSW0pr521EwIY6RajbED
- nZ2a3GkCgYEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pf
- Q2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2E
- wExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkICFA5kVUcW
- nCPOXEQsayANi8+Cb7BH
- -----END DSA PRIVATE KEY-----
- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM7nc9ZYLfWGrXTEGhZ16/Ra1w0X1O/7hsBrXbrfLnVGqjgYjvHH7GJzV2Yuqp9iKMFP2Tjwn/W1XRyzVaOzoU/8glMP+RhUL3fQmNhAyLvbaUGdTGxAAAAFQC+4s930xik70s71tMEJcJ2KUtkBQAAAIA2n4p3JL1X3ceDEZAQ3WuWP9NK/9rcRV0Iq0SdB/nlo8fEMBUNXDkm1GHillttFXigZE9z0o52OjkFTby9IzcuaGRv5RpoNfAeSAicrX3ejvgMqMANZnG4vLKIoeIw6HGjOjEN+NbGzyfPq6q9JbSmvnbUTAhjpFqNsQOdnZrcaQAAAIEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pfQ2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2EwExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkI= root@xenial-lxd
- ed25519_private: |
- -----BEGIN OPENSSH PRIVATE KEY-----
- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
- QyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+QAAAJgwt+lcMLfp
- XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+Q
- AAAEDQlFZpz9q8+/YJHS9+jPAqy2ZT6cGEv8HTB6RZtTjd/dudAZSu4vjZpVWzId5pXmZg
- 1M6G15dqjQ2XkNVOEnb5AAAAD3Jvb3RAeGVuaWFsLWx4ZAECAwQFBg==
- -----END OPENSSH PRIVATE KEY-----
- ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5 root@xenial-lxd
- ecdsa_private: |
- -----BEGIN EC PRIVATE KEY-----
- MHcCAQEEIDuK+QFc1wmyJY8uDqQVa1qHte30Rk/fdLxGIBkwJAyOoAoGCCqGSM49
- AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY5mpZqxgX4vcgb
- 7f/CtXuM6s2svcDJqAeXr6Wk8OJJcMxylA==
- -----END EC PRIVATE KEY-----
- ecdsa_public: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFsS5Tvky/IC/dXhE/afxxUG6kdQOvdQJCYGZN42OZqWasYF+L3IG+3/wrV7jOrNrL3AyagHl6+lpPDiSXDMcpQ= root@xenial-lxd
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.py b/tests/cloud_tests/testcases/modules/timezone.py
deleted file mode 100644
index 654fa53d..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTimezone(base.CloudTestCase):
- """Test timezone module."""
-
- def test_timezone(self):
- """Test date prints correct timezone."""
- out = self.get_data_file('timezone')
- self.assertEqual('HDT', out.rstrip())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.yaml b/tests/cloud_tests/testcases/modules/timezone.yaml
deleted file mode 100644
index 5112aa9f..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# Set system timezone
-#
-required_features:
- - daylight_time
-cloud_config: |
- #cloud-config
- timezone: US/Aleutian
-collect_scripts:
- timezone: |
- #!/bin/bash
- # date will convert this to system's configured time zone.
- # use a static date to avoid dealing with daylight savings.
- date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"
-
-# vi: ts=4 expandtab
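The timezone collect script above leans on date(1) converting a fixed RFC 2822 timestamp into the configured zone. A rough Python equivalent of the same check, assuming Python 3.9+ and system tzdata that provides the US/Aleutian zone:

from email.utils import parsedate_to_datetime
from zoneinfo import ZoneInfo

# 2016-11-03 is before the US DST switch, so US/Aleutian renders as HDT,
# matching the assertEqual('HDT', ...) in the verify script.
stamp = parsedate_to_datetime("Thu, 03 Nov 2016 00:47:00 -0400")
print(stamp.astimezone(ZoneInfo("US/Aleutian")).strftime("%Z"))  # HDT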
diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/user_groups.yaml b/tests/cloud_tests/testcases/modules/user_groups.yaml
deleted file mode 100644
index 91b0e281..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# Create groups and users with various options
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py
deleted file mode 100644
index 526a2ebd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary').strip()
- md5 = "3801184b97bb8c6e63fa0e1eae2920d7"
- sha256 = ("2c791c4037ea5bd7e928d6a87380f8ba7a803cd83d"
- "5e4f269e28f5090f0f2c9a")
- self.assertIn(out, (md5 + " -", sha256 + " -"))
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.yaml b/tests/cloud_tests/testcases/modules/write_files.yaml
deleted file mode 100644
index cc7ea4bd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Write various file types
-#
-# NOTE: on trusty 'file' has an output formatting error for binary files and
-# has 2 spaces in 'LSB executable', which causes a failure here
-#
-# NOTE: the binary data can be any binary data, not only executables
-# and can be generated via the base 64 command as such:
-# $ base64 < hello > hello.txt
-# the opposite is running:
-# $ base64 -d < hello.txt > hello
-#
-required_features:
- - no_file_fmt_e
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- for hasher in md5sum sha256sum; do
- $hasher </root/file_binary && break
- done
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
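The note in the write_files config above produces b64 payloads with the base64 command; the same round trip in Python, as an illustrative sketch (the sample bytes are arbitrary, not the exact fixture content):

import base64

raw = b"# any file content, text or binary"
encoded = base64.b64encode(raw).decode()  # value for a write_files entry with encoding: b64
assert base64.b64decode(encoded) == raw   # the reverse of '$ base64 -d'
print(encoded)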
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
deleted file mode 100644
index 7dcccbdd..00000000
--- a/tests/cloud_tests/util.py
+++ /dev/null
@@ -1,532 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Utilities for re-use across integration tests."""
-
-import base64
-import copy
-import glob
-import multiprocessing
-import os
-import random
-import shlex
-import shutil
-import string
-import subprocess
-import tempfile
-import time
-import yaml
-from contextlib import contextmanager
-
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import LOG
-
-OS_FAMILY_MAPPING = {
- 'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'rhel', 'fedora'],
- 'gentoo': ['gentoo'],
- 'freebsd': ['freebsd'],
- 'suse': ['sles'],
- 'arch': ['arch'],
-}
-
-
-def list_test_data(data_dir):
- """Find all tests with test data available in data_dir.
-
- @param data_dir: should contain <platforms>/<os_name>/<testnames>/<data>
- @return_value: {<platform>: {<os_name>: [<testname>]}}
- """
- if not os.path.isdir(data_dir):
- raise ValueError("bad data dir")
-
- res = {}
- for platform in os.listdir(data_dir):
- if not os.path.isdir(os.path.join(data_dir, platform)):
- continue
-
- res[platform] = {}
- for os_name in os.listdir(os.path.join(data_dir, platform)):
- res[platform][os_name] = [
- os.path.sep.join(f.split(os.path.sep)[-2:]) for f in
- glob.glob(os.sep.join((data_dir, platform, os_name, '*/*')))]
-
- LOG.debug('found test data: %s\n', res)
- return res
-
-
-def gen_instance_name(prefix='cloud-test', image_desc=None, use_desc=None,
- max_len=63, delim='-', max_tries=16, used_list=None,
- valid=string.ascii_lowercase + string.digits):
-    """Generate a unique name for a test instance.
-
- @param prefix: name prefix, defaults to cloud-test, default should be left
- @param image_desc: short string (len <= 16) with image desc
- @param use_desc: short string (len <= 30) with usage desc
-    @param max_len: maximum name length, defaults to 63 chars
- @param delim: delimiter to use between tokens
- @param max_tries: maximum tries to find a unique name before giving up
- @param used_list: already used names, or none to not check
- @param valid: string of valid characters for name
- @return_value: valid, unused name, may raise StopIteration
- """
- unknown = 'unknown'
-
- def join(*args):
- """Join args with delim."""
- return delim.join(args)
-
- def fill(*args):
- """Join name elems and fill rest with random data."""
- name = join(*args)
- num = max_len - len(name) - len(delim)
- return join(name, ''.join(random.choice(valid) for _ in range(num)))
-
- def clean(elem, max_len):
- """Filter bad characters out of elem and trim to length."""
- elem = elem.lower()[:max_len] if elem else unknown
- return ''.join(c if c in valid else delim for c in elem)
-
- return next(name for name in
- (fill(prefix, clean(image_desc, 16), clean(use_desc, 30))
- for _ in range(max_tries))
- if not used_list or name not in used_list)
-
-
-def sorted_unique(iterable, key=None, reverse=False):
- """Create unique sorted list.
-
- @param iterable: the data structure to sort
-    @param key: sort key function, passed through to sorted()
-    @param reverse: reverse the sort order if true
- @return_value: a sorted list of unique items in iterable
- """
- return sorted(set(iterable), key=key, reverse=reverse)
-
-
-def get_os_family(os_name):
- """Get os family type for os_name.
-
- @param os_name: name of os
- @return_value: family name for os_name
- """
- return next((k for k, v in OS_FAMILY_MAPPING.items()
- if os_name.lower() in v), None)
-
-
-def current_verbosity():
- """Get verbosity currently in effect from log level.
-
- @return_value: verbosity, 0-2, 2=verbose, 0=quiet
- """
- return max(min(3 - int(LOG.level / 10), 2), 0)
-
-
-@contextmanager
-def emit_dots_on_travis():
- """
- A context manager that emits a dot every 10 seconds if running on Travis.
-
- Travis will kill jobs that don't emit output for a certain amount of time.
- This context manager spins up a background process which will emit a dot to
- stdout every 10 seconds to avoid being killed.
-
- It should be wrapped selectively around operations that are known to take a
- long time.
- """
- if os.environ.get('TRAVIS') != "true":
- # If we aren't on Travis, don't do anything.
- yield
- return
-
- def emit_dots():
- while True:
- print(".")
- time.sleep(10)
-
- dot_process = multiprocessing.Process(target=emit_dots)
- dot_process.start()
- try:
- yield
- finally:
- dot_process.terminate()
-
-
-def is_writable_dir(path):
- """Make sure dir is writable.
-
- @param path: path to determine if writable
- @return_value: boolean with result
- """
- try:
- c_util.ensure_dir(path)
- os.remove(tempfile.mkstemp(dir=os.path.abspath(path))[1])
- except (IOError, OSError):
- return False
- return True
-
-
-def is_clean_writable_dir(path):
- """Make sure dir is empty and writable, creating it if it does not exist.
-
- @param path: path to check
- @return_value: True/False if successful
- """
- path = os.path.abspath(path)
- if not (is_writable_dir(path) and len(os.listdir(path)) == 0):
- return False
- return True
-
-
-def configure_yaml():
- """Clean yaml."""
- yaml.add_representer(str, (lambda dumper, data: dumper.represent_scalar(
- 'tag:yaml.org,2002:str', data, style='|' if '\n' in data else '')))
-
-
-def yaml_format(data, content_type=None):
- """Format data as yaml.
-
- @param data: data to dump
-    @param content_type: if specified, prepend a '#<content_type>' header line
- @return_value: yaml string
- """
- configure_yaml()
- content_type = (
- '#{}\n'.format(content_type.strip('#\n')) if content_type else '')
- return content_type + yaml.dump(data, indent=2, default_flow_style=False)
-
-
-def yaml_dump(data, path):
- """Dump data to path in yaml format."""
- c_util.write_file(os.path.abspath(path), yaml_format(data), omode='w')
-
-
-def merge_results(data, path):
- """Handle merging results from collect phase and verify phase."""
- current = {}
- if os.path.exists(path):
- with open(path, 'r') as fp:
- current = c_util.load_yaml(fp.read())
- current.update(data)
- yaml_dump(current, path)
-
-
-def rel_files(basedir):
- """List of files under directory by relative path, not including dirs.
-
- @param basedir: directory to search
-    @return_value: list of relative paths
- """
- basedir = os.path.normpath(basedir)
- return [path[len(basedir) + 1:] for path in
- glob.glob(os.path.join(basedir, '**'), recursive=True)
- if not os.path.isdir(path)]
-
-
-def flat_tar(output, basedir, owner='root', group='root'):
- """Create a flat tar archive (no leading ./) from basedir.
-
- @param output: output tar file to write
- @param basedir: base directory for archive
- @param owner: owner of archive files
- @param group: group archive files belong to
- @return_value: none
- """
- subp.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
- '-C', basedir] + rel_files(basedir), capture=True)
-
-
-def parse_conf_list(entries, valid=None, boolean=False):
- """Parse config in a list of strings in key=value format.
-
- @param entries: list of key=value strings
- @param valid: list of valid keys in result, return None if invalid input
- @param boolean: if true, then interpret all values as booleans
- @return_value: dict of configuration or None if invalid
- """
- res = {key: value.lower() == 'true' if boolean else value
- for key, value in (i.split('=') for i in entries)}
- return res if not valid or all(k in valid for k in res.keys()) else None
-
-
-def update_args(args, updates, preserve_old=True):
- """Update cmdline arguments from a dictionary.
-
- @param args: cmdline arguments
- @param updates: dictionary of {arg_name: new_value} mappings
- @param preserve_old: if true, create a deep copy of args before updating
- @return_value: updated cmdline arguments
- """
- args = copy.deepcopy(args) if preserve_old else args
- if updates:
- vars(args).update(updates)
- return args
-
-
-def update_user_data(user_data, updates, dump_to_yaml=True):
- """Update user_data from dictionary.
-
- @param user_data: user data as yaml string or dict
- @param updates: dictionary to merge with user data
- @param dump_to_yaml: return as yaml dumped string if true
- @return_value: updated user data, as yaml string if dump_to_yaml is true
- """
- user_data = (c_util.load_yaml(user_data)
- if isinstance(user_data, str) else copy.deepcopy(user_data))
- user_data.update(updates)
- return (yaml_format(user_data, content_type='cloud-config')
- if dump_to_yaml else user_data)
-
-
-def shell_safe(cmd):
-    """Produce a shell-safe string.
-
- Create a string that can be passed to:
- set -- <string>
- to produce the same array that cmd represents.
-
-    Internally we rely on 'getopt' and its knowledge of how to quote
-    strings so they are safe for the shell. This implementation could be
-    changed to pure python; it is just a matter of correctly escaping
-    or quoting characters like: ' " ^ & $ ; ( ) ...
-
- @param cmd: command as a list
- """
- out = subprocess.check_output(
- ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd))
- # out contains ' -- <data>\n'. drop the ' -- ' and the '\n'
- return out.decode()[4:-1]
-
-
-def shell_pack(cmd):
-    """Return a string that can be shuffled through 'sh' to execute cmd.
-
- In Python subprocess terms:
- check_output(cmd) == check_output(shell_pack(cmd), shell=True)
-
- @param cmd: list or string of command to pack up
- """
-
- if isinstance(cmd, str):
- cmd = [cmd]
- else:
- cmd = list(cmd)
-
- stuffed = shell_safe(cmd)
- # for whatever reason b64encode returns bytes when it is clearly
- # representable as a string by nature of being base64 encoded.
- b64 = base64.b64encode(stuffed.encode()).decode()
- return 'eval set -- "$(echo %s | base64 --decode)" && exec "$@"' % b64
-
-
-def shell_quote(cmd):
- if isinstance(cmd, (tuple, list)):
- return ' '.join([shlex.quote(x) for x in cmd])
- return shlex.quote(cmd)
-
-
-class TargetBase(object):
- _tmp_count = 0
-
- def execute(self, command, stdin=None, env=None,
- rcs=None, description=None):
- """Execute command in instance, recording output, error and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param command: the command to execute as root inside the image
- if command is a string, then it will be executed as:
- ['sh', '-c', command]
- @param stdin: bytes content for standard in
- @param env: environment variables
- @param rcs: return codes.
- None (default): non-zero exit code will raise exception.
-            False: any is allowed (No exception raised).
- list of int: any rc not in the list will raise exception.
- @param description: purpose of command
- @return_value: tuple containing stdout data, stderr data, exit code
- """
- if isinstance(command, str):
- command = ['sh', '-c', command]
-
- if rcs is None:
- rcs = (0,)
-
- if description:
- LOG.debug('executing "%s"', description)
- else:
- LOG.debug("executing command: %s", shell_quote(command))
-
- out, err, rc = self._execute(command=command, stdin=stdin, env=env)
-
- # False means accept anything.
- if (rcs is False or rc in rcs):
- return out, err, rc
-
- raise InTargetExecuteError(out, err, rc, command, description)
-
- def _execute(self, command, stdin=None, env=None):
-        """Execute command inside the target, return stdout, stderr and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param stdin: bytes content for standard in
- @param env: environment variables
- @return_value: tuple containing stdout data, stderr data, exit code
-
- This is intended to be implemented by the Image or Instance.
- Many callers will use the higher level 'execute'."""
- raise NotImplementedError("_execute must be implemented by subclass.")
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'read_data' is the program name, and 'remote_path' is '$1'
- stdout, _stderr, rc = self._execute(
- ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path])
- if rc != 0:
- raise RuntimeError("Failed to read file '%s'" % remote_path)
-
- if decode:
- return stdout.decode()
- return stdout
-
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'write_data' is the program name, and 'remote_path' is '$1'
- _, _, rc = self._execute(
- ["sh", "-c", 'exec cat >"$1"', 'write_data', remote_path],
- stdin=data)
-
- if rc != 0:
- raise RuntimeError("Failed to write to '%s'" % remote_path)
- return
-
- def pull_file(self, remote_path, local_path):
- """Copy file at 'remote_path', from instance to 'local_path'.
-
- @param remote_path: path on remote instance
- @param local_path: path on local instance
- """
- with open(local_path, 'wb') as fp:
- fp.write(self.read_data(remote_path))
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'.
-
- @param local_path: path on local instance
- @param remote_path: path on remote instance"""
- with open(local_path, "rb") as fp:
- self.write_data(remote_path, data=fp.read())
-
- def run_script(self, script, rcs=None, description=None):
- """Run script in target and return stdout.
-
- @param script: script contents
- @param rcs: allowed return codes from script
- @param description: purpose of script
- @return_value: stdout from script
- """
- # Just write to a file, add execute, run it, then remove it.
- shblob = '; '.join((
- 'set -e',
- 's="$1"',
- 'shift',
- 'cat > "$s"',
- 'trap "rm -f $s" EXIT',
- 'chmod +x "$s"',
- '"$s" "$@"'))
- return self.execute(
- ['sh', '-c', shblob, 'runscript', self.tmpfile()],
- stdin=script, description=description, rcs=rcs)
-
- def tmpfile(self):
- """Get a tmp file in the target.
-
- @return_value: path to new file in target
- """
- path = "/tmp/%s-%04d" % (type(self).__name__, self._tmp_count)
- self._tmp_count += 1
- return path
-
-
-class InTargetExecuteError(subp.ProcessExecutionError):
- """Error type for in target commands that fail."""
-
- default_desc = 'Unexpected error while running command.'
-
- def __init__(self, stdout, stderr, exit_code, cmd, description=None,
- reason=None):
- """Init error and parent error class."""
- super(InTargetExecuteError, self).__init__(
- stdout=stdout, stderr=stderr, exit_code=exit_code,
- cmd=shell_quote(cmd),
- description=description if description else self.default_desc,
- reason=reason)
-
-
-class PlatformError(IOError):
- """Error type for platform errors."""
-
- default_desc = 'unexpected error in platform.'
-
- def __init__(self, operation, description=None):
- """Init error and parent error class."""
- description = description if description else self.default_desc
-
- message = '%s: %s' % (operation, description)
- IOError.__init__(self, message)
-
-
-def mkdtemp(prefix='cloud_test_data'):
- return tempfile.mkdtemp(prefix=prefix)
-
-
-class TempDir(object):
- """Configurable temporary directory like tempfile.TemporaryDirectory."""
-
- def __init__(self, tmpdir=None, preserve=False, prefix='cloud_test_data_'):
- """Initialize.
-
- @param tmpdir: directory to use as tempdir
- @param preserve: if true, always preserve data on exit
- @param prefix: prefix to use for tempfile name
- """
- self.tmpdir = tmpdir
- self.preserve = preserve
- self.prefix = prefix
-
- def __enter__(self):
- """Create tempdir.
-
- @return_value: tempdir path
- """
- if not self.tmpdir:
- self.tmpdir = mkdtemp(prefix=self.prefix)
- LOG.debug('using tmpdir: %s', self.tmpdir)
- return self.tmpdir
-
- def __exit__(self, etype, value, trace):
- """Destroy tempdir if no errors occurred."""
- if etype or self.preserve:
- LOG.info('leaving data in %s', self.tmpdir)
- else:
- shutil.rmtree(self.tmpdir)
-
-# vi: ts=4 expandtab
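The removed shell_safe()/shell_pack() helpers above are built so that check_output(cmd) and check_output(shell_pack(cmd), shell=True) produce the same output. A self-contained sketch of that idea, using shlex.quote instead of the external getopt binary (an assumption, not the removed implementation), runnable on a system with a POSIX sh and a base64 that accepts --decode:

import base64
import shlex
import subprocess

def pack(cmd):
    # Quote for 'set --', then base64 the result so it survives any outer shell quoting.
    quoted = " ".join(shlex.quote(c) for c in cmd)
    b64 = base64.b64encode(quoted.encode()).decode()
    return 'eval set -- "$(echo %s | base64 --decode)" && exec "$@"' % b64

cmd = ["echo", "hello world; $HOME"]
assert subprocess.check_output(cmd) == subprocess.check_output(pack(cmd), shell=True)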
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
deleted file mode 100644
index 0295af40..00000000
--- a/tests/cloud_tests/verify.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Verify test results."""
-
-import os
-import unittest
-
-from tests.cloud_tests import (config, LOG, util, testcases)
-
-
-def verify_data(data_dir, platform, os_name, tests):
- """Verify test data is correct.
-
- @param data_dir: top level directory for all tests
-    @param platform: The platform name for this test data (e.g. lxd)
- @param os_name: The operating system under test (xenial, artful, etc.).
- @param tests: list of test names
- @return_value: {<test_name>: {passed: True/False, failures: []}}
- """
- base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
- res = {}
- for test_name in tests:
- LOG.debug('verifying test data for %s', test_name)
-
- # get cloudconfig for test
- test_conf = config.load_test_config(test_name)
- test_module = config.name_to_module(test_name)
- cloud_conf = test_conf['cloud_config']
-
- # load script outputs
- data = {'platform': platform, 'os_name': os_name}
- test_dir = os.path.join(base_dir, test_name)
- for script_name in os.listdir(test_dir):
- with open(os.path.join(test_dir, script_name), 'rb') as fp:
- data[script_name] = fp.read()
-
- # get test suite and launch tests
- suite = testcases.get_suite(test_module, data, cloud_conf)
- suite_results = runner.run(suite)
- res[test_name] = {
- 'passed': suite_results.wasSuccessful(),
- 'failures': [{'module': type(test_class).__base__.__module__,
- 'class': type(test_class).__base__.__name__,
- 'function': str(test_class).split()[0],
- 'error': trace.splitlines()[-1],
- 'traceback': trace, }
- for test_class, trace in suite_results.failures]
- }
-
- for failure in res[test_name]['failures']:
- LOG.warning('test case: %s failed %s.%s with: %s',
- test_name, failure['class'], failure['function'],
- failure['error'])
-
- return res
-
-
-def format_test_failures(test_result):
- """Return a human-readable printable format of test failures."""
- if not test_result['failures']:
- return ''
- failure_hdr = ' test failures:'
- failure_fmt = ' * {module}.{class}.{function}\n '
- output = []
- for failure in test_result['failures']:
- if not output:
- output = [failure_hdr]
- msg = failure_fmt.format(**failure)
- if failure.get('error'):
- msg += failure['error']
- else:
- msg += failure.get('traceback', '')
- output.append(msg)
- return '\n'.join(output)
-
-
-def format_results(res):
- """Return human-readable results as a string"""
- platform_hdr = 'Platform: {platform}'
- distro_hdr = ' Distro: {distro}'
- distro_summary_fmt = (
- ' test modules passed:{passed} tests failed:{failed}')
- output = ['']
- counts = {}
- for platform, platform_data in res.items():
- output.append(platform_hdr.format(platform=platform))
- counts[platform] = {}
- for distro, distro_data in platform_data.items():
- distro_failure_output = []
- output.append(distro_hdr.format(distro=distro))
- counts[platform][distro] = {'passed': 0, 'failed': 0}
- for _, test_result in distro_data.items():
- if test_result['passed']:
- counts[platform][distro]['passed'] += 1
- else:
- counts[platform][distro]['failed'] += len(
- test_result['failures'])
- failure_output = format_test_failures(test_result)
- if failure_output:
- distro_failure_output.append(failure_output)
- output.append(
- distro_summary_fmt.format(**counts[platform][distro]))
- if distro_failure_output:
- output.extend(distro_failure_output)
- return '\n'.join(output)
-
-
-def verify(args):
- """Verify test data.
-
- @param args: directory of test data
- @return_value: 0 for success, or number of failed tests
- """
- failed = 0
- res = {}
-
- # find test data
- tests = util.list_test_data(args.data_dir)
-
- for platform in tests.keys():
- res[platform] = {}
- for os_name in tests[platform].keys():
- test_name = "platform='{}', os='{}'".format(platform, os_name)
- LOG.info('test: %s verifying test data', test_name)
-
- # run test
- res[platform][os_name] = verify_data(
- args.data_dir, platform, os_name,
- tests[platform][os_name])
-
- # handle results
- fail_list = [k for k, v in res[platform][os_name].items()
- if not v.get('passed')]
- if len(fail_list) == 0:
- LOG.info('test: %s passed all tests', test_name)
- else:
- LOG.warning('test: %s failed %s tests', test_name,
- len(fail_list))
- failed += len(fail_list)
-
- # dump results
- LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
- if args.result:
- util.merge_results({'verify': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
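verify_data() above returns {<test_name>: {passed: ..., failures: [...]}} per platform and distro, and format_results() walks that nesting. A toy value in the same shape (contents invented purely for illustration):

results = {
    "lxd": {                       # platform
        "bionic": {                # os_name
            "modules/set_password": {"passed": True, "failures": []},
            "modules/timezone": {
                "passed": False,
                "failures": [{
                    "module": "tests.cloud_tests.testcases.modules.timezone",
                    "class": "TestTimezone",
                    "function": "test_timezone",
                    "error": "AssertionError: 'HDT' != 'UTC'",
                    "traceback": "Traceback (most recent call last): ...",
                }],
            },
        }
    }
}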
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
deleted file mode 100644
index ae935cc0..00000000
--- a/tests/configs/sample1.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-#apt_update: false
-#apt_upgrade: true
-packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
-
-#disable_root: False
-
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-
-ssh_import_id: [smoser ]
-
-#!/bin/sh
-
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-sm_misc:
- - &user_setup |
- set -x; exec > ~/user_setup.log 2>&1
- echo "starting at $(date -R)"
- echo "set -o vi" >> ~/.bashrc
- cat >> ~/.profile <<"EOF"
- export EDITOR=vi
- export DEB_BUILD_OPTIONS=parallel=4
- export PATH=/usr/lib/ccache:$PATH
- EOF
-
- mkdir ~/bin
- chmod 755 ~/bin
- cat > ~/bin/mdebuild <<"EOF"
- #!/bin/sh
- exec debuild --prepend-path /usr/lib/ccache "$@"
- EOF
- chmod 755 ~/bin/*
-
- #byobu-launcher-install
- byobu-ctrl-a screen 2>&1 || :
-
- echo "pinging 8.8.8.8"
- ping -c 4 8.8.8.8
-
-runcmd:
- - [ sudo, -Hu, ubuntu, sh, -c, '[ -e /var/log/cloud-init.log ] || exit 0; grep "cloud-init.*running" /var/log/cloud-init.log > ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ]
-
-
-byobu_by_default: user
diff --git a/tests/data/netinfo/sample-ipaddrshow-json b/tests/data/netinfo/sample-ipaddrshow-json
new file mode 100644
index 00000000..8f6a430c
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-json
@@ -0,0 +1,91 @@
+[
+ {
+ "ifindex": 1,
+ "ifname": "lo",
+ "flags": [
+ "LOOPBACK",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 65536,
+ "qdisc": "noqueue",
+ "operstate": "UNKNOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "loopback",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "127.0.0.1",
+ "prefixlen": 8,
+ "scope": "host",
+ "label": "lo",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ },
+ {
+ "family": "inet6",
+ "local": "::1",
+ "prefixlen": 128,
+ "scope": "host",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ },
+ {
+ "ifindex": 23,
+ "link_index": 24,
+ "ifname": "enp0s25",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 1500,
+ "qdisc": "noqueue",
+ "operstate": "UP",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "ether",
+ "address": "50:7b:9d:2c:af:91",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "link_netnsid": 0,
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "192.168.2.18",
+ "prefixlen": 24,
+ "metric": 100,
+ "broadcast": "192.168.2.255",
+ "scope": "global",
+ "dynamic": true,
+ "label": "enp0s25",
+ "valid_life_time": 2339,
+ "preferred_life_time": 2339
+ },
+ {
+ "family": "inet6",
+ "local": "fe80::7777:2222:1111:eeee",
+ "prefixlen": 64,
+ "scope": "global",
+ "dynamic": true,
+ "mngtmpaddr": true,
+ "noprefixroute": true,
+ "valid_life_time": 6823,
+ "preferred_life_time": 3223
+ },
+ {
+ "family": "inet6",
+ "local": "fe80::8107:2b92:867e:f8a6",
+ "prefixlen": 64,
+ "scope": "link",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ }
+]
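The new fixture above captures ip --json addr show output; a short sketch of walking such data with the standard json module (the path simply mirrors the fixture location and is only illustrative):

import json

with open("tests/data/netinfo/sample-ipaddrshow-json") as fp:
    links = json.load(fp)

for link in links:
    v4 = [a["local"] for a in link.get("addr_info", []) if a.get("family") == "inet"]
    print(link["ifname"], link.get("operstate"), v4)
# -> lo UNKNOWN ['127.0.0.1']
# -> enp0s25 UP ['192.168.2.18']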
diff --git a/tests/data/netinfo/sample-ipaddrshow-json-down b/tests/data/netinfo/sample-ipaddrshow-json-down
new file mode 100644
index 00000000..7ad5dde0
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-json-down
@@ -0,0 +1,57 @@
+[
+ {
+ "ifindex": 1,
+ "ifname": "lo",
+ "flags": [
+ "LOOPBACK",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 65536,
+ "qdisc": "noqueue",
+ "operstate": "UNKNOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "loopback",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "127.0.0.1",
+ "prefixlen": 8,
+ "scope": "host",
+ "label": "lo",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ },
+ {
+ "family": "inet6",
+ "local": "::1",
+ "prefixlen": 128,
+ "scope": "host",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ },
+ {
+ "ifindex": 23,
+ "link_index": 24,
+ "ifname": "eth0",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST"
+ ],
+ "mtu": 1500,
+ "qdisc": "noqueue",
+ "operstate": "DOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "ether",
+ "address": "00:16:3e:de:51:a6",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "link_netnsid": 0,
+ "addr_info": []
+ }
+]
diff --git a/tests/data/netinfo/sample-ipaddrshow-output b/tests/data/netinfo/sample-ipaddrshow-output
index b2fa2672..2aa3f90c 100644
--- a/tests/data/netinfo/sample-ipaddrshow-output
+++ b/tests/data/netinfo/sample-ipaddrshow-output
@@ -4,10 +4,9 @@
inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
2: enp0s25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
link/ether 50:7b:9d:2c:af:91 brd ff:ff:ff:ff:ff:ff
- inet 192.168.2.18/24 brd 192.168.2.255 scope global dynamic enp0s25
+ inet 192.168.2.18/24 metric 100 brd 192.168.2.255 scope global dynamic enp0s25
valid_lft 84174sec preferred_lft 84174sec
inet6 fe80::7777:2222:1111:eeee/64 scope global
valid_lft forever preferred_lft forever
inet6 fe80::8107:2b92:867e:f8a6/64 scope link
valid_lft forever preferred_lft forever
-
diff --git a/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
new file mode 100644
index 00000000..c7d7844b
--- /dev/null
+++ b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
@@ -0,0 +1,504 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+S'https://'
+S'ftp://'
+tp53
+sS'sys_cfg'
+p54
+(dp55
+S'output'
+p56
+(dp57
+S'all'
+p58
+S'| tee -a /var/log/cloud-init-output.log'
+p59
+ssS'users'
+p60
+(lp61
+S'default'
+p62
+asS'def_log_file'
+p63
+S'/var/log/cloud-init.log'
+p64
+sS'cloud_final_modules'
+p65
+(lp66
+S'rightscale_userdata'
+p67
+aS'scripts-vendor'
+p68
+aS'scripts-per-once'
+p69
+aS'scripts-per-boot'
+p70
+aS'scripts-per-instance'
+p71
+aS'scripts-user'
+p72
+aS'ssh-authkey-fingerprints'
+p73
+aS'keys-to-console'
+p74
+aS'phone-home'
+p75
+aS'final-message'
+p76
+aS'power-state-change'
+p77
+asS'disable_root'
+p78
+I01
+sS'syslog_fix_perms'
+p79
+S'syslog:adm'
+p80
+sS'log_cfgs'
+p81
+(lp82
+(lp83
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p84
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p85
+aa(lp86
+g84
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p87
+aasS'cloud_init_modules'
+p88
+(lp89
+S'migrator'
+p90
+aS'seed_random'
+p91
+aS'bootcmd'
+p92
+aS'write-files'
+p93
+aS'growpart'
+p94
+aS'resizefs'
+p95
+aS'set_hostname'
+p96
+aS'update_hostname'
+p97
+aS'update_etc_hosts'
+p98
+aS'ca-certs'
+p99
+aS'rsyslog'
+p100
+aS'users-groups'
+p101
+aS'ssh'
+p102
+asS'preserve_hostname'
+p103
+I00
+sS'_log'
+p104
+(lp105
+g84
+ag87
+ag85
+asS'datasource_list'
+p106
+(lp107
+S'NoCloud'
+p108
+aS'ConfigDrive'
+p109
+aS'OpenNebula'
+p110
+aS'Azure'
+p111
+aS'AltCloud'
+p112
+aS'OVF'
+p113
+aS'MAAS'
+p114
+aS'GCE'
+p115
+aS'OpenStack'
+p116
+aS'CloudSigma'
+p117
+aS'Ec2'
+p118
+aS'CloudStack'
+p119
+aS'SmartOS'
+p120
+aS'None'
+p121
+asS'vendor_data'
+p122
+(dp123
+S'prefix'
+p124
+(lp125
+sS'enabled'
+p126
+I01
+ssS'cloud_config_modules'
+p127
+(lp128
+S'emit_upstart'
+p129
+aS'disk_setup'
+p130
+aS'mounts'
+p131
+aS'ssh-import-id'
+p132
+aS'locale'
+p133
+aS'set-passwords'
+p134
+aS'grub-dpkg'
+p135
+aS'apt-pipelining'
+p136
+aS'apt-configure'
+p137
+aS'package-update-upgrade-install'
+p138
+aS'landscape'
+p139
+aS'timezone'
+p140
+aS'puppet'
+p141
+aS'chef'
+p142
+aS'salt-minion'
+p143
+aS'mcollective'
+p144
+aS'disable-ec2-metadata'
+p145
+aS'runcmd'
+p146
+aS'byobu'
+p147
+assg14
+Nsg16
+Nsg18
+S'#cloud-config\n{}\n\n'
+p148
+sg29
+S'#cloud-config\n{}\n\n'
+p149
+sS'dsmode'
+p150
+S'net'
+p151
+sS'seed'
+p152
+S'/var/lib/cloud/seed/nocloud-net'
+p153
+sS'cmdline_id'
+p154
+S'ds=nocloud-net'
+p155
+sS'ud_proc'
+p156
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p157
+g3
+NtRp158
+(dp159
+g6
+g8
+sS'ssl_details'
+p160
+(dp161
+sbsg50
+g153
+sS'ds_cfg'
+p162
+(dp163
+sS'distro'
+p164
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p165
+g3
+NtRp166
+(dp167
+S'osfamily'
+p168
+S'debian'
+p169
+sS'_paths'
+p170
+g8
+sS'name'
+p171
+S'ubuntu'
+p172
+sS'_runner'
+p173
+g1
+(ccloudinit.helpers
+Runners
+p174
+g3
+NtRp175
+(dp176
+g6
+g8
+sS'sems'
+p177
+(dp178
+sbsS'_cfg'
+p179
+(dp180
+S'paths'
+p181
+(dp182
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p183
+(dp184
+S'shell'
+p185
+S'/bin/bash'
+p186
+sS'name'
+p187
+S'ubuntu'
+p188
+sS'sudo'
+p189
+(lp190
+S'ALL=(ALL) NOPASSWD:ALL'
+p191
+asS'lock_passwd'
+p192
+I01
+sS'gecos'
+p193
+S'Ubuntu'
+p194
+sS'groups'
+p195
+(lp196
+S'adm'
+p197
+aS'audio'
+p198
+aS'cdrom'
+p199
+aS'dialout'
+p200
+aS'dip'
+p201
+aS'floppy'
+p202
+aS'netdev'
+p203
+aS'plugdev'
+p204
+aS'sudo'
+p205
+aS'video'
+p206
+assS'package_mirrors'
+p207
+(lp208
+(dp209
+S'arches'
+p210
+(lp211
+S'i386'
+p212
+aS'amd64'
+p213
+asS'failsafe'
+p214
+(dp215
+S'security'
+p216
+S'http://security.ubuntu.com/ubuntu'
+p217
+sS'primary'
+p218
+S'http://archive.ubuntu.com/ubuntu'
+p219
+ssS'search'
+p220
+(dp221
+S'security'
+p222
+(lp223
+sS'primary'
+p224
+(lp225
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p226
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p227
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p228
+assa(dp229
+S'arches'
+p230
+(lp231
+S'armhf'
+p232
+aS'armel'
+p233
+aS'default'
+p234
+asS'failsafe'
+p235
+(dp236
+S'security'
+p237
+S'http://ports.ubuntu.com/ubuntu-ports'
+p238
+sS'primary'
+p239
+S'http://ports.ubuntu.com/ubuntu-ports'
+p240
+ssasS'ssh_svcname'
+p241
+S'ssh'
+p242
+ssbsS'metadata'
+p243
+(dp244
+g150
+g151
+sS'local-hostname'
+p245
+S'trusty-upgrade2'
+p246
+sS'instance-id'
+p247
+S'trusty-upgrade2'
+p248
+ssb. \ No newline at end of file
diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py
new file mode 100644
index 00000000..81f9b02f
--- /dev/null
+++ b/tests/integration_tests/__init__.py
@@ -0,0 +1,14 @@
+import random
+
+
+def random_mac_address() -> str:
+ """Generate a random MAC address.
+
+    The MAC address will have a 1 in the second-least-significant bit of its
+    first octet, marking it as a locally administered address.
+ """
+ return "02:00:00:%02x:%02x:%02x" % (
+ random.randint(0, 255),
+ random.randint(0, 255),
+ random.randint(0, 255),
+ )
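
As an aside, a minimal sketch of how the locally administered bit on an address from random_mac_address() could be checked; only the helper itself comes from the module above, the example value and assertions are illustrative:

    from tests.integration_tests import random_mac_address

    mac = random_mac_address()  # e.g. "02:00:00:3a:7f:c1"
    first_octet = int(mac.split(":")[0], 16)
    # 0x02 sets the locally administered bit and leaves the multicast bit clear
    assert first_octet & 0b10
    assert not first_octet & 0b01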
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1
new file mode 100644
index 00000000..bd4c822e
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye
+t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+
+yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY
+lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN
+HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw
+Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO
+geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2
+EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL
+NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/
+rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n
+vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2
+euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS
+khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg
+RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN
+oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT
+Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT
+tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi
+vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq
+KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA
+w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+
+qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6
++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA
+AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp
+60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E
+uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC
+77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ
+aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk
+cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb
+Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb
+GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB
+/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g
+0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I
+bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4
+CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub
new file mode 100644
index 00000000..3d2e26e1
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2
new file mode 100644
index 00000000..5854d901
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO
+LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+
+3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO
+i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH
+m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2
+17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5
+qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS
+yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2
+EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ
+A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq
+0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4
+qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP
+CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T
+wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH
+BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5
+ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+
+aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV
+RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd
+eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34
+qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql
+rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB
+w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy
+dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA
+wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj
+c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr
+IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy
+Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv
+vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u
+U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv
+/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9
+mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV
+zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd
+E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS
+0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD
+tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub
new file mode 100644
index 00000000..f3831a57
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3
new file mode 100644
index 00000000..2596c762
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV
+yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG
+bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH
+9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ
+ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV
+O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr
+jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm
+Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2
+EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL
+6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy
+fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e
+fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ
+iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f
+2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA
+QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ
+HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n
+Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK
+WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV
+JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ
+vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR
+2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8
+1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d
+DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA
+wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P
+yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy
+QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2
+0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k
+mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi
+uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9
+3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr
+VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM
+2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM
+GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa
+e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x
+eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub
new file mode 100644
index 00000000..057db632
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host
diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl
new file mode 100644
index 00000000..65ae93e5
--- /dev/null
+++ b/tests/integration_tests/assets/test_version_change.pkl
Binary files differ
diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl
new file mode 100644
index 00000000..a4089ecf
--- /dev/null
+++ b/tests/integration_tests/assets/trusty_with_mime.pkl
@@ -0,0 +1,572 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+p53
+S'https://'
+p54
+S'ftp://'
+p55
+tp56
+sS'sys_cfg'
+p57
+(dp58
+S'output'
+p59
+(dp60
+S'all'
+p61
+S'| tee -a /var/log/cloud-init-output.log'
+p62
+ssS'users'
+p63
+(lp64
+S'default'
+p65
+asS'def_log_file'
+p66
+S'/var/log/cloud-init.log'
+p67
+sS'cloud_final_modules'
+p68
+(lp69
+S'rightscale_userdata'
+p70
+aS'scripts-vendor'
+p71
+aS'scripts-per-once'
+p72
+aS'scripts-per-boot'
+p73
+aS'scripts-per-instance'
+p74
+aS'scripts-user'
+p75
+aS'ssh-authkey-fingerprints'
+p76
+aS'keys-to-console'
+p77
+aS'phone-home'
+p78
+aS'final-message'
+p79
+aS'power-state-change'
+p80
+asS'disable_root'
+p81
+I01
+sS'syslog_fix_perms'
+p82
+S'syslog:adm'
+p83
+sS'log_cfgs'
+p84
+(lp85
+(lp86
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p87
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p88
+aa(lp89
+g87
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p90
+aasS'cloud_init_modules'
+p91
+(lp92
+S'migrator'
+p93
+aS'seed_random'
+p94
+aS'bootcmd'
+p95
+aS'write-files'
+p96
+aS'growpart'
+p97
+aS'resizefs'
+p98
+aS'set_hostname'
+p99
+aS'update_hostname'
+p100
+aS'update_etc_hosts'
+p101
+aS'ca-certs'
+p102
+aS'rsyslog'
+p103
+aS'users-groups'
+p104
+aS'ssh'
+p105
+asS'preserve_hostname'
+p106
+I00
+sS'_log'
+p107
+(lp108
+g87
+ag90
+ag88
+asS'datasource_list'
+p109
+(lp110
+S'NoCloud'
+p111
+aS'ConfigDrive'
+p112
+aS'OpenNebula'
+p113
+aS'Azure'
+p114
+aS'AltCloud'
+p115
+aS'OVF'
+p116
+aS'MAAS'
+p117
+aS'GCE'
+p118
+aS'OpenStack'
+p119
+aS'CloudSigma'
+p120
+aS'Ec2'
+p121
+aS'CloudStack'
+p122
+aS'SmartOS'
+p123
+aS'None'
+p124
+asS'vendor_data'
+p125
+(dp126
+S'prefix'
+p127
+(lp128
+sS'enabled'
+p129
+I01
+ssS'cloud_config_modules'
+p130
+(lp131
+S'emit_upstart'
+p132
+aS'disk_setup'
+p133
+aS'mounts'
+p134
+aS'ssh-import-id'
+p135
+aS'locale'
+p136
+aS'set-passwords'
+p137
+aS'grub-dpkg'
+p138
+aS'apt-pipelining'
+p139
+aS'apt-configure'
+p140
+aS'package-update-upgrade-install'
+p141
+aS'landscape'
+p142
+aS'timezone'
+p143
+aS'puppet'
+p144
+aS'chef'
+p145
+aS'salt-minion'
+p146
+aS'mcollective'
+p147
+aS'disable-ec2-metadata'
+p148
+aS'runcmd'
+p149
+aS'byobu'
+p150
+assg14
+(iemail.mime.multipart
+MIMEMultipart
+p151
+(dp152
+S'_headers'
+p153
+(lp154
+(S'Content-Type'
+p155
+S'multipart/mixed; boundary="===============4291038100093149247=="'
+tp156
+a(S'MIME-Version'
+p157
+S'1.0'
+p158
+tp159
+a(S'Number-Attachments'
+p160
+S'1'
+tp161
+asS'_payload'
+p162
+(lp163
+(iemail.mime.base
+MIMEBase
+p164
+(dp165
+g153
+(lp166
+(g157
+g158
+tp167
+a(S'Content-Type'
+p168
+S'text/x-not-multipart'
+tp169
+a(S'Content-Disposition'
+p170
+S'attachment; filename="part-001"'
+tp171
+asg162
+S''
+sS'_charset'
+p172
+NsS'_default_type'
+p173
+S'text/plain'
+p174
+sS'preamble'
+p175
+NsS'defects'
+p176
+(lp177
+sS'_unixfrom'
+p178
+NsS'epilogue'
+p179
+Nsbasg172
+Nsg173
+g174
+sg175
+Nsg176
+(lp180
+sg178
+Nsg179
+Nsbsg16
+S'#cloud-config\n{}\n\n'
+p181
+sg18
+S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n'
+p182
+sg29
+NsS'dsmode'
+p183
+S'net'
+p184
+sS'seed'
+p185
+S'/var/lib/cloud/seed/nocloud-net'
+p186
+sS'cmdline_id'
+p187
+S'ds=nocloud-net'
+p188
+sS'ud_proc'
+p189
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p190
+g3
+NtRp191
+(dp192
+g6
+g8
+sS'ssl_details'
+p193
+(dp194
+sbsg50
+g186
+sS'ds_cfg'
+p195
+(dp196
+sS'distro'
+p197
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p198
+g3
+NtRp199
+(dp200
+S'osfamily'
+p201
+S'debian'
+p202
+sS'_paths'
+p203
+g8
+sS'name'
+p204
+S'ubuntu'
+p205
+sS'_runner'
+p206
+g1
+(ccloudinit.helpers
+Runners
+p207
+g3
+NtRp208
+(dp209
+g6
+g8
+sS'sems'
+p210
+(dp211
+sbsS'_cfg'
+p212
+(dp213
+S'paths'
+p214
+(dp215
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p216
+(dp217
+S'shell'
+p218
+S'/bin/bash'
+p219
+sS'name'
+p220
+S'ubuntu'
+p221
+sS'sudo'
+p222
+(lp223
+S'ALL=(ALL) NOPASSWD:ALL'
+p224
+asS'lock_passwd'
+p225
+I01
+sS'gecos'
+p226
+S'Ubuntu'
+p227
+sS'groups'
+p228
+(lp229
+S'adm'
+p230
+aS'audio'
+p231
+aS'cdrom'
+p232
+aS'dialout'
+p233
+aS'dip'
+p234
+aS'floppy'
+p235
+aS'netdev'
+p236
+aS'plugdev'
+p237
+aS'sudo'
+p238
+aS'video'
+p239
+assS'package_mirrors'
+p240
+(lp241
+(dp242
+S'arches'
+p243
+(lp244
+S'i386'
+p245
+aS'amd64'
+p246
+asS'failsafe'
+p247
+(dp248
+S'security'
+p249
+S'http://security.ubuntu.com/ubuntu'
+p250
+sS'primary'
+p251
+S'http://archive.ubuntu.com/ubuntu'
+p252
+ssS'search'
+p253
+(dp254
+S'security'
+p255
+(lp256
+sS'primary'
+p257
+(lp258
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p259
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p260
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p261
+assa(dp262
+S'arches'
+p263
+(lp264
+S'armhf'
+p265
+aS'armel'
+p266
+aS'default'
+p267
+asS'failsafe'
+p268
+(dp269
+S'security'
+p270
+S'http://ports.ubuntu.com/ubuntu-ports'
+p271
+sS'primary'
+p272
+S'http://ports.ubuntu.com/ubuntu-ports'
+p273
+ssasS'ssh_svcname'
+p274
+S'ssh'
+p275
+ssbsS'metadata'
+p276
+(dp277
+g183
+g184
+sS'local-hostname'
+p278
+S'me'
+p279
+sS'instance-id'
+p280
+S'me'
+p281
+ssb. \ No newline at end of file
diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py
new file mode 100644
index 00000000..e98ab5d0
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh570.py
@@ -0,0 +1,39 @@
+"""Integration test for #570.
+
+Test that we can add optional vendor-data to the seedfrom location in a
+NoCloud environment.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+VENDOR_DATA = """\
+#cloud-config
+runcmd:
+ - touch /var/tmp/seeded_vendordata_test_file
+"""
+
+
+# Only running on LXD because we need NoCloud for this test
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
+ seed_dir = "/var/tmp/test_seed_dir"
+ result = client.execute(
+ "mkdir {seed_dir} && "
+ "touch {seed_dir}/user-data && "
+ "touch {seed_dir}/meta-data && "
+ "echo 'seedfrom: {seed_dir}/' > "
+ "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir)
+ )
+ assert result.return_code == 0
+
+ client.write_to_file(
+ "{}/vendor-data".format(seed_dir),
+ VENDOR_DATA,
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+ assert client.execute("cloud-init status").ok
+ assert "seeded_vendordata_test_file" in client.execute("ls /var/tmp")
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py
new file mode 100644
index 00000000..b80b677a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh626.py
@@ -0,0 +1,43 @@
+"""Integration test for gh-626.
+
+Ensure that if wakeonlan is specified in the network config, it is rendered
+in /etc/network/interfaces or the netplan config.
+"""
+
+import pytest
+import yaml
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.instances import IntegrationInstance
+
+MAC_ADDRESS = random_mac_address()
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ wakeonlan: true
+ match:
+ macaddress: {}
+""".format(
+ MAC_ADDRESS
+)
+
+EXPECTED_ENI_END = """\
+iface eth0 inet dhcp
+ ethernet-wol g"""
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+def test_wakeonlan(client: IntegrationInstance):
+ netplan_cfg = client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ netplan_yaml = yaml.safe_load(netplan_cfg)
+ assert "wakeonlan" in netplan_yaml["network"]["ethernets"]["eth0"]
+ assert netplan_yaml["network"]["ethernets"]["eth0"]["wakeonlan"] is True
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
new file mode 100644
index 00000000..c7a897c6
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -0,0 +1,33 @@
+"""Integration test for gh-632.
+
+Verify that if cloud-init is using DataSourceRbxCloud, there is
+no traceback if the metadata disk cannot be found.
+"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+# With some datasource hacking, we can run this on a NoCloud instance
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/90_dpkg.cfg",
+ "datasource_list: [ RbxCloud, NoCloud ]\n",
+ )
+ client.write_to_file(
+ "/etc/cloud/ds-identify.cfg",
+ "policy: enabled\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert "Failed to load metadata and userdata" not in log
+ assert (
+ "Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
+ "DataSourceRbxCloud'> failed" not in log
+ )
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
new file mode 100644
index 00000000..95edb48d
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -0,0 +1,46 @@
+"""Integration test for gh-668.
+
+Ensure that a static route to a host is working correctly.
+The original problem is specific to the ENI renderer, but this test is
+suitable for all network configuration outputs.
+"""
+
+import pytest
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.instances import IntegrationInstance
+
+DESTINATION_IP = "172.16.0.10"
+GATEWAY_IP = "10.0.0.100"
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ addresses: [10.0.0.10/8]
+ dhcp4: false
+ routes:
+ - to: {}/32
+ via: {}
+ match:
+ macaddress: {}
+""".format(
+ DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS
+)
+
+EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+@pytest.mark.lxd_use_exec
+def test_static_route_to_host(client: IntegrationInstance):
+ route = client.execute("ip route | grep {}".format(DESTINATION_IP))
+ assert route.startswith(EXPECTED_ROUTE)
diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py
new file mode 100644
index 00000000..2d7c8118
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh671.py
@@ -0,0 +1,53 @@
+"""Integration test for gh-671.
+
+Verify that, on Azure, if a default user and password are specified
+through the Azure API, a change in the default password overwrites
+the old password.
+"""
+
+import crypt
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+OLD_PASSWORD = "DoIM33tTheComplexityRequirements!??"
+NEW_PASSWORD = "DoIM33tTheComplexityRequirementsNow!??"
+
+
+def _check_password(instance, unhashed_password):
+ shadow_password = instance.execute("getent shadow ubuntu").split(":")[1]
+ salt = shadow_password.rsplit("$", 1)[0]
+ hashed_password = crypt.crypt(unhashed_password, salt)
+ assert shadow_password == hashed_password
+
+
+@pytest.mark.azure
+def test_update_default_password(setup_image, session_cloud: IntegrationCloud):
+ os_profile = {
+ "os_profile": {
+ "admin_password": "",
+ "linux_configuration": {"disable_password_authentication": False},
+ }
+ }
+ os_profile["os_profile"]["admin_password"] = OLD_PASSWORD
+ instance1 = session_cloud.launch(launch_kwargs={"vm_params": os_profile})
+
+ _check_password(instance1, OLD_PASSWORD)
+
+ snapshot_id = instance1.cloud.cloud_instance.snapshot(
+ instance1.instance, delete_provisioned_user=False
+ )
+
+ os_profile["os_profile"]["admin_password"] = NEW_PASSWORD
+ try:
+ with session_cloud.launch(
+ launch_kwargs={
+ "image_id": snapshot_id,
+ "vm_params": os_profile,
+ }
+ ) as instance2:
+ _check_password(instance2, NEW_PASSWORD)
+ finally:
+ session_cloud.cloud_instance.delete_image(snapshot_id)
+ instance1.destroy()
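
The _check_password helper above leans on the fact that crypt.crypt(), when handed the "$id$salt" prefix of an existing shadow entry, reproduces the full hash. A small standalone sketch of that property (the hashed value here is generated on the fly, not taken from a real instance):

    import crypt

    password = "DoIM33tTheComplexityRequirements!??"
    stored = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
    salt = stored.rsplit("$", 1)[0]  # "$6$<salt>", as in _check_password
    # Hashing again with the same salt prefix yields the identical entry
    assert crypt.crypt(password, salt) == stored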
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
new file mode 100644
index 00000000..a62e8b36
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -0,0 +1,27 @@
+"""Ensure no Traceback when 'chef_license' is set"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+USERDATA = """\
+#cloud-config
+chef:
+ install_type: omnibus
+ chef_license: accept
+ server_url: https://chef.yourorg.invalid
+ validation_name: some-validator
+"""
+
+
+@pytest.mark.adhoc # Can't be regularly reaching out to chef install script
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.azure
+@pytest.mark.oci
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.user_data(USERDATA)
+def test_chef_license(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
new file mode 100644
index 00000000..ddae02f5
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -0,0 +1,31 @@
+"""Integration test for lp-1813396
+
+Ensure gpg is called with no tty flag.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+apt:
+ sources:
+ cloudinit:
+ source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main'
+ keyserver: keyserver.ubuntu.com
+ keyid: E4D304DF
+""" # noqa: E501
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_gpg_no_tty(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ to_verify = [
+ "Running command ['gpg', '--no-tty', "
+ "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] "
+ "with allowed return codes [0] (shell=False, capture=True)",
+ "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'",
+ ]
+ verify_ordered_items_in_text(to_verify, log)
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..765d73ef
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,101 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase.
+More recent kernels switched to lowercase for DMI product_uuid. Azure
+datasource uses this product_uuid as the instance-id for cloud-init.
+
+With the linux-azure-fips kernel installed in PRO FIPS images, the product
+UUID is uppercase, whereas the linux-azure cloud-optimized kernel reports the
+UUID as lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase. Then upgrade
+and reboot into linux-azure kernel which is known to report product_uuid as
+lowercase.
+
+Across the reboot, assert that we didn't re-run config_ssh by virtue of
+seeing only one semaphore creation log entry of type:
+
+ Writing to /var/lib/cloud/instances/<UUID>/sem/config_ssh -
+
+https://bugs.launchpad.net/cloud-init/+bug/1835584
+"""
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.instances import IntegrationInstance
+
+IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
+ "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
+)
+
+
+def _check_iid_insensitive_across_kernel_upgrade(
+ instance: IntegrationInstance,
+):
+ uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.isupper()
+ ), "Expected uppercase UUID on Ubuntu FIPS image {}".format(uuid)
+ orig_kernel = instance.execute("uname -r").strip()
+ assert "azure-fips" in orig_kernel
+ result = instance.execute("apt-get update")
+ # Install a 5.4+ kernel which provides lowercase product_uuid
+ result = instance.execute("apt-get install linux-azure --assume-yes")
+ if not result.ok:
+ pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+ # Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel
+ result = instance.execute("ua disable fips --assume-yes")
+ assert result.ok, "Unable to disable fips: {}".format(result)
+ instance.restart()
+ new_kernel = instance.execute("uname -r").strip()
+ assert orig_kernel != new_kernel
+ assert "azure-fips" not in new_kernel
+ assert "azure" in new_kernel
+ new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.lower() == new_uuid
+ ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
+ ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
+ assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
+
+
+@pytest.mark.azure
+def test_azure_kernel_upgrade_case_insensitive_uuid(
+ session_cloud: IntegrationCloud,
+):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
+ pytest.skip(
+ "Test only supports ubuntu:bionic not {0.os}:{0.release}".format(
+ cfg_image_spec
+ )
+ )
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(
+ "Provide CLOUD_INIT_SOURCE to install expected working cloud-init"
+ )
+ image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC
+ with session_cloud.launch(
+ launch_kwargs={"image_id": image_id}
+ ) as instance:
+ # We can't use setup_image fixture here because we want to avoid
+ # taking a snapshot or cleaning the booted machine after cloud-init
+ # upgrade.
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ _check_iid_insensitive_across_kernel_upgrade(instance)
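
The behaviour under test boils down to treating instance IDs that differ only in case as the same instance. A hedged illustration of that comparison follows; the function name and logic are illustrative and do not claim to mirror cloud-init's actual implementation:

    def is_new_instance(current_iid: str, previous_iid: str) -> bool:
        """Treat IDs differing only by case (e.g. DMI product UUIDs) as equal."""
        return current_iid.lower() != previous_iid.lower()

    # Illustrative UUIDs: same value, different case, so not a "new" instance
    assert not is_new_instance(
        "8E1F0FD5-3C32-4B57-9A6B-1A2B3C4D5E6F",
        "8e1f0fd5-3c32-4b57-9a6b-1a2b3c4d5e6f",
    )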
diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py
index 058ea8bb..d56ca320 100644
--- a/tests/integration_tests/bugs/test_lp1886531.py
+++ b/tests/integration_tests/bugs/test_lp1886531.py
@@ -11,6 +11,7 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531
"""
import pytest
+from tests.integration_tests.util import verify_clean_log
USER_DATA = """\
#cloud-config
@@ -20,8 +21,7 @@ bootcmd:
class TestLp1886531:
-
@pytest.mark.user_data(USER_DATA)
def test_lp1886531(self, client):
log_content = client.read_from_file("/var/log/cloud-init.log")
- assert "WARNING" not in log_content
+ verify_clean_log(log_content)
diff --git a/tests/integration_tests/bugs/test_lp1897099.py b/tests/integration_tests/bugs/test_lp1897099.py
index 27c8927f..1f5030ce 100644
--- a/tests/integration_tests/bugs/test_lp1897099.py
+++ b/tests/integration_tests/bugs/test_lp1897099.py
@@ -7,7 +7,6 @@ https://bugs.launchpad.net/cloud-init/+bug/1897099
import pytest
-
USER_DATA = """\
#cloud-config
bootcmd:
@@ -19,13 +18,12 @@ swap:
"""
-@pytest.mark.sru_2020_11
@pytest.mark.user_data(USER_DATA)
-@pytest.mark.no_container('Containers cannot configure swap')
+@pytest.mark.no_container("Containers cannot configure swap")
def test_fallocate_fallback(client):
- log = client.read_from_file('/var/log/cloud-init.log')
- assert '/swap.img' in client.execute('cat /proc/swaps')
- assert '/swap.img' in client.execute('cat /etc/fstab')
- assert 'fallocate swap creation failed, will attempt with dd' in log
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "/swap.img" in client.execute("cat /proc/swaps")
+ assert "/swap.img" in client.execute("cat /etc/fstab")
+ assert "fallocate swap creation failed, will attempt with dd" in log
assert "Running command ['dd', 'if=/dev/zero', 'of=/swap.img'" in log
- assert 'SUCCESS: config-mounts ran successfully' in log
+ assert "SUCCESS: config-mounts ran successfully" in log
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
new file mode 100644
index 00000000..d8ea54c3
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -0,0 +1,77 @@
+"""Integration test for LP: #1898997
+
+cloud-init was incorrectly excluding Open vSwitch bridge members from its list
+of interfaces. This meant that instances which had only one interface which
+was in an Open vSwitch bridge would not boot correctly: cloud-init would not
+find the expected physical interfaces, so would not apply network config.
+
+This test checks that cloud-init believes it has successfully applied the
+network configuration, and confirms that the bridge can be used to ping the
+default gateway.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.util import verify_clean_log
+
+MAC_ADDRESS = random_mac_address()
+
+
+NETWORK_CONFIG = """\
+bridges:
+ ovs-br:
+ dhcp4: true
+ interfaces:
+ - enp5s0
+ macaddress: 52:54:00:d9:08:1c
+ mtu: 1500
+ openvswitch: {{}}
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {}
+version: 2
+""".format(
+ MAC_ADDRESS
+)
+
+
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
+@pytest.mark.not_bionic
+@pytest.mark.ubuntu
+class TestInterfaceListingWithOpenvSwitch:
+ def test_ovs_member_interfaces_not_excluded(self, client):
+        # We need to install openvswitch for our provided network configuration
+        # to apply (on next boot), so run DHCP on our default interface to fetch it
+ client.execute("dhclient enp5s0")
+ client.execute("apt update -qqy")
+ client.execute("apt-get install -qqy openvswitch-switch")
+
+ # Now our networking config should successfully apply on a clean reboot
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
+
+ # Confirm that the network configuration was applied successfully
+ verify_clean_log(cloudinit_output)
+ # Confirm that the applied network config created the OVS bridge
+ assert "ovs-br" in client.execute("ip addr")
+
+ # Test that we can ping our gateway using our bridge
+ gateway = client.execute(
+ "ip -4 route show default | awk '{ print $3 }'"
+ )
+ ping_result = client.execute(
+ "ping -c 1 -W 1 -I ovs-br {}".format(gateway)
+ )
+ assert ping_result.ok
diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py
index 3fe7d0d0..d9ef18aa 100644
--- a/tests/integration_tests/bugs/test_lp1900837.py
+++ b/tests/integration_tests/bugs/test_lp1900837.py
@@ -4,14 +4,12 @@ This test mirrors the reproducing steps from the reported bug: it changes the
permissions on cloud-init.log to 600 and confirms that they remain 600 after a
reboot.
"""
-import pytest
def _get_log_perms(client):
return client.execute("stat -c %a /var/log/cloud-init.log")
-@pytest.mark.sru_2020_11
class TestLogPermissionsNotResetOnReboot:
def test_permissions_unchanged(self, client):
# Confirm that the current permissions aren't 600
@@ -22,7 +20,8 @@ class TestLogPermissionsNotResetOnReboot:
assert "600" == _get_log_perms(client)
# Reboot
- client.instance.restart()
+ client.restart()
+ assert client.execute("cloud-init status").ok
# Check that permissions are not reset on reboot
assert "600" == _get_log_perms(client)
diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py
new file mode 100644
index 00000000..7de8bd77
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1901011.py
@@ -0,0 +1,67 @@
+"""Integration test for LP: #1901011
+
+Ensure an ephemeral disk exists after boot.
+
+See https://github.com/canonical/cloud-init/pull/800
+"""
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+
+@pytest.mark.azure
+@pytest.mark.parametrize(
+ "instance_type,is_ephemeral",
+ [
+ ("Standard_DS1_v2", True),
+ ("Standard_D2s_v4", False),
+ ],
+)
+def test_ephemeral(
+ instance_type, is_ephemeral, session_cloud: IntegrationCloud, setup_image
+):
+ if is_ephemeral:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ )
+ else:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does "
+ "not exist. Not merging default Azure cloud ephemeral disk "
+ "configs."
+ )
+
+ with session_cloud.launch(
+ launch_kwargs={"instance_type": instance_type}
+ ) as client:
+ # Verify log file
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert expected_log in log
+
+ # Verify devices
+ dev_links = client.execute("ls /dev/disk/cloud")
+ assert "azure_root" in dev_links
+ assert "azure_root-part1" in dev_links
+ if is_ephemeral:
+ assert "azure_resource" in dev_links
+ assert "azure_resource-part1" in dev_links
+
+ # Verify mounts
+ blks = client.execute("lsblk -pPo NAME,TYPE,MOUNTPOINT")
+ root_device = client.execute(
+ "realpath /dev/disk/cloud/azure_root-part1"
+ )
+ assert (
+ 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(root_device) in blks
+ )
+ if is_ephemeral:
+ ephemeral_device = client.execute(
+ "realpath /dev/disk/cloud/azure_resource-part1"
+ )
+ assert (
+ 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
+ ephemeral_device
+ )
+ in blks
+ )
diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py
new file mode 100644
index 00000000..1844594c
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1910835.py
@@ -0,0 +1,64 @@
+"""Integration test for LP: #1910835.
+
+If users do not provide an SSH key and instead ask Azure to generate a key for
+them, the key material available in the IMDS may include CRLF sequences. Prior
+to e56b55452549cb037da0a4165154ffa494e9678a, the Azure datasource handled keys
+via a certificate, the tooling for which removed these sequences. This test
+ensures that cloud-init does not regress support for this Azure behaviour.
+
+This test provides the SSH key configured for tests to the instance in two
+ways: firstly, with CRLFs to mimic the generated keys, via the Azure API;
+secondly, as user-data in unmodified form. This means that even on systems
+which exhibit the bug fetching the platform's metadata, we can SSH into the SUT
+to confirm this (instead of having to assert SSH failure; there are lots of
+reasons SSH might fail).
+
+Once SSH'd in, we check that the two keys in .ssh/authorized_keys have the same
+material: if the Azure datasource has removed the CRLFs correctly, then they
+will match.
+"""
+import pytest
+
+USER_DATA_TMPL = """\
+#cloud-config
+ssh_authorized_keys:
+ - {}"""
+
+
+@pytest.mark.azure
+def test_crlf_in_azure_metadata_ssh_keys(session_cloud, setup_image):
+ authorized_keys_path = "/home/{}/.ssh/authorized_keys".format(
+ session_cloud.cloud_instance.username
+ )
+ # Pass in user-data to allow us to access the instance when the normal
+ # path fails
+ key_data = session_cloud.cloud_instance.key_pair.public_key_content
+ user_data = USER_DATA_TMPL.format(key_data)
+ # Throw a CRLF into the otherwise good key data, to emulate Azure's
+ # behaviour for generated keys
+ key_data = key_data[:20] + "\r\n" + key_data[20:]
+ vm_params = {
+ "os_profile": {
+ "linux_configuration": {
+ "ssh": {
+ "public_keys": [
+ {"path": authorized_keys_path, "key_data": key_data}
+ ]
+ }
+ }
+ }
+ }
+ with session_cloud.launch(
+ launch_kwargs={"vm_params": vm_params, "user_data": user_data}
+ ) as client:
+ authorized_keys = (
+ client.read_from_file(authorized_keys_path).strip().splitlines()
+ )
+ # We expect one key from the cloud, one from user-data
+ assert 2 == len(authorized_keys)
+ # And those two keys should be the same, except for a possible key
+ # comment, which Azure strips out
+ assert (
+ authorized_keys[0].rsplit(" ")[:2]
+ == authorized_keys[1].split(" ")[:2]
+ )
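
The final assertion ignores any trailing key comment by comparing only the key type and the key material. The same idea in isolation (the key strings are shortened and purely illustrative):

    key_from_cloud = "ssh-rsa AAAAB3NzaExample... azure-generated-comment"
    key_from_userdata = "ssh-rsa AAAAB3NzaExample..."
    # Compare only the first two fields: key type and base64 key material
    assert key_from_cloud.split(" ")[:2] == key_from_userdata.split(" ")[:2]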
diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py
new file mode 100644
index 00000000..55511ed2
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1912844.py
@@ -0,0 +1,105 @@
+"""Integration test for LP: #1912844
+
+cloud-init should ignore OVS-internal interfaces when performing its own
+interface determination: these interfaces are handled fully by OVS, so
+cloud-init should never need to touch them.
+
+This test is a semi-synthetic reproducer for the bug. It uses a similar
+network configuration, tweaked slightly to DHCP in a way that will succeed even
+on "failed" boots. The exact bug doesn't reproduce with the NoCloud
+datasource, because it runs at init-local time (whereas the MAAS datasource,
+from the report, runs only at init (network) time): this means that the
+networking code runs before OVS creates its interfaces (which happens after
+init-local but, of course, before networking is up), and so doesn't generate
+the traceback that they cause. We work around this by calling
+``get_interfaces_by_mac`` directly in the test code.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+bonds:
+ bond0:
+ interfaces:
+ - enp5s0
+ macaddress: {0}
+ mtu: 1500
+bridges:
+ ovs-br:
+ interfaces:
+ - bond0
+ macaddress: {0}
+ mtu: 1500
+ openvswitch: {{}}
+ dhcp4: true
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {0}
+version: 2
+vlans:
+ ovs-br.100:
+ id: 100
+ link: ovs-br
+ mtu: 1500
+ ovs-br.200:
+ id: 200
+ link: ovs-br
+ mtu: 1500
+""".format(
+ MAC_ADDRESS
+)
+
+
+SETUP_USER_DATA = """\
+#cloud-config
+packages:
+- openvswitch-switch
+"""
+
+
+@pytest.fixture
+def ovs_enabled_session_cloud(session_cloud):
+ """A session_cloud wrapper, to use an OVS-enabled image for tests.
+
+    This implementation is complicated by wanting to use ``session_cloud``'s
+ snapshot cleanup/retention logic, to avoid having to reimplement that here.
+ """
+ old_snapshot_id = session_cloud.snapshot_id
+ with session_cloud.launch(
+ user_data=SETUP_USER_DATA,
+ ) as instance:
+ instance.instance.clean()
+ session_cloud.snapshot_id = instance.snapshot()
+
+ yield session_cloud
+
+ try:
+ session_cloud.delete_snapshot()
+ finally:
+ session_cloud.snapshot_id = old_snapshot_id
+
+
+@pytest.mark.lxd_vm
+def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud):
+ """Launch our OVS-enabled image and confirm the bug doesn't reproduce."""
+ launch_kwargs = {
+ "config_dict": {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ },
+ }
+ with ovs_enabled_session_cloud.launch(
+ launch_kwargs=launch_kwargs,
+ ) as client:
+ result = client.execute(
+ "python3 -c"
+ "'from cloudinit.net import get_interfaces_by_mac;"
+ "get_interfaces_by_mac()'"
+ )
+ assert result.ok
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 88ac4408..83bc6af6 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,38 +1,107 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from abc import ABC, abstractmethod
+import datetime
import logging
-
-from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine
+import os.path
+import random
+import string
+from abc import ABC, abstractmethod
+from typing import Optional, Type
+from uuid import UUID
+
+from pycloudlib import (
+ EC2,
+ GCE,
+ OCI,
+ Azure,
+ LXDContainer,
+ LXDVirtualMachine,
+ Openstack,
+)
+from pycloudlib.cloud import BaseCloud
+from pycloudlib.lxd.cloud import _BaseLXD
from pycloudlib.lxd.instance import LXDInstance
import cloudinit
-from cloudinit.subp import subp
+from cloudinit.subp import ProcessExecutionError, subp
from tests.integration_tests import integration_settings
-from tests.integration_tests.instances import (
- IntegrationEc2Instance,
- IntegrationGceInstance,
- IntegrationAzureInstance, IntegrationInstance,
- IntegrationOciInstance,
- IntegrationLxdInstance,
-)
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import emit_dots_on_travis
-try:
- from typing import Optional
-except ImportError:
- pass
+log = logging.getLogger("integration_testing")
-log = logging.getLogger('integration_testing')
+def _get_ubuntu_series() -> list:
+ """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series"""
+ out = ""
+ try:
+ out, _err = subp(["ubuntu-distro-info", "-a"])
+ except ProcessExecutionError:
+ log.info(
+ "ubuntu-distro-info (from the distro-info package) must be"
+ " installed to guess Ubuntu os/release"
+ )
+ return out.splitlines()
+
+
+class ImageSpecification:
+ """A specification of an image to launch for testing.
+
+    If either ``os`` or ``release`` is not specified, an attempt will be
+ made to infer the correct values for these on instantiation.
+
+ :param image_id:
+ The image identifier used by the rest of the codebase to launch this
+ image.
+ :param os:
+ An optional string describing the operating system this image is for
+ (e.g. "ubuntu", "rhel", "freebsd").
+ :param release:
+        An optional string describing the operating system release (e.g.
+ "focal", "8"; the exact values here will depend on the OS).
+ """
+
+ def __init__(
+ self,
+ image_id: str,
+ os: Optional[str] = None,
+ release: Optional[str] = None,
+ ):
+ if image_id in _get_ubuntu_series():
+ if os is None:
+ os = "ubuntu"
+ if release is None:
+ release = image_id
+
+ self.image_id = image_id
+ self.os = os
+ self.release = release
+ log.info(
+ "Detected image: image_id=%s os=%s release=%s",
+ self.image_id,
+ self.os,
+ self.release,
+ )
+
+ @classmethod
+ def from_os_image(cls):
+ """Return an ImageSpecification for integration_settings.OS_IMAGE."""
+ parts = integration_settings.OS_IMAGE.split("::", 2)
+ return cls(*parts)
class IntegrationCloud(ABC):
- datasource = None # type: Optional[str]
- integration_instance_cls = IntegrationInstance
+ datasource: str
+ cloud_instance: BaseCloud
def __init__(self, settings=integration_settings):
self.settings = settings
- self.cloud_instance = self._get_cloud_instance()
- self.image_id = self._get_initial_image()
+ self.cloud_instance: BaseCloud = self._get_cloud_instance()
+ self.initial_image_id = self._get_initial_image()
+ self.snapshot_id = None
+
+ @property
+ def image_id(self):
+ return self.snapshot_id or self.initial_image_id
def emit_settings_to_log(self) -> None:
log.info(
@@ -50,49 +119,62 @@ class IntegrationCloud(ABC):
raise NotImplementedError
def _get_initial_image(self):
- image_id = self.settings.OS_IMAGE
+ image = ImageSpecification.from_os_image()
try:
- image_id = self.cloud_instance.released_image(
- self.settings.OS_IMAGE)
+ return self.cloud_instance.daily_image(image.image_id)
except (ValueError, IndexError):
- pass
- return image_id
+ return image.image_id
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
return pycloudlib_instance
- def launch(self, user_data=None, launch_kwargs=None,
- settings=integration_settings):
+ def launch(
+ self,
+ user_data=None,
+ launch_kwargs=None,
+ settings=integration_settings,
+ **kwargs,
+ ) -> IntegrationInstance:
+ if launch_kwargs is None:
+ launch_kwargs = {}
if self.settings.EXISTING_INSTANCE_ID:
log.info(
- 'Not launching instance due to EXISTING_INSTANCE_ID. '
- 'Instance id: %s', self.settings.EXISTING_INSTANCE_ID)
+ "Not launching instance due to EXISTING_INSTANCE_ID. "
+ "Instance id: %s",
+ self.settings.EXISTING_INSTANCE_ID,
+ )
self.instance = self.cloud_instance.get_instance(
self.settings.EXISTING_INSTANCE_ID
)
- return
- kwargs = {
- 'image_id': self.image_id,
- 'user_data': user_data,
- 'wait': False,
+ return self.instance
+ default_launch_kwargs = {
+ "image_id": self.image_id,
+ "user_data": user_data,
}
- if launch_kwargs:
- kwargs.update(launch_kwargs)
+ launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
log.info(
- "Launching instance with launch_kwargs:\n{}".format(
- "\n".join("{}={}".format(*item) for item in kwargs.items())
- )
+ "Launching instance with launch_kwargs:\n%s",
+ "\n".join("{}={}".format(*item) for item in launch_kwargs.items()),
)
- pycloudlib_instance = self._perform_launch(kwargs)
-
- log.info('Launched instance: %s', pycloudlib_instance)
- return self.get_instance(pycloudlib_instance, settings)
+ with emit_dots_on_travis():
+ pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs)
+ log.info("Launched instance: %s", pycloudlib_instance)
+ instance = self.get_instance(pycloudlib_instance, settings)
+ if launch_kwargs.get("wait", True):
+ # If we aren't waiting, we can't rely on command execution here
+ log.info(
+ "cloud-init version: %s",
+ instance.execute("cloud-init --version"),
+ )
+ serial = instance.execute("grep serial /etc/cloud/build.info")
+ if serial:
+ log.info("image serial: %s", serial.split()[1])
+ return instance
def get_instance(self, cloud_instance, settings=integration_settings):
- return self.integration_instance_cls(self, cloud_instance, settings)
+ return IntegrationInstance(self, cloud_instance, settings)
def destroy(self):
pass
@@ -100,52 +182,69 @@ class IntegrationCloud(ABC):
def snapshot(self, instance):
return self.cloud_instance.snapshot(instance, clean=True)
+ def delete_snapshot(self):
+ if self.snapshot_id:
+ if self.settings.KEEP_IMAGE:
+ log.info(
+ "NOT deleting snapshot image created for this testrun "
+ "because KEEP_IMAGE is True: %s",
+ self.snapshot_id,
+ )
+ else:
+ log.info(
+ "Deleting snapshot image created for this testrun: %s",
+ self.snapshot_id,
+ )
+ self.cloud_instance.delete_image(self.snapshot_id)
+
class Ec2Cloud(IntegrationCloud):
- datasource = 'ec2'
- integration_instance_cls = IntegrationEc2Instance
+ datasource = "ec2"
def _get_cloud_instance(self):
- return EC2(tag='ec2-integration-test')
+ return EC2(tag="ec2-integration-test")
class GceCloud(IntegrationCloud):
- datasource = 'gce'
- integration_instance_cls = IntegrationGceInstance
+ datasource = "gce"
def _get_cloud_instance(self):
return GCE(
- tag='gce-integration-test',
- project=self.settings.GCE_PROJECT,
- region=self.settings.GCE_REGION,
- zone=self.settings.GCE_ZONE,
+ tag="gce-integration-test",
)
class AzureCloud(IntegrationCloud):
- datasource = 'azure'
- integration_instance_cls = IntegrationAzureInstance
+ datasource = "azure"
+ cloud_instance: Azure
def _get_cloud_instance(self):
- return Azure(tag='azure-integration-test')
+ return Azure(tag="azure-integration-test")
def destroy(self):
- self.cloud_instance.delete_resource_group()
+ if self.settings.KEEP_INSTANCE:
+ log.info(
+ "NOT deleting resource group because KEEP_INSTANCE is true "
+ "and deleting resource group would also delete instance. "
+ "Instance and resource group must both be manually deleted."
+ )
+ else:
+ self.cloud_instance.delete_resource_group()
class OciCloud(IntegrationCloud):
- datasource = 'oci'
- integration_instance_cls = IntegrationOciInstance
+ datasource = "oci"
def _get_cloud_instance(self):
return OCI(
- tag='oci-integration-test',
- compartment_id=self.settings.OCI_COMPARTMENT_ID
+ tag="oci-integration-test",
)
class _LxdIntegrationCloud(IntegrationCloud):
- integration_instance_cls = IntegrationLxdInstance
+ pycloudlib_instance_cls: Type[_BaseLXD]
+ instance_tag: str
+ cloud_instance: _BaseLXD
def _get_cloud_instance(self):
return self.pycloudlib_instance_cls(tag=self.instance_tag)
@@ -156,60 +255,102 @@ class _LxdIntegrationCloud(IntegrationCloud):
@staticmethod
def _mount_source(instance: LXDInstance):
- target_path = '/usr/lib/python3/dist-packages/cloudinit'
- format_variables = {
- 'name': instance.name,
- 'source_path': cloudinit.__path__[0],
- 'container_path': target_path,
- }
- log.info(
- 'Mounting source {source_path} directly onto LXD container/vm '
- 'named {name} at {container_path}'.format(**format_variables))
- command = (
- 'lxc config device add {name} host-cloud-init disk '
- 'source={source_path} '
- 'path={container_path}'
- ).format(**format_variables)
- subp(command.split())
-
- def _perform_launch(self, launch_kwargs):
- launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
- launch_kwargs.pop('wait')
- release = launch_kwargs.pop('image_id')
+ cloudinit_path = cloudinit.__path__[0]
+ mounts = [
+ (cloudinit_path, "/usr/lib/python3/dist-packages/cloudinit"),
+ (
+ os.path.join(cloudinit_path, "..", "templates"),
+ "/etc/cloud/templates",
+ ),
+ ]
+ for (n, (source_path, target_path)) in enumerate(mounts):
+ format_variables = {
+ "name": instance.name,
+ "source_path": os.path.realpath(source_path),
+ "container_path": target_path,
+ "idx": n,
+ }
+ log.info(
+ "Mounting source %(source_path)s directly onto LXD"
+ " container/VM named %(name)s at %(container_path)s",
+ format_variables,
+ )
+ command = (
+ "lxc config device add {name} host-cloud-init-{idx} disk "
+ "source={source_path} "
+ "path={container_path}"
+ ).format(**format_variables)
+ subp(command.split())
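+ # The resulting command looks like (name/paths illustrative):
+ # lxc config device add <name> host-cloud-init-0 disk
+ # source=/path/to/cloudinit path=/usr/lib/python3/dist-packages/cloudinit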
+
+ def _perform_launch(self, launch_kwargs, **kwargs):
+ launch_kwargs["inst_type"] = launch_kwargs.pop("instance_type", None)
+ wait = launch_kwargs.pop("wait", True)
+ release = launch_kwargs.pop("image_id")
try:
- profile_list = launch_kwargs['profile_list']
+ profile_list = launch_kwargs["profile_list"]
except KeyError:
profile_list = self._get_or_set_profile_list(release)
+ prefix = datetime.datetime.utcnow().strftime("cloudinit-%m%d-%H%M%S")
+ default_name = prefix + "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=8)
+ )
pycloudlib_instance = self.cloud_instance.init(
- launch_kwargs.pop('name', None),
+ launch_kwargs.pop("name", default_name),
release,
profile_list=profile_list,
- **launch_kwargs
+ **launch_kwargs,
)
- if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
+ if self.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
self._mount_source(pycloudlib_instance)
- pycloudlib_instance.start(wait=False)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
+ if "lxd_setup" in kwargs:
+ log.info("Running callback specified by 'lxd_setup' mark")
+ kwargs["lxd_setup"](pycloudlib_instance)
+ pycloudlib_instance.start(wait=wait)
return pycloudlib_instance
class LxdContainerCloud(_LxdIntegrationCloud):
- datasource = 'lxd_container'
+ datasource = "lxd_container"
+ cloud_instance: LXDContainer
pycloudlib_instance_cls = LXDContainer
- instance_tag = 'lxd-container-integration-test'
+ instance_tag = "lxd-container-integration-test"
class LxdVmCloud(_LxdIntegrationCloud):
- datasource = 'lxd_vm'
+ datasource = "lxd_vm"
+ cloud_instance: LXDVirtualMachine
pycloudlib_instance_cls = LXDVirtualMachine
- instance_tag = 'lxd-vm-integration-test'
+ instance_tag = "lxd-vm-integration-test"
_profile_list = None
def _get_or_set_profile_list(self, release):
if self._profile_list:
return self._profile_list
self._profile_list = self.cloud_instance.build_necessary_profiles(
- release)
+ release
+ )
return self._profile_list
+
+
+class OpenstackCloud(IntegrationCloud):
+ datasource = "openstack"
+
+ def _get_cloud_instance(self):
+ return Openstack(
+ tag="openstack-integration-test",
+ )
+
+ def _get_initial_image(self):
+ image = ImageSpecification.from_os_image()
+ try:
+ UUID(image.image_id)
+ except ValueError as e:
+ raise Exception(
+ "When using Openstack, `OS_IMAGE` MUST be specified with "
+ "a 36-character UUID image ID. Passing in a release name is "
+ "not valid here.\n"
+ "OS image id: {}".format(image.image_id)
+ ) from e
+ return image.image_id
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 73b44bfc..a90a5d49 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -1,33 +1,51 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import datetime
+import functools
import logging
import os
-import pytest
import sys
from contextlib import contextmanager
+from pathlib import Path
+from tarfile import TarFile
+from typing import Dict, Type
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
from tests.integration_tests import integration_settings
from tests.integration_tests.clouds import (
+ AzureCloud,
Ec2Cloud,
GceCloud,
- AzureCloud,
- OciCloud,
+ ImageSpecification,
+ IntegrationCloud,
LxdContainerCloud,
LxdVmCloud,
+ OciCloud,
+ OpenstackCloud,
+ _LxdIntegrationCloud,
+)
+from tests.integration_tests.instances import (
+ CloudInitSource,
+ IntegrationInstance,
)
-
-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.INFO)
-platforms = {
- 'ec2': Ec2Cloud,
- 'gce': GceCloud,
- 'azure': AzureCloud,
- 'oci': OciCloud,
- 'lxd_container': LxdContainerCloud,
- 'lxd_vm': LxdVmCloud,
+platforms: Dict[str, Type[IntegrationCloud]] = {
+ "ec2": Ec2Cloud,
+ "gce": GceCloud,
+ "azure": AzureCloud,
+ "oci": OciCloud,
+ "lxd_container": LxdContainerCloud,
+ "lxd_vm": LxdVmCloud,
+ "openstack": OpenstackCloud,
}
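+# integration_settings.PLATFORM (or a CLOUD_INIT_PLATFORM environment
+# variable override) selects one of the keys above for the whole session.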
+os_list = ["ubuntu"]
+
+session_start_time = datetime.datetime.now().strftime("%y%m%d%H%M%S")
def pytest_runtest_setup(item):
@@ -42,18 +60,30 @@ def pytest_runtest_setup(item):
test_marks = [mark.name for mark in item.iter_markers()]
supported_platforms = set(all_platforms).intersection(test_marks)
current_platform = integration_settings.PLATFORM
- unsupported_message = 'Cannot run on platform {}'.format(current_platform)
- if 'no_container' in test_marks:
- if 'lxd_container' in test_marks:
+ unsupported_message = "Cannot run on platform {}".format(current_platform)
+ if "no_container" in test_marks:
+ if "lxd_container" in test_marks:
raise Exception(
- 'lxd_container and no_container marks simultaneously set '
- 'on test'
+ "lxd_container and no_container marks simultaneously set "
+ "on test"
)
- if current_platform == 'lxd_container':
+ if current_platform == "lxd_container":
pytest.skip(unsupported_message)
if supported_platforms and current_platform not in supported_platforms:
pytest.skip(unsupported_message)
+ image = ImageSpecification.from_os_image()
+ current_os = image.os
+ supported_os_set = set(os_list).intersection(test_marks)
+ if current_os and supported_os_set and current_os not in supported_os_set:
+ pytest.skip("Cannot run on OS {}".format(current_os))
+ if "unstable" in test_marks and not integration_settings.RUN_UNSTABLE:
+ pytest.skip("Test marked unstable. Manually remove mark to run it")
+
+ current_release = image.release
+ if "not_{}".format(current_release) in test_marks:
+ pytest.skip("Cannot run on release {}".format(current_release))
+
# disable_subp_usage is defined at a higher level, but we don't
# want it applied here
@@ -62,7 +92,7 @@ def disable_subp_usage(request):
pass
-@pytest.yield_fixture(scope='session')
+@pytest.fixture(scope="session")
def session_cloud():
if integration_settings.PLATFORM not in platforms.keys():
raise ValueError(
@@ -74,83 +104,185 @@ def session_cloud():
cloud = platforms[integration_settings.PLATFORM]()
cloud.emit_settings_to_log()
+
yield cloud
+
cloud.destroy()
-@pytest.fixture(scope='session', autouse=True)
-def setup_image(session_cloud):
+def get_validated_source(
+ session_cloud: IntegrationCloud,
+ source=integration_settings.CLOUD_INIT_SOURCE,
+) -> CloudInitSource:
+ if source == "NONE":
+ return CloudInitSource.NONE
+ elif source == "IN_PLACE":
+ if session_cloud.datasource not in ["lxd_container", "lxd_vm"]:
+ raise ValueError(
+ "IN_PLACE as CLOUD_INIT_SOURCE only works for LXD"
+ )
+ return CloudInitSource.IN_PLACE
+ elif source == "PROPOSED":
+ return CloudInitSource.PROPOSED
+ elif source.startswith("ppa:"):
+ return CloudInitSource.PPA
+ elif os.path.isfile(str(source)):
+ return CloudInitSource.DEB_PACKAGE
+ elif source == "UPGRADE":
+ return CloudInitSource.UPGRADE
+ raise ValueError(
+ "Invalid value for CLOUD_INIT_SOURCE setting: {}".format(source)
+ )
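+# Illustrative CLOUD_INIT_SOURCE values and what they map to (examples only):
+# "NONE" -> CloudInitSource.NONE
+# "ppa:cloud-init-dev/proposed" -> CloudInitSource.PPA
+# "/tmp/cloud-init_all.deb" -> CloudInitSource.DEB_PACKAGE (path must exist)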
+
+
+@pytest.fixture(scope="session")
+def setup_image(session_cloud: IntegrationCloud, request):
"""Setup the target environment with the correct version of cloud-init.
So we can launch instances / run tests with the correct image
"""
- client = None
- log.info('Setting up environment for %s', session_cloud.datasource)
- if integration_settings.CLOUD_INIT_SOURCE == 'NONE':
- pass # that was easy
- elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
- if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
- raise ValueError(
- 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
- # The mount needs to happen after the instance is created, so
- # no further action needed here
- elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED':
- client = session_cloud.launch()
- client.install_proposed_image()
- elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'):
- client = session_cloud.launch()
- client.install_ppa(integration_settings.CLOUD_INIT_SOURCE)
- elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)):
- client = session_cloud.launch()
- client.install_deb()
- else:
- raise ValueError(
- 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(
- integration_settings.CLOUD_INIT_SOURCE))
- if client:
- # Even if we're keeping instances, we don't want to keep this
- # one around as it was just for image creation
- client.destroy()
- log.info('Done with environment setup')
+
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ return
+ log.info("Setting up environment for %s", session_cloud.datasource)
+ client = session_cloud.launch()
+ client.install_new_cloud_init(source)
+ # Even if we're keeping instances, we don't want to keep this
+ # one around as it was just for image creation
+ client.destroy()
+ log.info("Done with environment setup")
+
+ # For some reason a yield here raises a
+ # "ValueError: setup_image did not yield a value"
+ # during setup, so use a finalizer instead.
+ request.addfinalizer(session_cloud.delete_snapshot)
+
+
+def _collect_logs(
+ instance: IntegrationInstance, node_id: str, test_failed: bool
+):
+ """Collect logs from remote instance.
+
+ Args:
+ instance: The current IntegrationInstance to collect logs from
+ node_id: The pytest representation of this test, E.g.:
+ tests/integration_tests/test_example.py::TestExample.test_example
+ test_failed: If test failed or not
+ """
+ if any(
+ [
+ integration_settings.COLLECT_LOGS == "NEVER",
+ integration_settings.COLLECT_LOGS == "ON_ERROR"
+ and not test_failed,
+ ]
+ ):
+ return
+ instance.execute(
+ "cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz"
+ )
+ node_id_path = Path(
+ node_id.replace(
+ ".py", ""
+ ) # Having a directory with '.py' would be weird
+ .replace("::", os.path.sep) # Turn classes/tests into paths
+ .replace("[", "-") # For parametrized names
+ .replace("]", "") # For parameterized names
+ )
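+ # e.g. "tests/integration_tests/test_x.py::TestX::test_y[one]"
+ # becomes tests/integration_tests/test_x/TestX/test_y-one (illustrative)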
+ log_dir = (
+ Path(integration_settings.LOCAL_LOG_PATH)
+ / session_start_time
+ / node_id_path
+ )
+ log.info("Writing logs to %s", log_dir)
+
+ if not log_dir.exists():
+ log_dir.mkdir(parents=True)
+
+ # Add a symlink to the latest log output directory
+ last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / "last"
+ if os.path.islink(last_symlink):
+ os.unlink(last_symlink)
+ os.symlink(log_dir.parent, last_symlink)
+
+ tarball_path = log_dir / "cloud-init.tar.gz"
+ try:
+ instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path)
+ except Exception as e:
+ log.error("Failed to pull logs: %s", e)
+ return
+
+ tarball = TarFile.open(str(tarball_path))
+ tarball.extractall(path=str(log_dir))
+ tarball_path.unlink()
@contextmanager
-def _client(request, fixture_utils, session_cloud):
+def _client(request, fixture_utils, session_cloud: IntegrationCloud):
"""Fixture implementation for the client fixtures.
Launch the dynamic IntegrationClient instance using any provided
userdata, yield to the test, then cleanup
"""
- user_data = fixture_utils.closest_marker_first_arg_or(
- request, 'user_data', None)
- name = fixture_utils.closest_marker_first_arg_or(
- request, 'instance_name', None
+ getter = functools.partial(
+ fixture_utils.closest_marker_first_arg_or, request, default=None
+ )
+ user_data = getter("user_data")
+ name = getter("instance_name")
+ lxd_config_dict = getter("lxd_config_dict")
+ lxd_setup = getter("lxd_setup")
+ lxd_use_exec = fixture_utils.closest_marker_args_or(
+ request, "lxd_use_exec", None
)
+
launch_kwargs = {}
if name is not None:
- launch_kwargs = {"name": name}
+ launch_kwargs["name"] = name
+ if lxd_config_dict is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_config_dict requires LXD")
+ launch_kwargs["config_dict"] = lxd_config_dict
+ if lxd_use_exec is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_use_exec requires LXD")
+ launch_kwargs["execute_via_ssh"] = False
+ local_launch_kwargs = {}
+ if lxd_setup is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_setup requires LXD")
+ local_launch_kwargs["lxd_setup"] = lxd_setup
+
with session_cloud.launch(
- user_data=user_data, launch_kwargs=launch_kwargs
+ user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs
) as instance:
+ if lxd_use_exec is not None and isinstance(
+ instance.instance, LXDInstance
+ ):
+ # Existing instances are not affected by the launch kwargs, so
+ # set execute_via_ssh here as well; we still need the launch kwarg
+ # so that waiting works
+ instance.instance.execute_via_ssh = False
+ previous_failures = request.session.testsfailed
yield instance
+ test_failed = request.session.testsfailed - previous_failures > 0
+ _collect_logs(instance, request.node.nodeid, test_failed)
-@pytest.yield_fixture
-def client(request, fixture_utils, session_cloud):
+@pytest.fixture
+def client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs for every test."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
-@pytest.yield_fixture(scope='module')
-def module_client(request, fixture_utils, session_cloud):
+@pytest.fixture(scope="module")
+def module_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per module."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
-@pytest.yield_fixture(scope='class')
-def class_client(request, fixture_utils, session_cloud):
+@pytest.fixture(scope="class")
+def class_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per class."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
@@ -180,3 +312,20 @@ def pytest_assertrepr_compare(op, left, right):
'"{}" not in cloud-init.log string; unexpectedly found on'
" these lines:".format(left)
] + found_lines
+
+
+def pytest_configure(config):
+ """Perform initial configuration, before the test runs start.
+
+ This hook is only called if integration tests are being executed, so we can
+ use it to configure defaults for integration testing that differ from the
+ rest of the tests in the codebase.
+
+ See
+ https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure
+ for pytest's documentation.
+ """
+ if "log_cli_level" in config.option and not config.option.log_cli_level:
+ # If log_cli_level is available in this version of pytest and not set
+ # to anything, set it to INFO.
+ config.option.log_cli_level = "INFO"
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
new file mode 100644
index 00000000..eb2a4cf2
--- /dev/null
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -0,0 +1,90 @@
+import json
+
+import pytest
+import yaml
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+def _customize_environment(client: IntegrationInstance):
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg",
+ "datasource_list: [LXD]\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+# This test should be able to work on any cloud whose datasource specifies
+# a NETWORK dependency
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ubuntu # Because netplan
+def test_lxd_datasource_discovery(client: IntegrationInstance):
+ """Test that DataSourceLXD is detected instead of NoCloud."""
+ _customize_environment(client)
+ nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError("cloud-init failed:\n%s", result.stderr)
+ if "DataSourceLXD" not in result.stdout:
+ raise AssertionError(
+ f"cloud-init did not discover DataSourceLXD: {result.stdout}"
+ )
+ netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ assert {
+ "network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
+ } == netplan_cfg
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ result = client.execute("cloud-id")
+ if result.stdout != "lxd":
+ raise AssertionError(
+ f"cloud-id didn't report lxd. Result: {result.stdout}"
+ )
+ # Validate config instance data represented
+ data = json.loads(
+ client.read_from_file("/run/cloud-init/instance-data.json")
+ )
+ v1 = data["v1"]
+ ds_cfg = data["ds"]
+ assert "lxd" == v1["platform"]
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
+ ds_cfg = json.loads(client.execute("cloud-init query ds").stdout)
+ assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted(
+ list(ds_cfg.keys())
+ )
+ if (
+ client.settings.PLATFORM == "lxd_vm"
+ and ImageSpecification.from_os_image().release == "bionic"
+ ):
+ # pycloudlib injects user.vendor_data for lxd_vm on bionic
+ # to start the lxd-agent.
+ # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\
+ # lxd/defaults.py#L13-L27
+ # Underscore-delimited aliases exist for any keys containing hyphens or
+ # dots.
+ lxd_config_keys = ["user.meta-data", "user.vendor-data"]
+ else:
+ lxd_config_keys = ["user.meta-data"]
+ assert "1.0" == ds_cfg["_metadata_api_version"]
+ assert lxd_config_keys == list(ds_cfg["config"].keys())
+ assert {"public-keys": v1["public_ssh_keys"][0]} == (
+ yaml.safe_load(ds_cfg["config"]["user.meta-data"])
+ )
+ assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
+ # Assert NoCloud seed data is still present in cloud image metadata
+ # This will start failing if we redact metadata templates from
+ # https://cloud-images.ubuntu.com/daily/server/jammy/current/\
+ # jammy-server-cloudimg-amd64-lxd.tar.xz
+ nocloud_metadata = yaml.safe_load(
+ client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
+ )
+ assert client.instance.name == nocloud_metadata["instance-id"]
+ assert (
+ nocloud_metadata["instance-id"] == nocloud_metadata["local-hostname"]
+ )
+ assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"]
diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py
new file mode 100644
index 00000000..32ac7053
--- /dev/null
+++ b/tests/integration_tests/datasources/test_network_dependency.py
@@ -0,0 +1,33 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _customize_environment(client: IntegrationInstance):
+ # Insert our "disable_network_activation" file here
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg",
+ "disable_network_activation: true\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+# This test should be able to work on any cloud whose datasource specifies
+# a NETWORK dependency
+@pytest.mark.gce
+@pytest.mark.ubuntu # Because netplan
+def test_network_activation_disabled(client: IntegrationInstance):
+ """Test that the network is not activated during init mode."""
+ _customize_environment(client)
+ result = client.execute("systemctl status google-guest-agent.service")
+ if not result.ok:
+ raise AssertionError(
+ f"google-guest-agent is not active:\n{result.stdout}"
+ )
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+ assert "Running command ['netplan', 'apply']" not in log
+
+ assert "Not bringing up newly configured network interfaces" in log
+ assert "Bringing up newly configured network interfaces" not in log
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 9b13288c..e26ee233 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -2,34 +2,61 @@
import logging
import os
import uuid
+from enum import Enum
from tempfile import NamedTemporaryFile
from pycloudlib.instance import BaseInstance
from pycloudlib.result import Result
from tests.integration_tests import integration_settings
+from tests.integration_tests.util import retry
try:
from typing import TYPE_CHECKING
+
if TYPE_CHECKING:
- from tests.integration_tests.clouds import IntegrationCloud
+ from tests.integration_tests.clouds import ( # noqa: F401
+ IntegrationCloud,
+ )
except ImportError:
pass
-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")
def _get_tmp_path():
tmp_filename = str(uuid.uuid4())
- return '/var/tmp/{}.tmp'.format(tmp_filename)
+ return "/var/tmp/{}.tmp".format(tmp_filename)
-class IntegrationInstance:
- use_sudo = True
+class CloudInitSource(Enum):
+ """Represents the cloud-init image source setting as a defined value.
+
+ Values here represent all possible values for CLOUD_INIT_SOURCE in
+ tests/integration_tests/integration_settings.py. See that file for an
+ explanation of these values. If the value set there can't be parsed into
+ one of these values, an exception will be raised
+ """
+
+ NONE = 1
+ IN_PLACE = 2
+ PROPOSED = 3
+ PPA = 4
+ DEB_PACKAGE = 5
+ UPGRADE = 6
- def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance,
- settings=integration_settings):
+ def installs_new_version(self):
+ return self.name not in [self.NONE.name, self.IN_PLACE.name]
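+ # e.g. CloudInitSource.PPA.installs_new_version() is True, while
+ # CloudInitSource.IN_PLACE.installs_new_version() is False.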
+
+
+class IntegrationInstance:
+ def __init__(
+ self,
+ cloud: "IntegrationCloud",
+ instance: BaseInstance,
+ settings=integration_settings,
+ ):
self.cloud = cloud
self.instance = instance
self.settings = settings
@@ -37,44 +64,53 @@ class IntegrationInstance:
def destroy(self):
self.instance.delete()
- def execute(self, command, *, use_sudo=None) -> Result:
- if self.instance.username == 'root' and use_sudo is False:
- raise Exception('Root user cannot run unprivileged')
- if use_sudo is None:
- use_sudo = self.use_sudo
+ def restart(self):
+ """Restart this instance (via cloud mechanism) and wait for boot.
+
+ This wraps pycloudlib's `BaseInstance.restart`
+ """
+ log.info("Restarting instance and waiting for boot")
+ self.instance.restart()
+
+ def execute(self, command, *, use_sudo=True) -> Result:
+ if self.instance.username == "root" and use_sudo is False:
+ raise Exception("Root user cannot run unprivileged")
return self.instance.execute(command, use_sudo=use_sudo)
def pull_file(self, remote_path, local_path):
# First copy to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.execute('cp {} {}'.format(remote_path, tmp_path))
- self.instance.pull_file(tmp_path, local_path)
+ self.instance.execute("cp {} {}".format(str(remote_path), tmp_path))
+ self.instance.pull_file(tmp_path, str(local_path))
def push_file(self, local_path, remote_path):
# First push to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.push_file(local_path, tmp_path)
- self.execute('mv {} {}'.format(tmp_path, remote_path))
+ self.instance.push_file(str(local_path), tmp_path)
+ assert self.execute("mv {} {}".format(tmp_path, str(remote_path))).ok
def read_from_file(self, remote_path) -> str:
- result = self.execute('cat {}'.format(remote_path))
+ result = self.execute("cat {}".format(remote_path))
if result.failed:
# TODO: Raise here whatever pycloudlib raises when it has
# a consistent error response
raise IOError(
- 'Failed reading remote file via cat: {}\n'
- 'Return code: {}\n'
- 'Stderr: {}\n'
- 'Stdout: {}'.format(
- remote_path, result.return_code,
- result.stderr, result.stdout)
+ "Failed reading remote file via cat: {}\n"
+ "Return code: {}\n"
+ "Stderr: {}\n"
+ "Stdout: {}".format(
+ remote_path,
+ result.return_code,
+ result.stderr,
+ result.stdout,
+ )
)
return result.stdout
def write_to_file(self, remote_path, contents: str):
# Writes file locally and then pushes it rather
# than writing the file directly on the instance
- with NamedTemporaryFile('w', delete=False) as tmp_file:
+ with NamedTemporaryFile("w", delete=False) as tmp_file:
tmp_file.write(contents)
try:
@@ -83,48 +119,79 @@ class IntegrationInstance:
os.unlink(tmp_file.name)
def snapshot(self):
- return self.cloud.snapshot(self.instance)
-
- def _install_new_cloud_init(self, remote_script):
- self.execute(remote_script)
- version = self.execute('cloud-init -v').split()[-1]
- log.info('Installed cloud-init version: %s', version)
- self.instance.clean()
- image_id = self.snapshot()
- log.info('Created new image: %s', image_id)
- self.cloud.image_id = image_id
-
+ image_id = self.cloud.snapshot(self.instance)
+ log.info("Created new image: %s", image_id)
+ return image_id
+
+ def install_new_cloud_init(
+ self,
+ source: CloudInitSource,
+ take_snapshot=True,
+ clean=True,
+ ):
+ if source == CloudInitSource.DEB_PACKAGE:
+ self.install_deb()
+ elif source == CloudInitSource.PPA:
+ self.install_ppa()
+ elif source == CloudInitSource.PROPOSED:
+ self.install_proposed_image()
+ elif source == CloudInitSource.UPGRADE:
+ self.upgrade_cloud_init()
+ else:
+ raise Exception(
+ "Specified to install {} which isn't supported here".format(
+ source
+ )
+ )
+ version = self.execute("cloud-init -v").split()[-1]
+ log.info("Installed cloud-init version: %s", version)
+ if clean:
+ self.instance.clean()
+ if take_snapshot:
+ snapshot_id = self.snapshot()
+ self.cloud.snapshot_id = snapshot_id
+
+ # Retry the install methods below because we can be competing with apt
+ # already running in the background and get: E: Could not get lock
+ # /var/lib/apt/lists/lock - open (11: Resource temporarily unavailable)
+
+ @retry(tries=30, delay=1)
def install_proposed_image(self):
- log.info('Installing proposed image')
- remote_script = (
- '{sudo} echo deb "http://archive.ubuntu.com/ubuntu '
- '$(lsb_release -sc)-proposed main" | '
- '{sudo} tee /etc/apt/sources.list.d/proposed.list\n'
- '{sudo} apt-get update -q\n'
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '')
- self._install_new_cloud_init(remote_script)
-
- def install_ppa(self, repo):
- log.info('Installing PPA')
- remote_script = (
- '{sudo} add-apt-repository {repo} -y && '
- '{sudo} apt-get update -q && '
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '', repo=repo)
- self._install_new_cloud_init(remote_script)
-
+ log.info("Installing proposed image")
+ assert self.execute(
+ 'echo deb "http://archive.ubuntu.com/ubuntu '
+ '$(lsb_release -sc)-proposed main" >> '
+ "/etc/apt/sources.list.d/proposed.list"
+ ).ok
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
+
+ @retry(tries=30, delay=1)
+ def install_ppa(self):
+ log.info("Installing PPA")
+ assert self.execute(
+ "add-apt-repository {} -y".format(self.settings.CLOUD_INIT_SOURCE)
+ ).ok
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
+
+ @retry(tries=30, delay=1)
def install_deb(self):
- log.info('Installing deb package')
+ log.info("Installing deb package")
deb_path = integration_settings.CLOUD_INIT_SOURCE
deb_name = os.path.basename(deb_path)
- remote_path = '/var/tmp/{}'.format(deb_name)
+ remote_path = "/var/tmp/{}".format(deb_name)
self.push_file(
local_path=integration_settings.CLOUD_INIT_SOURCE,
- remote_path=remote_path)
- remote_script = '{sudo} dpkg -i {path}'.format(
- sudo='sudo' if self.use_sudo else '', path=remote_path)
- self._install_new_cloud_init(remote_script)
+ remote_path=remote_path,
+ )
+ assert self.execute("dpkg -i {path}".format(path=remote_path)).ok
+
+ @retry(tries=30, delay=1)
+ def upgrade_cloud_init(self):
+ log.info("Upgrading cloud-init to latest version in archive")
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
def __enter__(self):
return self
@@ -132,23 +199,3 @@ class IntegrationInstance:
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.settings.KEEP_INSTANCE:
self.destroy()
-
-
-class IntegrationEc2Instance(IntegrationInstance):
- pass
-
-
-class IntegrationGceInstance(IntegrationInstance):
- pass
-
-
-class IntegrationAzureInstance(IntegrationInstance):
- pass
-
-
-class IntegrationOciInstance(IntegrationInstance):
- pass
-
-
-class IntegrationLxdInstance(IntegrationInstance):
- use_sudo = False
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index a0609f7e..f27e4f12 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -1,29 +1,40 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from cloudinit.util import is_false, is_true
+
##################################################################
# LAUNCH SETTINGS
##################################################################
# Keep instance (mostly for debugging) when test is finished
KEEP_INSTANCE = False
+# Keep snapshot image (mostly for debugging) when test is finished
+KEEP_IMAGE = False
+# Run tests marked as unstable. Expect failures and dragons.
+RUN_UNSTABLE = False
# One of:
# lxd_container
+# lxd_vm
# azure
# ec2
# gce
# oci
-PLATFORM = 'lxd_container'
+# openstack
+PLATFORM = "lxd_container"
# The cloud-specific instance type to run. E.g., a1.medium on AWS
# If the pycloudlib instance provides a default, this can be left None
INSTANCE_TYPE = None
# Determines the base image to use or generate new images from.
-# Can be the name of the OS if running a stock image,
-# otherwise the id of the image being used if using a custom image
-OS_IMAGE = 'focal'
+#
+# This can be the name of an Ubuntu release, or in the format
+# <image_id>[::<os>[::<release>]]. If given, os and release should describe
+# the image specified by image_id. (Ubuntu releases are converted to this
+# format internally; in this case, to "focal::ubuntu::focal".)
+OS_IMAGE = "focal"
# Populate if you want to use a pre-launched instance instead of
# creating a new one. The exact contents will be platform dependent
@@ -49,35 +60,30 @@ EXISTING_INSTANCE_ID = None
# code.
# PROPOSED
# Install from the Ubuntu proposed repo
+# UPGRADE
+# Upgrade cloud-init to the version in the Ubuntu archive
# <ppa repo>, e.g., ppa:cloud-init-dev/proposed
# Install from a PPA. It MUST start with 'ppa:'
# <file path>
# A path to a valid package to be uploaded and installed
-CLOUD_INIT_SOURCE = 'NONE'
-
-##################################################################
-# GCE SPECIFIC SETTINGS
-##################################################################
-# Required for GCE
-GCE_PROJECT = None
+CLOUD_INIT_SOURCE = "NONE"
-# You probably want to override these
-GCE_REGION = 'us-central1'
-GCE_ZONE = 'a'
-
-##################################################################
-# OCI SPECIFIC SETTINGS
-##################################################################
-# Compartment-id found at
-# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments
-# Required for Oracle
-OCI_COMPARTMENT_ID = None
+# Before an instance is torn down, we run `cloud-init collect-logs`
+# and transfer them locally. These settings specify when to collect these
+# logs and where to put them on the local filesystem
+# One of:
+# 'ALWAYS'
+# 'ON_ERROR'
+# 'NEVER'
+COLLECT_LOGS = "ON_ERROR"
+LOCAL_LOG_PATH = "/tmp/cloud_init_test_logs"
##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
try:
+ # pylint: disable=wildcard-import,unused-wildcard-import
from tests.integration_tests.user_settings import * # noqa
except ImportError:
pass
@@ -91,6 +97,13 @@ except ImportError:
# Perhaps a bit too hacky, but it works :)
current_settings = [var for var in locals() if var.isupper()]
for setting in current_settings:
- globals()[setting] = os.getenv(
- 'CLOUD_INIT_{}'.format(setting), globals()[setting]
+ env_setting = os.getenv(
+ "CLOUD_INIT_{}".format(setting), globals()[setting]
)
+ if isinstance(env_setting, str):
+ env_setting = env_setting.strip()
+ if is_true(env_setting):
+ env_setting = True
+ elif is_false(env_setting):
+ env_setting = False
+ globals()[setting] = env_setting
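+#
+# Any setting above can also be overridden via the environment, e.g.
+# (values illustrative):
+#   CLOUD_INIT_PLATFORM=lxd_vm CLOUD_INIT_KEEP_INSTANCE=true pytest tests/integration_tests
+# String "true"/"false" values are converted to booleans by is_true/is_false.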
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
new file mode 100644
index 00000000..adab46a8
--- /dev/null
+++ b/tests/integration_tests/modules/test_apt.py
@@ -0,0 +1,354 @@
+"""Series of integration tests covering apt functionality."""
+import re
+
+import pytest
+
+from cloudinit import gpg
+from cloudinit.config import cc_apt_configure
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+apt:
+ conf: |
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ }
+ }
+ primary:
+ - arches: [default]
+ uri: http://badarchive.ubuntu.com/ubuntu
+ security:
+ - arches: [default]
+ uri: http://badsecurity.ubuntu.com/ubuntu
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb-src $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ deb-src $SECURITY $RELEASE-security multiverse
+ sources:
+ test_keyserver:
+ keyid: 110E21D8B0E2A1F0243AF6820856F197B892ACEA
+ keyserver: keyserver.ubuntu.com
+ source: "deb http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu $RELEASE main"
+ test_ppa:
+ keyid: 441614D8
+ keyserver: keyserver.ubuntu.com
+ source: "ppa:simplestreams-dev/trunk"
+ test_signed_by:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main"
+ test_bad_key:
+ key: ""
+ source: "deb $MIRROR $RELEASE main"
+ test_key:
+ source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.1.6
+ Comment: Hostname: keyserver.ubuntu.com
+
+ mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
+ lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
+ RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
+ M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
+ +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
+ b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
+ N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
+ V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
+ xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
+ WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
+ UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
+ CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
+ o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
+ vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
+ yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
+ C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
+ arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
+ uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
+ zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
+ ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
+ cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
+ pb0uBy+g0oxJQg15
+ =uy53
+ -----END PGP PUBLIC KEY BLOCK-----
+apt_pipelining: os
+""" # noqa: E501
+
+EXPECTED_REGEXES = [
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+]
+
+TEST_KEYSERVER_KEY = "110E 21D8 B0E2 A1F0 243A F682 0856 F197 B892 ACEA"
+TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8"
+TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF"
+TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11"
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestApt:
+ def get_keys(self, class_client: IntegrationInstance):
+ """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg
+ in human-readable format. Mimics the output of apt-key finger.
+ """
+ list_cmd = " ".join(gpg.GPG_LIST) + " "
+ keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
+ print(keys)
+ files = class_client.execute(
+ "ls " + cc_apt_configure.APT_TRUSTED_GPG_DIR
+ )
+ for file in files.split():
+ path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file
+ keys += class_client.execute(list_cmd + path) or ""
+ return keys
+
+ def test_sources_list(self, class_client: IntegrationInstance):
+ """Integration test for the apt module's `sources_list` functionality.
+
+ This test specifies a ``sources_list`` and then checks that (a) the
+ expected number of sources.list entries is present, and (b) that each
+ expected line appears in the file.
+
+ (This is ported from
+ `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.)
+ """
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+ assert 6 == len(sources_list.rstrip().split("\n"))
+
+ for expected_re in EXPECTED_REGEXES:
+ assert re.search(expected_re, sources_list) is not None
+
+ def test_apt_conf(self, class_client: IntegrationInstance):
+ """Test the apt conf functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py
+ """
+ apt_config = class_client.read_from_file(
+ "/etc/apt/apt.conf.d/94cloud-init-config"
+ )
+ assert 'Assume-Yes "true";' in apt_config
+ assert 'Fix-Broken "true";' in apt_config
+
+ def test_ppa_source(self, class_client: IntegrationInstance):
+ """Test the apt ppa functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+ """
+ release = ImageSpecification.from_os_image().release
+ ppa_path_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/"
+ "simplestreams-dev-ubuntu-trunk-{}.list".format(release)
+ )
+
+ assert (
+ "http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu"
+ in ppa_path_contents
+ )
+
+ assert TEST_PPA_KEY in self.get_keys(class_client)
+
+ def test_signed_by(self, class_client: IntegrationInstance):
+ """Test the apt signed-by functionality."""
+ release = ImageSpecification.from_os_image().release
+ source = (
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] "
+ "http://ppa.launchpad.net/juju/stable/ubuntu"
+ " {} main".format(release)
+ )
+ path_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_signed_by.list"
+ )
+ assert path_contents == source
+
+ key = class_client.execute(
+ "gpg --no-default-keyring --with-fingerprint --list-keys "
+ "--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg"
+ )
+
+ assert TEST_SIGNED_BY_KEY in key
+
+ def test_bad_key(self, class_client: IntegrationInstance):
+ """Test the apt signed-by functionality."""
+ with pytest.raises(OSError):
+ class_client.read_from_file(
+ "/etc/apt/trusted.list.d/test_bad_key.gpg"
+ )
+
+ def test_key(self, class_client: IntegrationInstance):
+ """Test the apt key functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+ """
+ test_archive_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_key.list"
+ )
+
+ assert (
+ "http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu"
+ in test_archive_contents
+ )
+ assert TEST_KEY in self.get_keys(class_client)
+
+ def test_keyserver(self, class_client: IntegrationInstance):
+ """Test the apt keyserver functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+ """
+ test_keyserver_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_keyserver.list"
+ )
+
+ assert (
+ "http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu"
+ in test_keyserver_contents
+ )
+
+ assert TEST_KEYSERVER_KEY in self.get_keys(class_client)
+
+ def test_os_pipelining(self, class_client: IntegrationInstance):
+ """Test 'os' settings does not write apt config file.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+ """
+ conf_exists = class_client.execute(
+ "test -f /etc/apt/apt.conf.d/90cloud-init-pipelining"
+ ).ok
+ assert conf_exists is False
+
+
+_DEFAULT_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches:
+ - default
+ {uri}
+ security:
+ - arches:
+ - default
+"""
+DEFAULT_DATA = _DEFAULT_DATA.format(uri="")
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_DATA)
+class TestDefaults:
+ @pytest.mark.openstack
+ def test_primary_on_openstack(self, class_client: IntegrationInstance):
+ """Test apt default primary source on openstack.
+
+ When no uri is provided.
+ """
+ zone = class_client.execute("cloud-init query v1.availability_zone")
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+ assert "{}.clouds.archive.ubuntu.com".format(zone) in sources_list
+
+ def test_security(self, class_client: IntegrationInstance):
+ """Test apt default security sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_security.py
+ """
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+
+ # 3 lines from main, universe, and multiverse
+ sec_url = "deb http://security.ubuntu.com/ubuntu"
+ if class_client.settings.PLATFORM == "azure":
+ sec_url = (
+ "deb http://azure.archive.ubuntu.com/ubuntu/ jammy-security"
+ )
+ sec_src_url = sec_url.replace("deb ", "# deb-src ")
+ assert 3 == sources_list.count(sec_url)
+ assert 3 == sources_list.count(sec_src_url)
+
+
+DEFAULT_DATA_WITH_URI = _DEFAULT_DATA.format(
+ uri='uri: "http://something.random.invalid/ubuntu"'
+)
+
+
+@pytest.mark.user_data(DEFAULT_DATA_WITH_URI)
+def test_default_primary_with_uri(client: IntegrationInstance):
+ """Test apt default primary sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_primary.py
+ """
+ sources_list = client.read_from_file("/etc/apt/sources.list")
+ assert "archive.ubuntu.com" not in sources_list
+
+ assert "something.random.invalid" in sources_list
+
+
+DISABLED_DATA = """\
+#cloud-config
+apt:
+ disable_suites:
+ - $RELEASE
+ - $RELEASE-updates
+ - $RELEASE-backports
+ - $RELEASE-security
+apt_pipelining: false
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DISABLED_DATA)
+class TestDisabled:
+ def test_disable_suites(self, class_client: IntegrationInstance):
+ """Test disabling of apt suites.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+ """
+ sources_list = class_client.execute(
+ "cat /etc/apt/sources.list | grep -v '^#'"
+ ).strip()
+ assert "" == sources_list
+
+ def test_disable_apt_pipelining(self, class_client: IntegrationInstance):
+ """Test disabling of apt pipelining.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+ """
+ conf = class_client.read_from_file(
+ "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+ )
+ assert 'Acquire::http::Pipeline-Depth "0";' in conf
+
+
+APT_PROXY_DATA = """\
+#cloud-config
+apt:
+ proxy: "http://proxy.internal:3128"
+ http_proxy: "http://squid.internal:3128"
+ ftp_proxy: "ftp://squid.internal:3128"
+ https_proxy: "https://squid.internal:3128"
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(APT_PROXY_DATA)
+def test_apt_proxy(client: IntegrationInstance):
+ """Test the apt proxy data gets written correctly."""
+ out = client.read_from_file("/etc/apt/apt.conf.d/90cloud-init-aptproxy")
+ assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out
+ assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out
+ assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out
+ assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out
diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py
deleted file mode 100644
index d2bcc61a..00000000
--- a/tests/integration_tests/modules/test_apt_configure_sources_list.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Integration test for the apt module's ``sources_list`` functionality.
-
-This test specifies a ``sources_list`` and then checks that (a) the expected
-number of sources.list entries is present, and (b) that each expected line
-appears in the file.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)"""
-import re
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-"""
-
-EXPECTED_REGEXES = [
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
- r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
-]
-
-
-@pytest.mark.ci
-class TestAptConfigureSourcesList:
-
- @pytest.mark.user_data(USER_DATA)
- def test_sources_list(self, client):
- sources_list = client.read_from_file("/etc/apt/sources.list")
- assert 6 == len(sources_list.rstrip().split('\n'))
-
- for expected_re in EXPECTED_REGEXES:
- assert re.search(expected_re, sources_list) is not None
diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
new file mode 100644
index 00000000..7247fd7d
--- /dev/null
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -0,0 +1,90 @@
+"""Integration tests for cc_ca_certs.
+
+(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.)
+
+TODO:
+* Mark this as running on Debian and Alpine (once we have marks for that)
+* Implement testing for the RHEL-specific paths
+"""
+import os.path
+
+import pytest
+
+USER_DATA = """\
+#cloud-config
+ca_certs:
+ remove_defaults: true
+ trusted:
+ - |
+ -----BEGIN CERTIFICATE-----
+ MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
+ DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
+ d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
+ bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
+ E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
+ MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
+ d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
+ MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
+ 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
+ k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
+ yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
+ RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
+ q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
+ uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
+ vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
+ 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
+ Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
+ z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
+ SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
+ Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
+ TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
+ ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
+ DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
+ mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
+ PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
+ 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
+ slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
+ ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
+ Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
+ RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
+ CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
+ hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
+ DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
+ -----END CERTIFICATE-----
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestCaCerts:
+ def test_certs_updated(self, class_client):
+ """Test that /etc/ssl/certs is updated as we expect."""
+ root = "/etc/ssl/certs"
+ filenames = class_client.execute(["ls", "-1", root]).splitlines()
+ unlinked_files = []
+ links = {}
+ for filename in filenames:
+ full_path = os.path.join(root, filename)
+ symlink_target = class_client.execute(["readlink", full_path])
+ is_symlink = symlink_target.ok
+ if is_symlink:
+ links[filename] = symlink_target
+ else:
+ unlinked_files.append(filename)
+
+ assert ["ca-certificates.crt"] == unlinked_files
+ assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"]
+ assert (
+ "/usr/share/ca-certificates/cloud-init-ca-certs.crt"
+ == links["cloud-init-ca-certs.pem"]
+ )
+
+ def test_cert_installed(self, class_client):
+ """Test that our specified cert has been installed"""
+ checksum = class_client.execute(
+ "sha256sum /etc/ssl/certs/ca-certificates.crt"
+ )
+ assert (
+ "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062"
+ in checksum
+ )
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
new file mode 100644
index 00000000..baaa7567
--- /dev/null
+++ b/tests/integration_tests/modules/test_cli.py
@@ -0,0 +1,81 @@
+"""Integration tests for CLI functionality
+
+These would be for behavior manually invoked by user from the command line
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+VALID_USER_DATA = """\
+#cloud-config
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+INVALID_USER_DATA_HEADER = """\
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+INVALID_USER_DATA_SCHEMA = """\
+#cloud-config
+updates:
+ notnetwork: -1
+apt_pipelining: bogus
+"""
+
+
+@pytest.mark.user_data(VALID_USER_DATA)
+def test_valid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with valid userdata.
+
+ PR #575
+ """
+ result = client.execute("cloud-init devel schema --system")
+ assert result.ok
+ assert "Valid cloud-config: system userdata" == result.stdout.strip()
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
+
+
+@pytest.mark.user_data(INVALID_USER_DATA_HEADER)
+def test_invalid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with invalid userdata.
+
+ PR #575
+ """
+ result = client.execute("cloud-init devel schema --system")
+ assert not result.ok
+ assert "Cloud config schema errors" in result.stderr
+ assert 'needs to begin with "#cloud-config"' in result.stderr
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
+
+
+@pytest.mark.user_data(INVALID_USER_DATA_SCHEMA)
+def test_invalid_userdata_schema(client: IntegrationInstance):
+    """Test that an invalid schema is reported as warnings, not as fatal errors.
+
+ PR #1175
+ """
+ result = client.execute("cloud-init status --long")
+ assert result.ok
+ log = client.read_from_file("/var/log/cloud-init.log")
+ warning = (
+ "[WARNING]: Invalid cloud-config provided:\napt_pipelining: 'bogus'"
+ " is not valid under any of the given schemas\nupdates: Additional"
+ " properties are not allowed ('notnetwork' was unexpected)"
+ )
+ assert warning in log
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
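
The three tests above drive `cloud-init devel schema --system` remotely and inspect result.ok and stderr. A rough standalone sketch of the same validation against a local file, assuming the cloud-init CLI is installed and pointing it at a file via --config-file instead of the live system config:

import subprocess

def schema_check(user_data_path):
    """Validate a cloud-config file and return (passed, stderr).

    A sketch of the CLI call exercised above, run against a file rather
    than the system userdata.
    """
    result = subprocess.run(
        ["cloud-init", "devel", "schema", "--config-file", user_data_path],
        capture_output=True,
        text=True,
    )
    return result.returncode == 0, result.stderr
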
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
new file mode 100644
index 00000000..7a9a6e27
--- /dev/null
+++ b/tests/integration_tests/modules/test_combined.py
@@ -0,0 +1,342 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""A set of somewhat unrelated tests that can be combined into a single
+instance launch. Generally tests should only be added here if a failure
+of the test would be unlikely to affect the running of another test using
+the same instance launch. Most independent module coherence tests can go
+here.
+"""
+import json
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ retry,
+ verify_clean_log,
+ verify_ordered_items_in_text,
+)
+
+USER_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches: [default]
+ uri: http://us.archive.ubuntu.com/ubuntu/
+byobu_by_default: enable
+final_message: |
+ This is my final message!
+ $version
+ $timestamp
+ $datasource
+ $uptime
+locale: en_GB.UTF-8
+locale_configfile: /etc/default/locale
+ntp:
+ servers: ['ntp.ubuntu.com']
+package_update: true
+random_seed:
+ data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
+ encoding: raw
+ file: /root/seed
+rsyslog:
+ configs:
+ - "*.* @@127.0.0.1"
+ - filename: 0-basic-config.conf
+ content: |
+ module(load="imtcp")
+ input(type="imtcp" port="514")
+ $template RemoteLogs,"/var/tmp/rsyslog.log"
+ *.* ?RemoteLogs
+ & ~
+ remotes:
+ me: "127.0.0.1"
+runcmd:
+ - echo 'hello world' > /var/tmp/runcmd_output
+
+ - #
+ - logger "My test log"
+snap:
+ squashfuse_in_container: true
+ commands:
+ - snap install hello-world
+ssh_import_id:
+ - gh:powersj
+ - lp:smoser
+timezone: US/Aleutian
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestCombined:
+ def test_final_message(self, class_client: IntegrationInstance):
+ """Test that final_message module works as expected.
+
+ Also tests LP 1511485: final_message is silent.
+ """
+ client = class_client
+ log = client.read_from_file("/var/log/cloud-init.log")
+ expected = (
+ "This is my final message!\n"
+ r"\d+\.\d+.*\n"
+ r"\w{3}, \d{2} \w{3} \d{4} \d{2}:\d{2}:\d{2} \+\d{4}\n" # Datetime
+ "DataSource.*\n"
+ r"\d+\.\d+"
+ )
+
+ assert re.search(expected, log)
+
+ def test_ntp_with_apt(self, class_client: IntegrationInstance):
+ """LP #1628337.
+
+ cloud-init tries to install NTP before even
+ configuring the archives.
+ """
+ client = class_client
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "W: Failed to fetch" not in log
+ assert "W: Some index files failed to download" not in log
+ assert "E: Unable to locate package ntp" not in log
+
+ def test_byobu(self, class_client: IntegrationInstance):
+ """Test byobu configured as enabled by default."""
+ client = class_client
+ assert client.execute('test -e "/etc/byobu/autolaunch"').ok
+
+ def test_configured_locale(self, class_client: IntegrationInstance):
+ """Test locale can be configured correctly."""
+ client = class_client
+ default_locale = client.read_from_file("/etc/default/locale")
+ assert "LANG=en_GB.UTF-8" in default_locale
+
+ locale_a = client.execute("locale -a")
+ verify_ordered_items_in_text(["en_GB.utf8", "en_US.utf8"], locale_a)
+
+ locale_gen = client.execute(
+ "cat /etc/locale.gen | grep -v '^#' | uniq"
+ )
+ verify_ordered_items_in_text(
+ ["en_GB.UTF-8", "en_US.UTF-8"], locale_gen
+ )
+
+ def test_random_seed_data(self, class_client: IntegrationInstance):
+ """Integration test for the random seed module.
+
+        This test provides specific data for the ``seed_random`` module to use
+        as seed data. We then check that the seed data was actually used.
+ """
+ client = class_client
+
+ # Only read the first 31 characters, because the rest could be
+ # binary data
+ result = client.execute("head -c 31 < /root/seed")
+ assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
+
+ def test_rsyslog(self, class_client: IntegrationInstance):
+ """Test rsyslog is configured correctly."""
+ client = class_client
+ assert "My test log" in client.read_from_file("/var/tmp/rsyslog.log")
+
+ def test_runcmd(self, class_client: IntegrationInstance):
+ """Test runcmd works as expected"""
+ client = class_client
+ assert "hello world" == client.read_from_file("/var/tmp/runcmd_output")
+
+ @retry(tries=30, delay=1)
+ def test_ssh_import_id(self, class_client: IntegrationInstance):
+ """Integration test for the ssh_import_id module.
+
+ This test specifies ssh keys to be imported by the ``ssh_import_id``
+        module and then checks that the ssh keys were successfully imported.
+
+ TODO:
+ * This test assumes that SSH keys will be imported into the
+ /home/ubuntu; this will need modification to run on other OSes.
+ """
+ client = class_client
+ ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys")
+
+ assert "# ssh-import-id gh:powersj" in ssh_output
+ assert "# ssh-import-id lp:smoser" in ssh_output
+
+ def test_snap(self, class_client: IntegrationInstance):
+ """Integration test for the snap module.
+
+ This test specifies a command to be executed by the ``snap`` module
+        and then checks that the command was executed during boot.
+ """
+ client = class_client
+ snap_output = client.execute("snap list")
+ assert "core " in snap_output
+ assert "hello-world " in snap_output
+
+ def test_timezone(self, class_client: IntegrationInstance):
+ """Integration test for the timezone module.
+
+ This test specifies a timezone to be used by the ``timezone`` module
+        and then checks that the timezone was respected during boot.
+ """
+ client = class_client
+ timezone_output = client.execute(
+ 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"'
+ )
+ assert timezone_output.strip() == "HDT"
+
+ def test_no_problems(self, class_client: IntegrationInstance):
+ """Test no errors, warnings, or tracebacks"""
+ client = class_client
+ status_file = client.read_from_file("/run/cloud-init/status.json")
+ status_json = json.loads(status_file)["v1"]
+ for stage in ("init", "init-local", "modules-config", "modules-final"):
+ assert status_json[stage]["errors"] == []
+ result_file = client.read_from_file("/run/cloud-init/result.json")
+ result_json = json.loads(result_file)["v1"]
+ assert result_json["errors"] == []
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ def test_correct_datasource_detected(
+ self, class_client: IntegrationInstance
+ ):
+ """Test datasource is detected at the proper boot stage."""
+ client = class_client
+ status_file = client.read_from_file("/run/cloud-init/status.json")
+ parsed_datasource = json.loads(status_file)["v1"]["datasource"]
+
+ if client.settings.PLATFORM in ["lxd_container", "lxd_vm"]:
+ assert parsed_datasource.startswith("DataSourceNoCloud")
+ else:
+ platform_datasources = {
+ "azure": "DataSourceAzure [seed=/dev/sr0]",
+ "ec2": "DataSourceEc2Local",
+ "gce": "DataSourceGCELocal",
+ "oci": "DataSourceOracle",
+ "openstack": "DataSourceOpenStackLocal [net,ver=2]",
+ }
+ assert (
+ platform_datasources[client.settings.PLATFORM]
+ == parsed_datasource
+ )
+
+ def test_cloud_id_file_symlink(self, class_client: IntegrationInstance):
+ cloud_id = class_client.execute("cloud-id").stdout
+ expected_link_output = (
+ "'/run/cloud-init/cloud-id' -> "
+ f"'/run/cloud-init/cloud-id-{cloud_id}'"
+ )
+ assert expected_link_output == str(
+ class_client.execute("stat -c %N /run/cloud-init/cloud-id")
+ )
+
+ def _check_common_metadata(self, data):
+ assert data["base64_encoded_keys"] == []
+ assert data["merged_cfg"] == "redacted for non-root user"
+
+ image_spec = ImageSpecification.from_os_image()
+ assert data["sys_info"]["dist"][0] == image_spec.os
+
+ v1_data = data["v1"]
+ assert re.match(r"\d\.\d+\.\d+-\d+", v1_data["kernel_release"])
+ assert v1_data["variant"] == image_spec.os
+ assert v1_data["distro"] == image_spec.os
+ assert v1_data["distro_release"] == image_spec.release
+ assert v1_data["machine"] == "x86_64"
+ assert re.match(r"3.\d\.\d", v1_data["python_version"])
+
+ @pytest.mark.lxd_container
+ def test_instance_json_lxd(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "unknown"
+ assert v1_data["platform"] == "lxd"
+ assert v1_data["cloud_id"] == "lxd"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-lxd"
+ )
+ assert (
+ v1_data["subplatform"]
+ == "seed-dir (/var/lib/cloud/seed/nocloud-net)"
+ )
+ assert v1_data["availability_zone"] is None
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"] == client.instance.name
+ assert v1_data["region"] is None
+
+ @pytest.mark.lxd_vm
+ def test_instance_json_lxd_vm(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "unknown"
+ assert v1_data["platform"] == "lxd"
+ assert v1_data["cloud_id"] == "lxd"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-lxd"
+ )
+ assert any(
+ [
+ "/var/lib/cloud/seed/nocloud-net" in v1_data["subplatform"],
+ "/dev/sr0" in v1_data["subplatform"],
+ ]
+ )
+ assert v1_data["availability_zone"] is None
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"] == client.instance.name
+ assert v1_data["region"] is None
+
+ @pytest.mark.ec2
+ def test_instance_json_ec2(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+ data = json.loads(instance_json_file)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "aws"
+ assert v1_data["platform"] == "ec2"
+ # Different regions will show up as ec2-(gov|china)
+ assert v1_data["cloud_id"].startswith("ec2")
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-ec2"
+ )
+ assert v1_data["subplatform"].startswith("metadata")
+ assert (
+ v1_data["availability_zone"] == client.instance.availability_zone
+ )
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"].startswith("ip-")
+ assert v1_data["region"] == client.cloud.cloud_instance.region
+
+ @pytest.mark.gce
+ def test_instance_json_gce(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "gce"
+ assert v1_data["platform"] == "gce"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-gce"
+ )
+ assert v1_data["subplatform"].startswith("metadata")
+ assert v1_data["availability_zone"] == client.instance.zone
+ assert v1_data["instance_id"] == client.instance.instance_id
+ assert v1_data["local_hostname"] == client.instance.name
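
test_no_problems and test_correct_datasource_detected both read /run/cloud-init/status.json. A short sketch of the same per-stage error check; the path is the file cloud-init writes at runtime, so this is only meaningful on a booted instance:

import json
from pathlib import Path

def stages_with_errors(status_path="/run/cloud-init/status.json"):
    """Return boot stages that recorded errors, as test_no_problems checks."""
    status = json.loads(Path(status_path).read_text())["v1"]
    stages = ("init", "init-local", "modules-config", "modules-final")
    return {s: status[s]["errors"] for s in stages if status[s]["errors"]}
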
diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py
new file mode 100644
index 00000000..96525cac
--- /dev/null
+++ b/tests/integration_tests/modules/test_command_output.py
@@ -0,0 +1,21 @@
+"""Integration test for output redirection.
+
+This test redirects cloud-init's output to a file and then checks that file.
+
+(This is ported from
+``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+output: { all: "| tee -a /var/log/cloud-init-test-output" }
+final_message: "should be last line in cloud-init-test-output file"
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_runcmd(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init-test-output")
+ assert "should be last line in cloud-init-test-output file" in log
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
new file mode 100644
index 00000000..7aaba7db
--- /dev/null
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -0,0 +1,212 @@
+import json
+import os
+from uuid import uuid4
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp(
+ "lxc config device add {} test-disk-setup-disk disk source={}".format(
+ instance.name, DISK_PATH
+ ).split()
+ )
+
+
+@pytest.fixture
+def create_disk():
+ # 640k should be enough for anybody
+ subp("dd if=/dev/zero of={} bs=1k count=640".format(DISK_PATH).split())
+ yield
+ os.remove(DISK_PATH)
+
+
+ALIAS_USERDATA = """\
+#cloud-config
+device_aliases:
+ my_alias: /dev/sdb
+disk_setup:
+ my_alias:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+- label: fs1
+ device: my_alias.1
+ filesystem: ext4
+- label: fs2
+ device: my_alias.2
+ filesystem: ext4
+mounts:
+- ["my_alias.1", "/mnt1"]
+- ["my_alias.2", "/mnt2"]
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestDeviceAliases:
+    """Test that device aliases work for disk setup/mount"""
+
+ def test_device_alias(self, create_disk, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "updated disk_setup device entry 'my_alias' to '/dev/sdb'" in log
+ )
+ assert "changed my_alias.1 => /dev/sdb1" in log
+ assert "changed my_alias.2 => /dev/sdb2" in log
+ verify_clean_log(log)
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 2
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["children"][1]["name"] == "sdb2"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt1"
+ assert sdb["children"][1]["mountpoint"] == "/mnt2"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt1"]
+ assert sdb["children"][1]["mountpoints"] == ["/mnt2"]
+ result = client.execute("mount -a")
+ assert result.return_code == 0
+ assert result.stdout.strip() == ""
+ assert result.stderr.strip() == ""
+ result = client.execute("findmnt -J /mnt1")
+ assert result.return_code == 0
+ result = client.execute("findmnt -J /mnt2")
+ assert result.return_code == 0
+
+
+PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+ - label: test
+ device: /dev/sdb1
+ filesystem: ext4
+ - label: test2
+ device: /dev/sdb2
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt1"]
+- ["/dev/sdb2", "/mnt2"]
+"""
+
+UPDATED_PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [100]
+ overwrite: True
+fs_setup:
+ - label: test3
+ device: /dev/sdb1
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt3"]
+"""
+
+
+@pytest.mark.user_data(PARTPROBE_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestPartProbeAvailability:
+ """Test disk setup works with partprobe
+
+ Disk setup can run successfully on a mounted partition when
+ partprobe is being used.
+
+ lp-1920939
+ """
+
+ def _verify_first_disk_setup(self, client, log):
+ verify_clean_log(log)
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 2
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["children"][1]["name"] == "sdb2"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt1"
+ assert sdb["children"][1]["mountpoint"] == "/mnt2"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt1"]
+ assert sdb["children"][1]["mountpoints"] == ["/mnt2"]
+
+ # Not bionic because the LXD agent gets in the way of us
+ # changing the userdata
+ @pytest.mark.not_bionic
+ def test_disk_setup_when_mounted(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Test lp-1920939.
+
+ We insert an extra disk into our VM, format it to have two partitions,
+ modify our cloud config to mount devices before disk setup, and modify
+        our userdata to set up a single partition on the disk.
+
+ This allows cloud-init to attempt disk setup on a mounted partition.
+ When blockdev is in use, it will fail with
+ "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
+ with a warning and a traceback. When partprobe is in use, everything
+ should work successfully.
+ """
+ log = client.read_from_file("/var/log/cloud-init.log")
+ self._verify_first_disk_setup(client, log)
+
+ # Update our userdata and cloud.cfg to mount then perform new disk
+ # setup
+ client.write_to_file(
+ "/var/lib/cloud/seed/nocloud-net/user-data",
+ UPDATED_PARTPROBE_USERDATA,
+ )
+ client.execute(
+ "sed -i 's/write-files/write-files\\n - mounts/' "
+ "/etc/cloud/cloud.cfg"
+ )
+
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ # Assert new setup works as expected
+ verify_clean_log(log)
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 1
+ assert sdb["children"][0]["name"] == "sdb1"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt3"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt3"]
+
+ def test_disk_setup_no_partprobe(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Ensure disk setup still works as expected without partprobe."""
+ # We can't do this part in a bootcmd because the path has already
+ # been found by the time we get to the bootcmd
+ client.execute("rm $(which partprobe)")
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ self._verify_first_disk_setup(client, log)
+
+ assert "partprobe" not in log
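
_verify_first_disk_setup has to accept both the singular "mountpoint" key emitted by older util-linux releases of lsblk and the "mountpoints" list emitted by newer ones. A sketch that normalizes the two shapes when reading `lsblk --json` locally:

import json
import subprocess

def mountpoints_for(device_name):
    """Return the mount points lsblk reports for a named child device.

    Handles both the older "mountpoint" key and the newer "mountpoints"
    list, mirroring the branches in the assertions above.
    """
    lsblk = json.loads(subprocess.check_output(["lsblk", "--json"], text=True))
    for disk in lsblk["blockdevices"]:
        for child in disk.get("children", []):
            if child["name"] != device_name:
                continue
            if "mountpoints" in child:
                return [m for m in child["mountpoints"] if m]
            return [child["mountpoint"]] if child.get("mountpoint") else []
    return []
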
diff --git a/tests/integration_tests/modules/test_growpart.py b/tests/integration_tests/modules/test_growpart.py
new file mode 100644
index 00000000..67251817
--- /dev/null
+++ b/tests/integration_tests/modules/test_growpart.py
@@ -0,0 +1,68 @@
+import json
+import os
+import pathlib
+from uuid import uuid4
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp(
+ "lxc config device add {} test-disk-setup-disk disk source={}".format(
+ instance.name, DISK_PATH
+ ).split()
+ )
+
+
+@pytest.fixture(scope="class", autouse=True)
+def create_disk():
+ """Create 16M sparse file"""
+ pathlib.Path(DISK_PATH).touch()
+ os.truncate(DISK_PATH, 1 << 24)
+ yield
+ os.remove(DISK_PATH)
+
+
+# Create undersized partition in bootcmd
+ALIAS_USERDATA = """\
+#cloud-config
+bootcmd:
+ - parted /dev/sdb --script \
+ mklabel gpt \
+ mkpart primary 0 1MiB
+ - parted /dev/sdb --script print
+growpart:
+ devices:
+ - "/"
+ - "/dev/sdb1"
+runcmd:
+ - parted /dev/sdb --script print
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestGrowPart:
+ """Test growpart"""
+
+ def test_grow_part(self, client: IntegrationInstance):
+        """Verify the undersized partition is grown to fill the disk."""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "cc_growpart.py[INFO]: '/dev/sdb1' resized:"
+ " changed (/dev/sdb, 1) from" in log
+ )
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 1
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["size"] == "16M"
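
The create_disk fixture builds the 16M test disk as a sparse file: touch the path, then truncate it up to 1 << 24 bytes. A small sketch of that approach, with the scratch path left to the caller:

import os
import pathlib

def make_sparse_file(path, size_bytes=1 << 24):
    """Create a sparse file with a 16 MiB apparent size by default."""
    pathlib.Path(path).touch()
    os.truncate(path, size_bytes)
    # Apparent size is size_bytes; actual blocks allocated on disk stay near zero.
    return os.stat(path).st_size
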
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
new file mode 100644
index 00000000..0bad761e
--- /dev/null
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -0,0 +1,112 @@
+import time
+from collections import namedtuple
+
+import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: ['hotplug']
+"""
+
+ip_addr = namedtuple("ip_addr", "interface state ip4 ip6")
+
+
+def _wait_till_hotplug_complete(client, expected_runs=1):
+ for _ in range(60):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if log.count("Exiting hotplug handler") == expected_runs:
+ return log
+ time.sleep(1)
+ raise Exception("Waiting for hotplug handler failed")
+
+
+def _get_ip_addr(client):
+ ips = []
+ lines = client.execute("ip --brief addr").split("\n")
+ for line in lines:
+ attributes = line.split()
+ interface, state = attributes[0], attributes[1]
+ ip4_cidr = attributes[2] if len(attributes) > 2 else None
+ ip6_cidr = attributes[3] if len(attributes) > 3 else None
+ ip4 = ip4_cidr.split("/")[0] if ip4_cidr else None
+ ip6 = ip6_cidr.split("/")[0] if ip6_cidr else None
+ ip = ip_addr(interface, state, ip4, ip6)
+ ips.append(ip)
+ return ips
+
+
+@pytest.mark.openstack
+# On Bionic, we hit a traceback when attempting to detect the hotplugged
+# device in the updated metadata. This is because Bionic is specifically
+# configured not to provide network metadata.
+@pytest.mark.not_bionic
+@pytest.mark.user_data(USER_DATA)
+def test_hotplug_add_remove(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Exiting hotplug handler" not in log
+ assert client.execute(
+ "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+ ).ok
+
+ # Add new NIC
+ added_ip = client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client, expected_runs=1)
+ ips_after_add = _get_ip_addr(client)
+ new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
+
+ assert len(ips_after_add) == len(ips_before) + 1
+ assert added_ip not in [ip.ip4 for ip in ips_before]
+ assert added_ip in [ip.ip4 for ip in ips_after_add]
+ assert new_addition.state == "UP"
+
+ netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface in config["network"]["ethernets"]
+
+ # Remove new NIC
+ client.instance.remove_network_interface(added_ip)
+ _wait_till_hotplug_complete(client, expected_runs=2)
+ ips_after_remove = _get_ip_addr(client)
+ assert len(ips_after_remove) == len(ips_before)
+ assert added_ip not in [ip.ip4 for ip in ips_after_remove]
+
+ netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface not in config["network"]["ethernets"]
+
+ assert "enabled" == client.execute(
+ "cloud-init devel hotplug-hook -s net query"
+ )
+
+
+@pytest.mark.openstack
+def test_no_hotplug_in_userdata(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Exiting hotplug handler" not in log
+ assert client.execute(
+ "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+ ).failed
+
+ # Add new NIC
+ client.instance.add_network_interface()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "hotplug-hook" not in log
+
+ ips_after_add = _get_ip_addr(client)
+ if len(ips_after_add) == len(ips_before) + 1:
+ # We can see the device, but it should not have been brought up
+ new_ip = [ip for ip in ips_after_add if ip not in ips_before][0]
+ assert new_ip.state == "DOWN"
+ else:
+ assert len(ips_after_add) == len(ips_before)
+
+ assert "disabled" == client.execute(
+ "cloud-init devel hotplug-hook -s net query"
+ )
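
_get_ip_addr builds its namedtuples by splitting the columns of `ip --brief addr`. The sketch below runs the same parsing against a made-up sample of that output, so the expected shape is visible without launching an instance:

from collections import namedtuple

ip_addr = namedtuple("ip_addr", "interface state ip4 ip6")

# Made-up sample of `ip --brief addr` output, used only to exercise the parser.
SAMPLE = """\
lo       UNKNOWN  127.0.0.1/8  ::1/128
enp5s0   UP       10.0.0.5/24  fe80::1/64
"""

def parse_ip_brief(text):
    """Split each line into interface, state and bare IPv4/IPv6 addresses."""
    ips = []
    for line in text.splitlines():
        attributes = line.split()
        if not attributes:
            continue
        ip4 = attributes[2].split("/")[0] if len(attributes) > 2 else None
        ip6 = attributes[3].split("/")[0] if len(attributes) > 3 else None
        ips.append(ip_addr(attributes[0], attributes[1], ip4, ip6))
    return ips

assert parse_ip_brief(SAMPLE)[1].ip4 == "10.0.0.5"
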
diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py
new file mode 100644
index 00000000..7788c6f0
--- /dev/null
+++ b/tests/integration_tests/modules/test_jinja_templating.py
@@ -0,0 +1,33 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+## template: jinja
+#cloud-config
+runcmd:
+ - echo {{v1.local_hostname}} > /var/tmp/runcmd_output
+ - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output
+ - echo {{v1['local-hostname']}} >> /var/tmp/runcmd_output
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_runcmd_with_variable_substitution(client: IntegrationInstance):
+ """Test jinja substitution.
+
+    Ensure underscore-delimited aliases exist for hyphenated keys and that
+    variables from instance-data-sensitive can also be substituted
+    (LP: #1931392).
+ """
+ hostname = client.execute("hostname").stdout.strip()
+ expected = [
+ hostname,
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and "
+ "/etc/cloud/cloud.cfg.d/",
+ hostname,
+ ]
+ output = client.read_from_file("/var/tmp/runcmd_output")
+ verify_ordered_items_in_text(expected, output)
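
The `## template: jinja` header makes cloud-init render the user-data against instance data before parsing it. Cloud-init's own renderer lives elsewhere in the tree; as a rough illustration of the substitution this test relies on, a plain jinja2 Template behaves the same way for attribute-style access on a made-up instance-data dict:

from jinja2 import Template

# Made-up stand-in for /run/cloud-init/instance-data-sensitive.json content.
instance_data = {"v1": {"local_hostname": "example-host"}}

line = "echo {{ v1.local_hostname }} > /var/tmp/runcmd_output"
rendered = Template(line).render(**instance_data)
assert rendered == "echo example-host > /var/tmp/runcmd_output"
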
diff --git a/tests/integration_tests/modules/test_keyboard.py b/tests/integration_tests/modules/test_keyboard.py
new file mode 100644
index 00000000..7db35014
--- /dev/null
+++ b/tests/integration_tests/modules/test_keyboard.py
@@ -0,0 +1,17 @@
+import pytest
+
+USER_DATA = """\
+#cloud-config
+keyboard:
+ layout: de
+ model: pc105
+ variant: nodeadkeys
+ options: compose:rwin
+"""
+
+
+class TestKeyboard:
+ @pytest.mark.user_data(USER_DATA)
+ def test_keyboard(self, client):
+ lc = client.execute("localectl")
+ assert "X11 Layout: de" in lc
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
new file mode 100644
index 00000000..50899982
--- /dev/null
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -0,0 +1,113 @@
+"""Integration tests for the cc_keys_to_console module.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+from tests.integration_tests.util import retry
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+BLACKLIST_ALL_KEYS_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dsa, ssh-ecdsa, ssh-ed25519, ssh-rsa, ssh-dss, ecdsa-sha2-nistp256]
+""" # noqa: E501
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: false
+"""
+
+ENABLE_KEYS_TO_CONSOLE_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: true
+users:
+ - default
+ - name: barfoo
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+ """Test that the blacklist options work as expected."""
+
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+ def test_excluded_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ # retry decorator here because it can take some time to be reflected
+ # in syslog
+ @retry(tries=30, delay=1)
+ @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+ def test_included_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(BLACKLIST_ALL_KEYS_USER_DATA)
+class TestAllKeysToConsoleBlacklist:
+ """Test that when key blacklist contains all key types that
+    """Test that no header/footer are output when the key blacklist
+    contains all key types.
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+ """Test that output can be fully disabled."""
+
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+ def test_keys_excluded(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(ENABLE_KEYS_TO_CONSOLE_USER_DATA)
+@pytest.mark.ec2
+@pytest.mark.lxd_container
+@pytest.mark.oci
+@pytest.mark.openstack
+class TestKeysToConsoleEnabled:
+    """Test that output is emitted when explicitly enabled."""
+
+ def test_duplicate_messaging_console_log(self, class_client):
+ class_client.execute("cloud-init status --wait --long").ok
+ try:
+ console_log = class_client.instance.console_log()
+ except NotImplementedError:
+ # Assume that an exception here means that we can't use the console
+ # log
+ pytest.skip("NotImplementedError when requesting console log")
+ return
+ if console_log.lower() == "no console output":
+ # This test retries because we might not have the full console log
+ # on the first fetch. However, if we have no console output
+ # at all, we don't want to keep retrying as that would trigger
+ # another 5 minute wait on the pycloudlib side, which could
+ # leave us waiting for a couple hours
+ pytest.fail("no console output")
+ return
+ msg = "no authorized SSH keys fingerprints found for user barfoo."
+ assert 1 == console_log.count(msg)
diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py
new file mode 100644
index 00000000..3292a833
--- /dev/null
+++ b/tests/integration_tests/modules/test_lxd_bridge.py
@@ -0,0 +1,46 @@
+"""Integration tests for LXD bridge creation.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.)
+"""
+import pytest
+import yaml
+
+from tests.integration_tests.util import verify_clean_log
+
+USER_DATA = """\
+#cloud-config
+lxd:
+ init:
+ storage_backend: dir
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.100.100.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.100.100.100
+ ipv4_dhcp_last: 10.100.100.200
+ ipv4_nat: true
+ domain: lxd
+"""
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(USER_DATA)
+class TestLxdBridge:
+ @pytest.mark.parametrize("binary_name", ["lxc", "lxd"])
+ def test_binaries_installed(self, class_client, binary_name):
+ """Check that the expected LXD binaries are installed"""
+ assert class_client.execute(["which", binary_name]).ok
+
+ def test_bridge(self, class_client):
+ """Check that the given bridge is configured"""
+ cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(cloud_init_log)
+
+ # The bridge should exist
+ assert class_client.execute("ip addr show lxdbr0")
+
+ raw_network_config = class_client.execute("lxc network show lxdbr0")
+ network_config = yaml.safe_load(raw_network_config)
+ assert "10.100.100.1/24" == network_config["config"]["ipv4.address"]
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
index e72389c1..fc62e63b 100644
--- a/tests/integration_tests/modules/test_ntp_servers.py
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -1,14 +1,18 @@
-"""Integration test for the ntp module's ``servers`` functionality with ntp.
+"""Integration test for the ntp module's ntp functionality.
This test specifies the use of the `ntp` NTP client, and ensures that the given
NTP servers are configured as expected.
-(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.)
+(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``,
+``tests/cloud_tests/testcases/modules/ntp_pools.yaml``,
+and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``)
"""
import re
-import yaml
import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
USER_DATA = """\
#cloud-config
@@ -17,21 +21,25 @@ ntp:
servers:
- 172.16.15.14
- 172.16.17.18
+ pools:
+ - 0.cloud-init.mypool
+ - 1.cloud-init.mypool
+ - 172.16.15.15
"""
EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"]
+EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"]
-@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestNtpServers:
-
- def test_ntp_installed(self, class_client):
+ def test_ntp_installed(self, class_client: IntegrationInstance):
"""Test that `ntpd --version` succeeds, indicating installation."""
- result = class_client.execute("ntpd --version")
- assert 0 == result.return_code
+ assert class_client.execute("ntpd --version").ok
- def test_dist_config_file_is_empty(self, class_client):
+ def test_dist_config_file_is_empty(
+ self, class_client: IntegrationInstance
+ ):
"""Test that the distributed config file is empty.
(This test is skipped on all currently supported Ubuntu releases, so
@@ -42,17 +50,79 @@ class TestNtpServers:
dist_file = class_client.read_from_file("/etc/ntp.conf.dist")
assert 0 == len(dist_file.strip().splitlines())
- def test_ntp_entries(self, class_client):
+ def test_ntp_entries(self, class_client: IntegrationInstance):
ntp_conf = class_client.read_from_file("/etc/ntp.conf")
for expected_server in EXPECTED_SERVERS:
assert re.search(
r"^server {} iburst".format(expected_server),
ntp_conf,
- re.MULTILINE
+ re.MULTILINE,
+ )
+ for expected_pool in EXPECTED_POOLS:
+ assert re.search(
+ r"^pool {} iburst".format(expected_pool),
+ ntp_conf,
+ re.MULTILINE,
)
- def test_ntpq_servers(self, class_client):
+ def test_ntpq_servers(self, class_client: IntegrationInstance):
result = class_client.execute("ntpq -p -w -n")
assert result.ok
- for expected_server in EXPECTED_SERVERS:
- assert expected_server in result.stdout
+ for expected_server_or_pool in [*EXPECTED_SERVERS, *EXPECTED_POOLS]:
+ assert expected_server_or_pool in result.stdout
+
+
+CHRONY_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: chrony
+ servers:
+ - 172.16.15.14
+"""
+
+
+@pytest.mark.user_data(CHRONY_DATA)
+def test_chrony(client: IntegrationInstance):
+ if client.execute("test -f /etc/chrony.conf").ok:
+ chrony_conf = "/etc/chrony.conf"
+ else:
+ chrony_conf = "/etc/chrony/chrony.conf"
+ contents = client.read_from_file(chrony_conf)
+ assert "server 172.16.15.14" in contents
+
+
+TIMESYNCD_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: systemd-timesyncd
+ servers:
+ - 172.16.15.14
+"""
+
+
+@pytest.mark.user_data(TIMESYNCD_DATA)
+def test_timesyncd(client: IntegrationInstance):
+ contents = client.read_from_file(
+ "/etc/systemd/timesyncd.conf.d/cloud-init.conf"
+ )
+ assert "NTP=172.16.15.14" in contents
+
+
+EMPTY_NTP = """\
+#cloud-config
+ntp:
+ ntp_client: ntp
+ pools: []
+ servers: []
+"""
+
+
+@pytest.mark.user_data(EMPTY_NTP)
+def test_empty_ntp(client: IntegrationInstance):
+ assert client.execute("ntpd --version").ok
+ assert client.execute("test -f /etc/ntp.conf.dist").failed
+ assert "pool.ntp.org iburst" in client.execute(
+ 'grep -v "^#" /etc/ntp.conf'
+ )
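
test_ntp_entries checks the rendered /etc/ntp.conf with regexes anchored to the start of a line. The same check can be sketched against a made-up sample of such a config, which also shows why re.MULTILINE is needed:

import re

# Made-up sample of a rendered /etc/ntp.conf, used only for illustration.
SAMPLE_NTP_CONF = """\
# cloud-init generated config
server 172.16.15.14 iburst
server 172.16.17.18 iburst
pool 0.cloud-init.mypool iburst
"""

def is_configured(kind, value, conf=SAMPLE_NTP_CONF):
    """Check for an anchored `server ...` or `pool ...` entry, as the test does."""
    pattern = r"^{} {} iburst".format(kind, re.escape(value))
    return re.search(pattern, conf, re.MULTILINE) is not None

assert is_configured("server", "172.16.15.14")
assert not is_configured("pool", "172.16.15.15")
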
diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py
index 8a38ad84..d668d81c 100644
--- a/tests/integration_tests/modules/test_package_update_upgrade_install.py
+++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py
@@ -13,8 +13,8 @@ NOTE: the testcase for this looks for the command in history.log as
"""
import re
-import pytest
+import pytest
USER_DATA = """\
#cloud-config
@@ -26,9 +26,9 @@ package_upgrade: true
"""
+@pytest.mark.ubuntu
@pytest.mark.user_data(USER_DATA)
class TestPackageUpdateUpgradeInstall:
-
def assert_package_installed(self, pkg_out, name, version=None):
"""Check dpkg-query --show output for matching package name.
@@ -37,7 +37,8 @@ class TestPackageUpdateUpgradeInstall:
version.
"""
pkg_match = re.search(
- "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE)
+ "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE
+ )
if pkg_match:
installed_version = pkg_match.group("version")
if not version:
@@ -45,8 +46,10 @@ class TestPackageUpdateUpgradeInstall:
if installed_version.startswith(version):
return # Success
raise AssertionError(
- "Expected package version %s-%s not found. Found %s" %
- name, version, installed_version)
+            "Expected package version %s-%s not found. Found %s"
+            % (name, version, installed_version)
+ )
raise AssertionError("Package not installed: %s" % name)
def test_new_packages_are_installed(self, class_client):
@@ -57,11 +60,13 @@ class TestPackageUpdateUpgradeInstall:
def test_packages_were_updated(self, class_client):
out = class_client.execute(
- "grep ^Commandline: /var/log/apt/history.log")
+ "grep ^Commandline: /var/log/apt/history.log"
+ )
assert (
"Commandline: /usr/bin/apt-get --option=Dpkg::Options"
"::=--force-confold --option=Dpkg::options::=--force-unsafe-io "
- "--assume-yes --quiet install sl tree") in out
+ "--assume-yes --quiet install sl tree" in out
+ )
def test_packages_were_upgraded(self, class_client):
"""Test cloud-init-output for install & upgrade stuff."""
diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
new file mode 100644
index 00000000..33527e1e
--- /dev/null
+++ b/tests/integration_tests/modules/test_persistence.py
@@ -0,0 +1,32 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test the behavior of loading/discarding pickle data"""
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ ASSETS_DIR,
+ verify_ordered_items_in_text,
+)
+
+PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl")
+TEST_PICKLE = ASSETS_DIR / "trusty_with_mime.pkl"
+
+
+@pytest.mark.lxd_container
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ assert client.execute("cloud-init status --wait").ok
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_ordered_items_in_text(
+ [
+ "Unable to unpickle datasource: 'MIMEMultipart' object has no "
+ "attribute 'policy'. Ignoring current cache.",
+ "no cache found",
+ "Searching for local data source",
+ "SUCCESS: found local data from DataSourceNoCloud",
+ ],
+ log,
+ )
diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
new file mode 100644
index 00000000..5cd19764
--- /dev/null
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -0,0 +1,97 @@
+"""Integration test of the cc_power_state_change module.
+
+Test that the power state config options work as expected.
+"""
+
+import time
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+power_state:
+ delay: {delay}
+ mode: {mode}
+ message: msg
+ timeout: {timeout}
+ condition: {condition}
+"""
+
+
+def _detect_reboot(instance: IntegrationInstance):
+    # We'll wait for the instance to come up here, but we don't know if we're
+ # detecting the first boot or second boot, so we also check
+ # the logs to ensure we've booted twice. If the logs show we've
+ # only booted once, wait until we've booted twice
+ instance.instance.wait()
+ for _ in range(600):
+ try:
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ boot_count = log.count("running 'init-local'")
+ if boot_count == 1:
+ instance.instance.wait()
+ elif boot_count > 1:
+ break
+ except Exception:
+ pass
+ time.sleep(1)
+ else:
+ raise Exception("Could not detect reboot")
+
+
+def _can_connect(instance):
+ return instance.execute("true").ok
+
+
+# This test is marked unstable because even though it should be able to
+# run anywhere, I can only get it to run in an lxd container, and even then
+# occasionally some timing issues will crop up.
+@pytest.mark.unstable
+@pytest.mark.ubuntu
+@pytest.mark.lxd_container
+class TestPowerChange:
+ @pytest.mark.parametrize(
+ "mode,delay,timeout,expected",
+ [
+ ("poweroff", "now", "10", "will execute: shutdown -P now msg"),
+ ("reboot", "now", "0", "will execute: shutdown -r now msg"),
+ ("halt", "+1", "0", "will execute: shutdown -H +1 msg"),
+ ],
+ )
+ def test_poweroff(
+ self, session_cloud: IntegrationCloud, mode, delay, timeout, expected
+ ):
+ with session_cloud.launch(
+ user_data=USER_DATA.format(
+ delay=delay, mode=mode, timeout=timeout, condition="true"
+ ),
+ launch_kwargs={"wait": False},
+ ) as instance:
+ if mode == "reboot":
+ _detect_reboot(instance)
+ else:
+ instance.instance.wait_for_stop()
+ instance.instance.start(wait=True)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ assert _can_connect(instance)
+ lines_to_check = [
+ "Running module power-state-change",
+ expected,
+ "running 'init-local'",
+ "config-power-state-change already ran",
+ ]
+ verify_ordered_items_in_text(lines_to_check, log)
+
+ @pytest.mark.user_data(
+ USER_DATA.format(
+ delay="0", mode="poweroff", timeout="0", condition="false"
+ )
+ )
+ def test_poweroff_false_condition(self, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert _can_connect(client)
+ assert "Condition was false. Will not perform state change" in log
diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py
new file mode 100644
index 00000000..1bd9cee4
--- /dev/null
+++ b/tests/integration_tests/modules/test_puppet.py
@@ -0,0 +1,39 @@
+"""Test installation configuration of puppet module."""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+SERVICE_DATA = """\
+#cloud-config
+puppet:
+ install: true
+ install_type: packages
+"""
+
+
+@pytest.mark.user_data(SERVICE_DATA)
+def test_puppet_service(client: IntegrationInstance):
+ """Basic test that puppet gets installed and runs."""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert client.execute("systemctl is-active puppet").ok
+ assert "Running command ['puppet', 'agent'" not in log
+
+
+EXEC_DATA = """\
+#cloud-config
+puppet:
+ install: true
+ install_type: packages
+ exec: true
+ exec_args: ['--noop']
+"""
+
+
+@pytest.mark.user_data(EXEC_DATA)
+def test_puppet_exec(client: IntegrationInstance):
+    """Basic test that puppet is installed and the agent is run with --noop."""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Running command ['puppet', 'agent', '--noop']" in log
diff --git a/tests/integration_tests/modules/test_runcmd.py b/tests/integration_tests/modules/test_runcmd.py
deleted file mode 100644
index 50d1851e..00000000
--- a/tests/integration_tests/modules/test_runcmd.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the runcmd module.
-
-This test specifies a command to be executed by the ``runcmd`` module
-and then checks if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-"""
-
-
-@pytest.mark.ci
-class TestRuncmd:
-
- @pytest.mark.user_data(USER_DATA)
- def test_runcmd(self, client):
- runcmd_output = client.read_from_file("/var/tmp/run_cmd")
- assert runcmd_output.strip() == "cloud-init run cmd test"
diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py
deleted file mode 100644
index b365fa98..00000000
--- a/tests/integration_tests/modules/test_seed_random_data.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Integration test for the random seed module.
-
-This test specifies a command to be executed by the ``seed_random`` module, by
-providing a different data to be used as seed data. We will then check
-if that seed data was actually used.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/seed_random_data.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-random_seed:
- data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
- encoding: raw
- file: /root/seed
-"""
-
-
-@pytest.mark.ci
-class TestSeedRandomData:
-
- @pytest.mark.user_data(USER_DATA)
- def test_seed_random_data(self, client):
- seed_output = client.read_from_file("/root/seed")
- assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df"
diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py
index 2bfa403d..ae0aeae9 100644
--- a/tests/integration_tests/modules/test_set_hostname.py
+++ b/tests/integration_tests/modules/test_set_hostname.py
@@ -11,7 +11,6 @@ after the system is boot.
import pytest
-
USER_DATA_HOSTNAME = """\
#cloud-config
hostname: cloudinit2
@@ -24,15 +23,31 @@ hostname: cloudinit1
fqdn: cloudinit2.i9n.cloud-init.io
"""
+USER_DATA_PREFER_FQDN = """\
+#cloud-config
+prefer_fqdn_over_hostname: {}
+hostname: cloudinit1
+fqdn: cloudinit2.test.io
+"""
+
@pytest.mark.ci
class TestHostname:
-
@pytest.mark.user_data(USER_DATA_HOSTNAME)
def test_hostname(self, client):
hostname_output = client.execute("hostname")
assert "cloudinit2" in hostname_output.strip()
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True))
+ def test_prefer_fqdn(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit2.test.io" in hostname_output.strip()
+
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False))
+ def test_prefer_short_hostname(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit1" in hostname_output.strip()
+
@pytest.mark.user_data(USER_DATA_FQDN)
def test_hostname_and_fqdn(self, client):
hostname_output = client.execute("hostname")
@@ -42,6 +57,8 @@ class TestHostname:
assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip()
host_output = client.execute("grep ^127 /etc/hosts")
- assert '127.0.1.1 {} {}'.format(
- fqdn_output, hostname_output) in host_output
- assert '127.0.0.1 localhost' in host_output
+ assert (
+ "127.0.1.1 {} {}".format(fqdn_output, hostname_output)
+ in host_output
+ )
+ assert "127.0.0.1 localhost" in host_output
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index b13f76fb..0e35cd26 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -8,11 +8,10 @@ other tests chpasswd's list being a string. Both expect the same results, so
they use a mixin to share their test definitions, because we can (of course)
only specify one user-data per instance.
"""
-import crypt
-
import pytest
import yaml
+from tests.integration_tests.util import retry
COMMON_USER_DATA = """\
#cloud-config
@@ -40,7 +39,9 @@ Uh69tP4GSrGW5XKHxMLiKowJgm/"
lock_passwd: false
"""
-LIST_USER_DATA = COMMON_USER_DATA + """
+LIST_USER_DATA = (
+ COMMON_USER_DATA
+ + """
chpasswd:
list:
- tom:mypassword123!
@@ -48,8 +49,11 @@ chpasswd:
- harry:RANDOM
- mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
"""
+)
-STRING_USER_DATA = COMMON_USER_DATA + """
+STRING_USER_DATA = (
+ COMMON_USER_DATA
+ + """
chpasswd:
list: |
tom:mypassword123!
@@ -57,6 +61,7 @@ chpasswd:
harry:RANDOM
mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
"""
+)
USERS_DICTS = yaml.safe_load(COMMON_USER_DATA)["users"]
USERS_PASSWD_VALUES = {
@@ -116,14 +121,52 @@ class Mixin:
# Which are not the same
assert shadow_users["harry"] != shadow_users["dick"]
+ def test_random_passwords_not_stored_in_cloud_init_output_log(
+ self, class_client
+ ):
+ """We should not emit passwords to the in-instance log file.
+
+ LP: #1918303
+ """
+ cloud_init_output = class_client.read_from_file(
+ "/var/log/cloud-init-output.log"
+ )
+ assert "dick:" not in cloud_init_output
+ assert "harry:" not in cloud_init_output
+
+ @retry(tries=30, delay=1)
+ def test_random_passwords_emitted_to_serial_console(self, class_client):
+ """We should emit passwords to the serial console. (LP: #1918303)"""
+ try:
+ console_log = class_client.instance.console_log()
+ except NotImplementedError:
+ # Assume that an exception here means that we can't use the console
+ # log
+ pytest.skip("NotImplementedError when requesting console log")
+ return
+ if console_log.lower() == "no console output":
+ # This test retries because we might not have the full console log
+ # on the first fetch. However, if we have no console output
+ # at all, we don't want to keep retrying as that would trigger
+ # another 5 minute wait on the pycloudlib side, which could
+ # leave us waiting for a couple hours
+ pytest.fail("no console output")
+ return
+ assert "dick:" in console_log
+ assert "harry:" in console_log
+
def test_explicit_password_set_correctly(self, class_client):
"""Test that an explicitly-specified password is set correctly."""
shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
fmt_and_salt = shadow_users["tom"].rsplit("$", 1)[0]
- expected_value = crypt.crypt("mypassword123!", fmt_and_salt)
-
- assert expected_value == shadow_users["tom"]
+ GEN_CRYPT_CONTENT = (
+ "import crypt\n"
+ f"print(crypt.crypt('mypassword123!', '{fmt_and_salt}'))\n"
+ )
+ class_client.write_to_file("/gen_crypt.py", GEN_CRYPT_CONTENT)
+ result = class_client.execute("python3 /gen_crypt.py")
+ assert result.stdout == shadow_users["tom"]
def test_shadow_expected_users(self, class_client):
"""Test that the right set of users is in /etc/shadow."""
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
deleted file mode 100644
index b626f6b0..00000000
--- a/tests/integration_tests/modules/test_snap.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""Integration test for the snap module.
-
-This test specifies a command to be executed by the ``snap`` module
-and then checks that if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-package_update: true
-snap:
- squashfuse_in_container: true
- commands:
- - snap install hello-world
-"""
-
-
-@pytest.mark.ci
-class TestSnap:
-
- @pytest.mark.user_data(USER_DATA)
- def test_snap(self, client):
- snap_output = client.execute("snap list")
- assert "core " in snap_output
- assert "hello-world " in snap_output
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
index b9b0d85e..89b49576 100644
--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -12,13 +12,14 @@ import re
import pytest
+from tests.integration_tests.util import retry
USER_DATA_SSH_AUTHKEY_DISABLE = """\
#cloud-config
no_ssh_fingerprints: true
"""
-USER_DATA_SSH_AUTHKEY_ENABLE="""\
+USER_DATA_SSH_AUTHKEY_ENABLE = """\
#cloud-config
ssh_genkeytypes:
- ecdsa
@@ -30,19 +31,22 @@ ssh_authorized_keys:
@pytest.mark.ci
class TestSshAuthkeyFingerprints:
-
@pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_DISABLE)
def test_ssh_authkey_fingerprints_disable(self, client):
cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
assert (
"Skipping module named ssh-authkey-fingerprints, "
- "logging of SSH fingerprints disabled") in cloudinit_output
+ "logging of SSH fingerprints disabled" in cloudinit_output
+ )
+ # retry decorator here because it can take some time to be reflected
+ # in syslog
+ @retry(tries=30, delay=1)
@pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_ENABLE)
def test_ssh_authkey_fingerprints_enable(self, client):
syslog_output = client.read_from_file("/var/log/syslog")
- assert re.search(r'256 SHA256:.*(ECDSA)', syslog_output) is not None
- assert re.search(r'256 SHA256:.*(ED25519)', syslog_output) is not None
- assert re.search(r'1024 SHA256:.*(DSA)', syslog_output) is None
- assert re.search(r'2048 SHA256:.*(RSA)', syslog_output) is None
+ assert re.search(r"256 SHA256:.*(ECDSA)", syslog_output) is not None
+ assert re.search(r"256 SHA256:.*(ED25519)", syslog_output) is not None
+ assert re.search(r"1024 SHA256:.*(DSA)", syslog_output) is None
+ assert re.search(r"2048 SHA256:.*(RSA)", syslog_output) is None
diff --git a/tests/integration_tests/modules/test_ssh_generate.py b/tests/integration_tests/modules/test_ssh_generate.py
index 60c36982..1dd0adf1 100644
--- a/tests/integration_tests/modules/test_ssh_generate.py
+++ b/tests/integration_tests/modules/test_ssh_generate.py
@@ -10,7 +10,6 @@ keys were created.
import pytest
-
USER_DATA = """\
#cloud-config
ssh_genkeytypes:
@@ -23,28 +22,27 @@ authkey_hash: sha512
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestSshKeysGenerate:
-
@pytest.mark.parametrize(
- "ssh_key_path", (
+ "ssh_key_path",
+ (
"/etc/ssh/ssh_host_dsa_key.pub",
"/etc/ssh/ssh_host_dsa_key",
"/etc/ssh/ssh_host_rsa_key.pub",
"/etc/ssh/ssh_host_rsa_key",
- )
+ ),
)
def test_ssh_keys_not_generated(self, ssh_key_path, class_client):
- out = class_client.execute(
- "test -e {}".format(ssh_key_path)
- )
+ out = class_client.execute("test -e {}".format(ssh_key_path))
assert out.failed
@pytest.mark.parametrize(
- "ssh_key_path", (
+ "ssh_key_path",
+ (
"/etc/ssh/ssh_host_ecdsa_key.pub",
"/etc/ssh/ssh_host_ecdsa_key",
"/etc/ssh/ssh_host_ed25519_key.pub",
"/etc/ssh/ssh_host_ed25519_key",
- )
+ ),
)
def test_ssh_keys_generated(self, ssh_key_path, class_client):
out = class_client.read_from_file(ssh_key_path)
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
deleted file mode 100644
index 45d37d6c..00000000
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""Integration test for the ssh_import_id module.
-
-This test specifies ssh keys to be imported by the ``ssh_import_id`` module
-and then checks that if the ssh keys were successfully imported.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-ssh_import_id:
- - gh:powersj
- - lp:smoser
-"""
-
-
-@pytest.mark.ci
-class TestSshImportId:
-
- @pytest.mark.user_data(USER_DATA)
- def test_ssh_import_id(self, client):
- ssh_output = client.read_from_file(
- "/home/ubuntu/.ssh/authorized_keys")
-
- assert '# ssh-import-id gh:powersj' in ssh_output
- assert '# ssh-import-id lp:smoser' in ssh_output
diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py
index 27d193c1..b79f18eb 100644
--- a/tests/integration_tests/modules/test_ssh_keys_provided.py
+++ b/tests/integration_tests/modules/test_ssh_keys_provided.py
@@ -9,7 +9,6 @@ system.
import pytest
-
USER_DATA = """\
#cloud-config
disable_root: false
@@ -82,67 +81,60 @@ ssh_keys:
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestSshKeysProvided:
-
- def test_ssh_dsa_keys_provided(self, class_client):
- """Test dsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub")
- assert (
- "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
- "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out
-
- """Test dsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key")
- assert (
- "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
- "hOVAfzZ6+jklP") in out
-
- def test_ssh_rsa_keys_provided(self, class_client):
- """Test rsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub")
- assert (
- "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
- "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out
-
- """Test rsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key")
- assert (
- "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
- "RQvLZpMRdywBm") in out
-
- def test_ssh_rsa_certificate_provided(self, class_client):
- """Test rsa certificate was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub")
- assert (
- "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
- "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out
-
- def test_ssh_certificate_updated_sshd_config(self, class_client):
- """Test ssh certificate was added to /etc/ssh/sshd_config."""
- out = class_client.read_from_file("/etc/ssh/sshd_config").strip()
- assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out
-
- def test_ssh_ecdsa_keys_provided(self, class_client):
- """Test ecdsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub")
- assert (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
- "BBFsS5Tvky/IC/dXhE/afxxU") in out
-
- """Test ecdsa private key generated."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key")
- assert (
- "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
- "5mpZqxgX4vcgb") in out
-
- def test_ssh_ed25519_keys_provided(self, class_client):
- """Test ed25519 public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub")
- assert (
- "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
- "G15dqjQ2XkNVOEnb5") in out
-
- """Test ed25519 private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key")
- assert (
- "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
- "OhteXao0Nl5DVThJ2+Q") in out
+ @pytest.mark.parametrize(
+ "config_path,expected_out",
+ (
+ (
+ "/etc/ssh/ssh_host_dsa_key.pub",
+ "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
+ "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM",
+ ),
+ (
+ "/etc/ssh/ssh_host_dsa_key",
+ "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
+ "hOVAfzZ6+jklP",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key.pub",
+ "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
+ "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key",
+ "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
+ "RQvLZpMRdywBm",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key-cert.pub",
+ "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
+ "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD",
+ ),
+ (
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub",
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
+ "BBFsS5Tvky/IC/dXhE/afxxU",
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key",
+ "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
+ "5mpZqxgX4vcgb",
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key.pub",
+ "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
+ "G15dqjQ2XkNVOEnb5",
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key",
+ "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
+ "OhteXao0Nl5DVThJ2+Q",
+ ),
+ ),
+ )
+ def test_ssh_provided_keys(self, config_path, expected_out, class_client):
+ out = class_client.read_from_file(config_path).strip()
+ assert expected_out in out
diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
new file mode 100644
index 00000000..8330a1ce
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -0,0 +1,224 @@
+from io import StringIO
+
+import paramiko
+import pytest
+from paramiko.ssh_exception import SSHException
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_test_rsa_keypair
+
+TEST_USER1_KEYS = get_test_rsa_keypair("test1")
+TEST_USER2_KEYS = get_test_rsa_keypair("test2")
+TEST_DEFAULT_KEYS = get_test_rsa_keypair("test3")
+
+_USERDATA = """\
+#cloud-config
+bootcmd:
+ - {bootcmd}
+ssh_authorized_keys:
+ - {default}
+users:
+- default
+- name: test_user1
+ ssh_authorized_keys:
+ - {user1}
+- name: test_user2
+ ssh_authorized_keys:
+ - {user2}
+""".format(
+ bootcmd="{bootcmd}",
+ default=TEST_DEFAULT_KEYS.public_key,
+ user1=TEST_USER1_KEYS.public_key,
+ user2=TEST_USER2_KEYS.public_key,
+)
+
+
+def common_verify(client, expected_keys):
+ for user, filename, keys in expected_keys:
+ # Ensure key is in the key file
+ contents = client.read_from_file(filename)
+ if user in ["ubuntu", "root"]:
+ lines = contents.split("\n")
+ if user == "root":
+ # Our personal public key gets added by pycloudlib in
+ # addition to the default `ssh_authorized_keys`
+ assert len(lines) == 2
+ else:
+ # Clouds will insert the keys we've added to our accounts
+ # or for our launches
+ assert len(lines) >= 2
+ assert keys.public_key.strip() in contents
+ else:
+ assert contents.strip() == keys.public_key.strip()
+
+ # Ensure we can actually connect
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ paramiko_key = paramiko.RSAKey.from_private_key(
+ StringIO(keys.private_key)
+ )
+
+ # Will fail with AuthenticationException if
+ # we cannot connect
+ ssh.connect(
+ client.instance.ip,
+ username=user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+ # Ensure other uses can't connect using our key
+ other_users = [u[0] for u in expected_keys if u[2] != keys]
+ for other_user in other_users:
+ with pytest.raises(SSHException):
+ print(
+ "trying to connect as {} with key from {}".format(
+ other_user, user
+ )
+ )
+ ssh.connect(
+ client.instance.ip,
+ username=other_user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+ # Ensure we haven't messed with any /home permissions
+ # See LP: #1940233
+ home_dir = "/home/{}".format(user)
+ # Home permissions aren't consistent between releases. On ubuntu
+ # this can change to 750 once focal is unsupported.
+ if ImageSpecification.from_os_image().release in ("bionic", "focal"):
+ home_perms = "755"
+ else:
+ home_perms = "750"
+ if user == "root":
+ home_dir = "/root"
+ home_perms = "700"
+ assert "{} {}".format(user, home_perms) == client.execute(
+ 'stat -c "%U %a" {}'.format(home_dir)
+ )
+ if client.execute("test -d {}/.ssh".format(home_dir)).ok:
+ assert "{} 700".format(user) == client.execute(
+ 'stat -c "%U %a" {}/.ssh'.format(home_dir)
+ )
+ assert "{} 600".format(user) == client.execute(
+ 'stat -c "%U %a" {}'.format(filename)
+ )
+
+ # Also ensure ssh-keygen works as expected
+ client.execute("mkdir {}/.ssh".format(home_dir))
+ assert client.execute(
+ "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format(
+ home_dir
+ )
+ ).ok
+ assert client.execute("test -f {}/.ssh/id_rsa".format(home_dir))
+ assert client.execute("test -f {}/.ssh/id_rsa.pub".format(home_dir))
+
+ assert "root 755" == client.execute('stat -c "%U %a" /home')
+
+
+DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""')
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_KEYS_USERDATA)
+def test_authorized_keys_default(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/home/test_user1/.ssh/authorized_keys",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/home/test_user2/.ssh/authorized_keys",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/home/ubuntu/.ssh/authorized_keys", TEST_DEFAULT_KEYS),
+ ("root", "/root/.ssh/authorized_keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA)
+def test_authorized_keys2(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/home/test_user1/.ssh/authorized_keys2",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/home/test_user2/.ssh/authorized_keys2",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/home/ubuntu/.ssh/authorized_keys2", TEST_DEFAULT_KEYS),
+ ("root", "/root/.ssh/authorized_keys2", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+NESTED_KEYS_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(NESTED_KEYS_USERDATA)
+def test_nested_keys(client: IntegrationInstance):
+ expected_keys = [
+ ("test_user1", "/home/test_user1/foo/bar/ssh/keys", TEST_USER1_KEYS),
+ ("test_user2", "/home/test_user2/foo/bar/ssh/keys", TEST_USER2_KEYS),
+ ("ubuntu", "/home/ubuntu/foo/bar/ssh/keys", TEST_DEFAULT_KEYS),
+ ("root", "/root/foo/bar/ssh/keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+EXTERNAL_KEYS_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA)
+def test_external_keys(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/etc/ssh/authorized_keys/test_user1/keys",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/etc/ssh/authorized_keys/test_user2/keys",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/etc/ssh/authorized_keys/ubuntu/keys", TEST_DEFAULT_KEYS),
+ ("root", "/etc/ssh/authorized_keys/root/keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
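The bootcmd sed one-liners above repoint sshd's AuthorizedKeysFile directive; per sshd_config(5), %h expands to the user's home directory and %u to the username. A small illustrative sketch (not part of the patch) of how those patterns map onto the paths the tests read back:

    def expand_authorized_keys_file(pattern: str, user: str, home: str) -> str:
        # Mimic sshd_config(5) token expansion for AuthorizedKeysFile entries.
        return pattern.replace("%h", home).replace("%u", user)

    # "%h/.ssh/authorized_keys2" -> "/home/test_user1/.ssh/authorized_keys2"
    print(expand_authorized_keys_file(
        "%h/.ssh/authorized_keys2", "test_user1", "/home/test_user1"))
    # "/etc/ssh/authorized_keys/%u/keys" -> "/etc/ssh/authorized_keys/root/keys"
    print(expand_authorized_keys_file(
        "/etc/ssh/authorized_keys/%u/keys", "root", "/root"))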
diff --git a/tests/integration_tests/modules/test_timezone.py b/tests/integration_tests/modules/test_timezone.py
deleted file mode 100644
index 111d53f7..00000000
--- a/tests/integration_tests/modules/test_timezone.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the timezone module.
-
-This test specifies a timezone to be used by the ``timezone`` module
-and then checks that if that timezone was respected during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/timezone.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-timezone: US/Aleutian
-"""
-
-
-@pytest.mark.ci
-class TestTimezone:
-
- @pytest.mark.user_data(USER_DATA)
- def test_timezone(self, client):
- timezone_output = client.execute(
- 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"')
- assert timezone_output.strip() == "HDT"
diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py
new file mode 100644
index 00000000..e4a4241f
--- /dev/null
+++ b/tests/integration_tests/modules/test_user_events.py
@@ -0,0 +1,110 @@
+"""Test user-overridable events.
+
+This is currently limited to applying network config on BOOT events.
+"""
+
+import re
+
+import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _add_dummy_bridge_to_netplan(client: IntegrationInstance):
+ # Update netplan configuration to ensure it doesn't change on reboot
+ netplan = yaml.safe_load(
+ client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ )
+ # Just a dummy bridge to do nothing
+ try:
+ netplan["network"]["bridges"]["dummy0"] = {"dhcp4": False}
+ except KeyError:
+ netplan["network"]["bridges"] = {"dummy0": {"dhcp4": False}}
+
+ dumped_netplan = yaml.dump(netplan)
+ client.write_to_file("/etc/netplan/50-cloud-init.yaml", dumped_netplan)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+def test_boot_event_disabled_by_default(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "network config is disabled" in log:
+ pytest.skip("network config disabled. Test doesn't apply")
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute("rm /var/log/cloud-init.log")
+
+ client.restart()
+ log2 = client.read_from_file("/var/log/cloud-init.log")
+
+ if "cache invalid in datasource" in log2:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
+
+ # We attempt to apply network config twice on every boot.
+ # Ensure neither time works.
+ assert 2 == len(
+ re.findall(
+ r"Event Denied: scopes=\['network'\] EventType=boot[^-]", log2
+ )
+ )
+ assert 2 == log2.count(
+ "Event Denied: scopes=['network'] EventType=boot-legacy"
+ )
+ assert 2 == log2.count(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed"
+ )
+
+ assert "dummy0" in client.execute("ls /sys/class/net")
+
+
+def _test_network_config_applied_on_reboot(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "network config is disabled" in log:
+ pytest.skip("network config disabled. Test doesn't apply")
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('echo "" > /var/log/cloud-init.log')
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "cache invalid in datasource" in log:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
+
+ assert "Event Allowed: scope=network EventType=boot" in log
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+
+@pytest.mark.azure
+def test_boot_event_enabled_by_default(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
+
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: [boot]
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_boot_event_enabled(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
index 6a51f5a6..fddff681 100644
--- a/tests/integration_tests/modules/test_users_groups.py
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -1,12 +1,15 @@
-"""Integration test for the user_groups module.
+"""Integration tests for the user_groups module.
-This test specifies a number of users and groups via user-data, and confirms
-that they have been configured correctly in the system under test.
+TODO:
+* This module assumes that the "ubuntu" user will be created when "default" is
+ specified; this will need modification to run on other OSes.
"""
import re
import pytest
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
USER_DATA = """\
#cloud-config
@@ -41,6 +44,13 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestUsersGroups:
+ """Test users and groups.
+
+ This test specifies a number of users and groups via user-data, and
+ confirms that they have been configured correctly in the system under test.
+ """
+
+ @pytest.mark.ubuntu
@pytest.mark.parametrize(
"getent_args,regex",
[
@@ -73,7 +83,9 @@ class TestUsersGroups:
assert re.search(regex, result.stdout) is not None, (
"'getent {}' resulted in '{}', "
"but expected to match regex {}".format(
- ' '.join(getent_args), result.stdout, regex))
+ " ".join(getent_args), result.stdout, regex
+ )
+ )
def test_user_root_in_secret(self, class_client):
"""Test root user is in 'secret' group."""
@@ -81,3 +93,33 @@ class TestUsersGroups:
_, groups_str = output.split(":", maxsplit=1)
groups = groups_str.split()
assert "secret" in groups
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_sudoers_includedir(client: IntegrationInstance):
+ """Ensure we don't add additional #includedir to sudoers.
+
+ Newer versions of /etc/sudoers will use @includedir rather than
+ #includedir. Ensure we handle that properly and don't include an
+ additional #includedir when one isn't warranted.
+
+ https://github.com/canonical/cloud-init/pull/783
+ """
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ ]:
+ raise pytest.skip(
+ "Test requires version of sudo installed on groovy and later"
+ )
+ client.execute("sed -i 's/#include/@include/g' /etc/sudoers")
+
+ sudoers = client.read_from_file("/etc/sudoers")
+ if "@includedir /etc/sudoers.d" not in sudoers:
+ client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers")
+ client.instance.clean()
+ client.restart()
+ sudoers = client.read_from_file("/etc/sudoers")
+
+ assert "#includedir" not in sudoers
+ assert sudoers.count("includedir /etc/sudoers.d") == 1
diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
new file mode 100644
index 00000000..3168cd60
--- /dev/null
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -0,0 +1,76 @@
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import ASSETS_DIR, verify_clean_log
+
+PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl")
+TEST_PICKLE = ASSETS_DIR / "test_version_change.pkl"
+
+
+def _assert_no_pickle_problems(log):
+ assert "Failed loading pickled blob" not in log
+ verify_clean_log(log)
+
+
+def test_reboot_without_version_change(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected" not in log
+ assert "Cache compatibility status is currently unknown." not in log
+ _assert_no_pickle_problems(log)
+
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected" not in log
+ assert "Could not determine Python version used to write cache" not in log
+ _assert_no_pickle_problems(log)
+
+ # Now ensure that loading a bad pickle gives us problems
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+ # no cache found is an "expected" upgrade error, and
+ # "Failed" means we're unable to load the pickle
+ assert any(
+ [
+ "Failed loading pickled blob from {}".format(PICKLE_PATH) in log,
+ "no cache found" in log,
+ ]
+ )
+
+
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+# No Azure because the cache gets purged every reboot, so we'll never
+# get to the point where we need to purge cache due to version change
+def test_cache_purged_on_version_change(client: IntegrationInstance):
+ # Start by pushing the invalid pickle so we'll hit an error if the
+ # cache didn't actually get purged
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected. Purging cache" in log
+ _assert_no_pickle_problems(log)
+
+
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ # Start by pushing a pickle so we can see the log message
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("rm /var/lib/cloud/data/python-version")
+ client.execute("rm /var/log/cloud-init.log")
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "no cache found" not in log:
+ # We don't expect the python version file to exist if we have no
+ # pre-existing cache
+ assert (
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown." in log
+ )
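For context, a hypothetical sketch (not cloud-init's actual implementation) of the mechanism these tests exercise: persist the interpreter version next to the cache and purge the pickled datasource when it no longer matches. The log strings mirror the ones asserted above.

    import sys
    from pathlib import Path

    VERSION_FILE = Path("/var/lib/cloud/data/python-version")
    PICKLE = Path("/var/lib/cloud/instance/obj.pkl")

    current = "%d.%d" % sys.version_info[:2]
    if VERSION_FILE.exists():
        if VERSION_FILE.read_text().strip() != current:
            print("Python version change detected. Purging cache")
            PICKLE.unlink(missing_ok=True)
    else:
        print(
            "Writing python-version file. "
            "Cache compatibility status is currently unknown."
        )
    VERSION_FILE.write_text(current)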
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index 15832ae3..1eb7e945 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -7,8 +7,8 @@ and then checks if those files were created during boot.
``tests/cloud_tests/testcases/modules/write_files.yaml``.)"""
import base64
-import pytest
+import pytest
ASCII_TEXT = "ASCII text"
B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
@@ -21,6 +21,9 @@ B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
#
USER_DATA = """\
#cloud-config
+users:
+- default
+- name: myuser
write_files:
- encoding: b64
content: {}
@@ -41,26 +44,50 @@ write_files:
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /root/file_gzip
permissions: '0755'
-""".format(B64_CONTENT.decode("ascii"))
+- path: '/home/testuser/my-file'
+ content: |
+ echo 'hello world!'
+ defer: true
+ owner: 'myuser'
+ permissions: '0644'
+""".format(
+ B64_CONTENT.decode("ascii")
+)
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestWriteFiles:
-
@pytest.mark.parametrize(
- "cmd,expected_out", (
+ "cmd,expected_out",
+ (
("file /root/file_b64", ASCII_TEXT),
("md5sum </root/file_binary", "3801184b97bb8c6e63fa0e1eae2920d7"),
- ("sha256sum </root/file_binary", (
+ (
+ "sha256sum </root/file_binary",
"2c791c4037ea5bd7e928d6a87380f8ba"
- "7a803cd83d5e4f269e28f5090f0f2c9a"
- )),
- ("file /root/file_gzip",
- "POSIX shell script, ASCII text executable"),
+ "7a803cd83d5e4f269e28f5090f0f2c9a",
+ ),
+ (
+ "file /root/file_gzip",
+ "POSIX shell script, ASCII text executable",
+ ),
("file /root/file_text", ASCII_TEXT),
- )
+ ),
)
def test_write_files(self, cmd, expected_out, class_client):
out = class_client.execute(cmd)
assert expected_out in out
+
+ def test_write_files_deferred(self, class_client):
+ """Test that write files deferred works as expected.
+
+ Users get created after write_files module runs, so ensure that
+ with `defer: true`, the file gets written with correct ownership.
+ """
+ out = class_client.read_from_file("/home/testuser/my-file")
+ assert "echo 'hello world!'" == out
+ assert (
+ class_client.execute('stat -c "%U %a" /home/testuser/my-file')
+ == "myuser 644"
+ )
diff --git a/tests/integration_tests/network/test_net_config_load.py b/tests/integration_tests/network/test_net_config_load.py
new file mode 100644
index 00000000..a6863b63
--- /dev/null
+++ b/tests/integration_tests/network/test_net_config_load.py
@@ -0,0 +1,27 @@
+"""Test loading the network config"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _customize_environment(client: IntegrationInstance):
+ # Insert our "disable_network_config" file here
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg",
+ "network: {config: disabled}\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+def test_network_disabled_via_etc_cloud(client: IntegrationInstance):
+ """Test that network can be disabled via config file in /etc/cloud"""
+ if client.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
+ pytest.skip(
+ "IN_PLACE not supported as we mount /etc/cloud contents into the "
+ "container"
+ )
+    _customize_environment(client)
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "network config is disabled by system_cfg" in log
diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py
new file mode 100644
index 00000000..b31a0434
--- /dev/null
+++ b/tests/integration_tests/test_logging.py
@@ -0,0 +1,22 @@
+"""Integration tests relating to cloud-init's logging."""
+
+
+class TestVarLogCloudInitOutput:
+ """Integration tests relating to /var/log/cloud-init-output.log."""
+
+ def test_var_log_cloud_init_output_not_world_readable(self, client):
+ """
+ The log can contain sensitive data, it shouldn't be world-readable.
+
+ LP: #1918303
+ """
+ # Check the file exists
+ assert client.execute("test -f /var/log/cloud-init-output.log").ok
+
+ # Check its permissions are as we expect
+ perms, user, group = client.execute(
+ "stat -c %a:%U:%G /var/log/cloud-init-output.log"
+ ).split(":")
+ assert "640" == perms
+ assert "root" == user
+ assert "adm" == group
diff --git a/tests/integration_tests/test_shell_script_by_frequency.py b/tests/integration_tests/test_shell_script_by_frequency.py
new file mode 100644
index 00000000..25157722
--- /dev/null
+++ b/tests/integration_tests/test_shell_script_by_frequency.py
@@ -0,0 +1,48 @@
+"""Integration tests for various handlers."""
+
+from io import StringIO
+
+import pytest
+
+from cloudinit.cmd.devel.make_mime import create_mime_message
+from tests.integration_tests.instances import IntegrationInstance
+
+PER_FREQ_TEMPLATE = """\
+#!/bin/bash
+touch /tmp/test_per_freq_{}
+"""
+
+PER_ALWAYS_FILE = StringIO(PER_FREQ_TEMPLATE.format("always"))
+PER_INSTANCE_FILE = StringIO(PER_FREQ_TEMPLATE.format("instance"))
+PER_ONCE_FILE = StringIO(PER_FREQ_TEMPLATE.format("once"))
+
+FILES = [
+ (PER_ALWAYS_FILE, "always.sh", "x-shellscript-per-boot"),
+ (PER_INSTANCE_FILE, "instance.sh", "x-shellscript-per-instance"),
+ (PER_ONCE_FILE, "once.sh", "x-shellscript-per-once"),
+]
+
+USER_DATA, errors = create_mime_message(FILES)
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+def test_per_freq(client: IntegrationInstance):
+ # Sanity test for scripts folder
+ cmd = "test -d /var/lib/cloud/scripts"
+ assert client.execute(cmd).ok
+ # Test per-boot
+ cmd = "test -f /var/lib/cloud/scripts/per-boot/always.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_always"
+ assert client.execute(cmd).ok
+ # Test per-instance
+ cmd = "test -f /var/lib/cloud/scripts/per-instance/instance.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_instance"
+ assert client.execute(cmd).ok
+ # Test per-once
+ cmd = "test -f /var/lib/cloud/scripts/per-once/once.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_once"
+ assert client.execute(cmd).ok
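create_mime_message() lives in cloudinit.cmd.devel.make_mime; a rough sketch, using only the standard email library, of the kind of multipart user-data it assembles (the exact output format is an assumption here). Each part's MIME subtype selects the per-boot/per-instance/per-once handler:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    script = "#!/bin/bash\ntouch /tmp/test_per_freq_always\n"

    msg = MIMEMultipart()
    # text/x-shellscript-per-boot tells cloud-init which frequency applies
    part = MIMEText(script, "x-shellscript-per-boot")
    part.add_header("Content-Disposition", 'attachment; filename="always.sh"')
    msg.attach(part)
    print(msg.as_string())  # this blob is what gets passed as user-data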
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
new file mode 100644
index 00000000..b13d4703
--- /dev/null
+++ b/tests/integration_tests/test_upgrade.py
@@ -0,0 +1,188 @@
+import json
+import logging
+import os
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.util import verify_clean_log
+
+LOG = logging.getLogger("integration_testing.test_upgrade")
+
+LOG_TEMPLATE = """\n\
+=== `systemd-analyze` before:
+{pre_systemd_analyze}
+=== `systemd-analyze` after:
+{post_systemd_analyze}
+
+=== `systemd-analyze blame` before (first 10 lines):
+{pre_systemd_blame}
+=== `systemd-analyze blame` after (first 10 lines):
+{post_systemd_blame}
+
+=== `cloud-init analyze show` before:
+{pre_analyze_totals}
+=== `cloud-init analyze show` after:
+{post_analyze_totals}
+
+=== `cloud-init analyze blame` before (first 10 lines):
+{pre_cloud_blame}
+=== `cloud-init analyze blame` after (first 10 lines):
+{post_cloud_blame}
+"""
+
+UNSUPPORTED_INSTALL_METHOD_MSG = (
+ "Install method '{}' not supported for this test"
+)
+USER_DATA = """\
+#cloud-config
+hostname: SRU-worked
+"""
+
+
+def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ return # type checking doesn't understand that skip raises
+ if (
+ ImageSpecification.from_os_image().release == "bionic"
+ and session_cloud.settings.PLATFORM == "lxd_vm"
+ ):
+ # The issues that we see on Bionic VMs don't appear anywhere
+ # else, including when calling KVM directly. It likely has to
+ # do with the extra lxd-agent setup happening on bionic.
+ # Given that we still have Bionic covered on all other platforms,
+ # the risk of skipping bionic here seems low enough.
+ pytest.skip("Upgrade test doesn't run on LXD VMs and bionic")
+ return
+
+ launch_kwargs = {
+ "image_id": session_cloud.initial_image_id,
+ }
+
+ with session_cloud.launch(
+ launch_kwargs=launch_kwargs,
+ user_data=USER_DATA,
+ ) as instance:
+ # get pre values
+ pre_hostname = instance.execute("hostname")
+ pre_cloud_id = instance.execute("cloud-id")
+ pre_result = instance.execute("cat /run/cloud-init/result.json")
+ pre_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml")
+ pre_systemd_analyze = instance.execute("systemd-analyze")
+ pre_systemd_blame = instance.execute("systemd-analyze blame")
+ pre_cloud_analyze = instance.execute("cloud-init analyze show")
+ pre_cloud_blame = instance.execute("cloud-init analyze blame")
+
+ # Ensure no issues pre-upgrade
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ assert not json.loads(pre_result)["v1"]["errors"]
+
+ try:
+ verify_clean_log(log)
+ except AssertionError:
+ LOG.warning(
+ "There were errors/warnings/tracebacks pre-upgrade. "
+ "Any failures may be due to pre-upgrade problem"
+ )
+
+ # Upgrade
+ instance.install_new_cloud_init(source, take_snapshot=False)
+
+ # 'cloud-init init' helps us understand if our pickling upgrade paths
+ # have broken across re-constitution of a cached datasource. Some
+ # platforms invalidate their datasource cache on reboot, so we run
+ # it here to ensure we get a dirty run.
+ assert instance.execute("cloud-init init").ok
+
+ # Reboot
+ instance.execute("hostname something-else")
+ instance.restart()
+ assert instance.execute("cloud-init status --wait --long").ok
+
+ # get post values
+ post_hostname = instance.execute("hostname")
+ post_cloud_id = instance.execute("cloud-id")
+ post_result = instance.execute("cat /run/cloud-init/result.json")
+ post_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml")
+ post_systemd_analyze = instance.execute("systemd-analyze")
+ post_systemd_blame = instance.execute("systemd-analyze blame")
+ post_cloud_analyze = instance.execute("cloud-init analyze show")
+ post_cloud_blame = instance.execute("cloud-init analyze blame")
+
+ # Ensure no issues post-upgrade
+ assert not json.loads(pre_result)["v1"]["errors"]
+
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ # Ensure important things stayed the same
+ assert pre_hostname == post_hostname
+ assert pre_cloud_id == post_cloud_id
+ try:
+ assert pre_result == post_result
+ except AssertionError:
+ if instance.settings.PLATFORM == "azure":
+ pre_json = json.loads(pre_result)
+ post_json = json.loads(post_result)
+ assert pre_json["v1"]["datasource"].startswith(
+ "DataSourceAzure"
+ )
+ assert post_json["v1"]["datasource"].startswith(
+ "DataSourceAzure"
+ )
+ assert pre_network == post_network
+
+ # Calculate and log all the boot numbers
+ pre_analyze_totals = [
+ x
+ for x in pre_cloud_analyze.splitlines()
+ if x.startswith("Finished stage") or x.startswith("Total Time")
+ ]
+ post_analyze_totals = [
+ x
+ for x in post_cloud_analyze.splitlines()
+ if x.startswith("Finished stage") or x.startswith("Total Time")
+ ]
+
+ # pylint: disable=logging-format-interpolation
+ LOG.info(
+ LOG_TEMPLATE.format(
+ pre_systemd_analyze=pre_systemd_analyze,
+ post_systemd_analyze=post_systemd_analyze,
+ pre_systemd_blame="\n".join(
+ pre_systemd_blame.splitlines()[:10]
+ ),
+ post_systemd_blame="\n".join(
+ post_systemd_blame.splitlines()[:10]
+ ),
+ pre_analyze_totals="\n".join(pre_analyze_totals),
+ post_analyze_totals="\n".join(post_analyze_totals),
+ pre_cloud_blame="\n".join(pre_cloud_blame.splitlines()[:10]),
+ post_cloud_blame="\n".join(post_cloud_blame.splitlines()[:10]),
+ )
+ )
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ if os.environ.get("TRAVIS"):
+ # If this isn't running on CI, we should know
+ pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ else:
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ return # type checking doesn't understand that skip raises
+
+ launch_kwargs = {"image_id": session_cloud.initial_image_id}
+
+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance:
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ instance.restart()
+ assert instance.execute("cloud-init status --wait --long").ok
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
new file mode 100644
index 00000000..31fe69c0
--- /dev/null
+++ b/tests/integration_tests/util.py
@@ -0,0 +1,142 @@
+import functools
+import logging
+import multiprocessing
+import os
+import time
+from collections import namedtuple
+from contextlib import contextmanager
+from pathlib import Path
+
+log = logging.getLogger("integration_testing")
+key_pair = namedtuple("key_pair", "public_key private_key")
+
+ASSETS_DIR = Path("tests/integration_tests/assets")
+KEY_PATH = ASSETS_DIR / "keys"
+
+
+def verify_ordered_items_in_text(to_verify: list, text: str):
+ """Assert all items in list appear in order in text.
+
+ Examples:
+ verify_ordered_items_in_text(['a', '1'], 'ab1') # passes
+ verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError
+ """
+ index = 0
+ for item in to_verify:
+ index = text[index:].find(item)
+ assert index > -1, "Expected item not found: '{}'".format(item)
+
+
+def verify_clean_log(log):
+ """Assert no unexpected tracebacks or warnings in logs"""
+ warning_count = log.count("WARN")
+ expected_warnings = 0
+ traceback_count = log.count("Traceback")
+ expected_tracebacks = 0
+
+ warning_texts = [
+ # Consistently on all Azure launches:
+ # azure.py[WARNING]: No lease found; using default endpoint
+ "No lease found; using default endpoint"
+ ]
+ traceback_texts = []
+ if "oracle" in log:
+ # LP: #1842752
+ lease_exists_text = "Stderr: RTNETLINK answers: File exists"
+ warning_texts.append(lease_exists_text)
+ traceback_texts.append(lease_exists_text)
+ # LP: #1833446
+ fetch_error_text = (
+ "UrlError: 404 Client Error: Not Found for url: "
+ "http://169.254.169.254/latest/meta-data/"
+ )
+ warning_texts.append(fetch_error_text)
+ traceback_texts.append(fetch_error_text)
+ # Oracle has a file in /etc/cloud/cloud.cfg.d that contains
+ # users:
+ # - default
+ # - name: opc
+ # ssh_redirect_user: true
+ # This can trigger a warning about opc having no public key
+ warning_texts.append(
+ "Unable to disable SSH logins for opc given ssh_redirect_user"
+ )
+
+ for warning_text in warning_texts:
+ expected_warnings += log.count(warning_text)
+ for traceback_text in traceback_texts:
+ expected_tracebacks += log.count(traceback_text)
+
+ assert warning_count == expected_warnings
+ assert traceback_count == expected_tracebacks
+
+
+@contextmanager
+def emit_dots_on_travis():
+ """emit a dot every 60 seconds if running on Travis.
+
+ Travis will kill jobs that don't emit output for a certain amount of time.
+ This context manager spins up a background process which will emit a dot to
+ stdout every 60 seconds to avoid being killed.
+
+ It should be wrapped selectively around operations that are known to take a
+ long time.
+ """
+ if os.environ.get("TRAVIS") != "true":
+ # If we aren't on Travis, don't do anything.
+ yield
+ return
+
+ def emit_dots():
+ while True:
+ log.info(".")
+ time.sleep(60)
+
+ dot_process = multiprocessing.Process(target=emit_dots)
+ dot_process.start()
+ try:
+ yield
+ finally:
+ dot_process.terminate()
+
+
+def get_test_rsa_keypair(key_name: str = "test1") -> key_pair:
+ private_key_path = KEY_PATH / "id_rsa.{}".format(key_name)
+ public_key_path = KEY_PATH / "id_rsa.{}.pub".format(key_name)
+ with public_key_path.open() as public_file:
+ public_key = public_file.read()
+ with private_key_path.open() as private_file:
+ private_key = private_file.read()
+ return key_pair(public_key, private_key)
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+ """Decorator for retries.
+
+ Retry a function until code no longer raises an exception or
+ max tries is reached.
+
+ Example:
+ @retry(tries=5, delay=1)
+ def try_something_that_may_not_be_ready():
+ ...
+ """
+
+ def _retry(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ last_error = None
+ for _ in range(tries):
+ try:
+ func(*args, **kwargs)
+ break
+ except Exception as e:
+ last_error = e
+ time.sleep(delay)
+ else:
+ if last_error:
+ raise last_error
+
+ return wrapper
+
+ return _retry
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
index d89ed443..657cb399 100644
--- a/tests/unittests/__init__.py
+++ b/tests/unittests/__init__.py
@@ -4,6 +4,7 @@ try:
# For test cases, avoid the following UserWarning to stderr:
# You don't have the C version of NameMapper installed ...
from Cheetah import NameMapper as _nm
+
_nm.C_VERSION = True
except ImportError:
pass
diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py
new file mode 100644
index 00000000..68db69ec
--- /dev/null
+++ b/tests/unittests/analyze/test_boot.py
@@ -0,0 +1,174 @@
+import os
+
+from cloudinit.analyze.__main__ import analyze_boot, get_parser
+from cloudinit.analyze.show import (
+ CONTAINER_CODE,
+ FAIL_CODE,
+ SystemctlReader,
+ dist_check_timestamp,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+err_code = (FAIL_CODE, -1, -1, -1)
+
+
+class TestDistroChecker(CiTestCase):
+ def test_blank_distro(self):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch("cloudinit.util.is_FreeBSD", return_value=True)
+ def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch("cloudinit.subp.subp", return_value=(0, 1))
+ def test_subp_fails(self, m_subp):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+
+class TestSystemCtlReader(CiTestCase):
+ def test_systemctl_invalid_property(self):
+ reader = SystemctlReader("dummyProperty")
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ def test_systemctl_invalid_parameter(self):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ def test_systemctl_works_correctly_threshold(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ self.assertEqual(1.0, reader.parse_epoch_as_float())
+ thresh = 1.0 - reader.parse_epoch_as_float()
+ self.assertTrue(thresh < 1e-6)
+ self.assertTrue(thresh > (-1 * 1e-6))
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=0", None))
+ def test_systemctl_succeed_zero(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ self.assertEqual(0.0, reader.parse_epoch_as_float())
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1", None))
+ def test_systemctl_succeed_distinct(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ val1 = reader.parse_epoch_as_float()
+ m_subp.return_value = ("U=2", None)
+ reader2 = SystemctlReader("dummyProperty", "dummyParameter")
+ val2 = reader2.parse_epoch_as_float()
+ self.assertNotEqual(val1, val2)
+
+ @mock.patch("cloudinit.subp.subp", return_value=("100", None))
+ def test_systemctl_epoch_not_splittable(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(IndexError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=foobar", None))
+ def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(ValueError):
+ reader.parse_epoch_as_float()
+
+
+class TestAnalyzeBoot(CiTestCase):
+ def set_up_dummy_file_ci(self, path, log_path):
+ infh = open(path, "w+")
+ infh.write(
+ "2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. "
+ "19.1-1-gbaa47854-0ubuntu1~18.04.1 running 'init-local' "
+ "at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds."
+ )
+ infh.close()
+ outfh = open(log_path, "w+")
+ outfh.close()
+
+ def set_up_dummy_file(self, path, log_path):
+ infh = open(path, "w+")
+ infh.write("dummy data")
+ infh.close()
+ outfh = open(log_path, "w+")
+ outfh.close()
+
+ def remove_dummy_file(self, path, log_path):
+ if os.path.isfile(path):
+ os.remove(path)
+ if os.path.isfile(log_path):
+ os.remove(log_path)
+
+ @mock.patch(
+ "cloudinit.analyze.show.dist_check_timestamp", return_value=err_code
+ )
+ def test_boot_invalid_distro(self, m_dist_check_timestamp):
+
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+ analyze_boot(name_default, args)
+ # now args have been tested, go into outfile and make sure error
+ # message is in the outfile
+ outfh = open(args.outfile, "r")
+ data = outfh.read()
+ err_string = (
+ "Your Linux distro or container does not support this "
+ "functionality.\nYou must be running a Kernel "
+ "Telemetry supported distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest/topics"
+ "/analyze.html for more information on supported "
+ "distros.\n"
+ )
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(err_string, data)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ def test_container_no_ci_log_line(self, m_is_container, m_subp):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(FAIL_CODE, finish_code)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ @mock.patch(
+ "cloudinit.analyze.__main__._get_events",
+ return_value=[
+ {
+ "name": "init-local",
+ "description": "starting search",
+ "timestamp": 100000,
+ }
+ ],
+ )
+ @mock.patch(
+ "cloudinit.analyze.show.dist_check_timestamp",
+ return_value=(CONTAINER_CODE, 1, 1, 1),
+ )
+ def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file_ci(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(CONTAINER_CODE, finish_code)
diff --git a/cloudinit/analyze/tests/test_dump.py b/tests/unittests/analyze/test_dump.py
index dac1efb6..56bbf97f 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/tests/unittests/analyze/test_dump.py
@@ -4,50 +4,54 @@ from datetime import datetime
from textwrap import dedent
from cloudinit.analyze.dump import (
- dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import write_file
+ dump_events,
+ parse_ci_logline,
+ parse_timestamp,
+)
from cloudinit.subp import which
-from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+from cloudinit.util import write_file
+from tests.unittests.helpers import CiTestCase, mock, skipIf
class TestParseTimestamp(CiTestCase):
-
def test_parse_timestamp_handles_cloud_init_default_format(self):
"""Logs with cloud-init detailed formats will be properly parsed."""
- trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
- trusty_stamp = '2016-09-12 14:39:20,839'
+ trusty_fmt = "%Y-%m-%d %H:%M:%S,%f"
+ trusty_stamp = "2016-09-12 14:39:20,839"
dt = datetime.strptime(trusty_stamp, trusty_fmt)
self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))
+ float(dt.strftime("%s.%f")), parse_timestamp(trusty_stamp)
+ )
def test_parse_timestamp_handles_syslog_adding_year(self):
"""Syslog timestamps lack a year. Add year and properly parse."""
- syslog_fmt = '%b %d %H:%M:%S %Y'
- syslog_stamp = 'Aug 08 15:12:51'
+ syslog_fmt = "%b %d %H:%M:%S %Y"
+ syslog_stamp = "Aug 08 15:12:51"
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
self.assertEqual(
- float(dt.strftime('%s.%f')),
- parse_timestamp(syslog_stamp))
+ float(dt.strftime("%s.%f")), parse_timestamp(syslog_stamp)
+ )
def test_parse_timestamp_handles_journalctl_format_adding_year(self):
"""Journalctl precise timestamps lack a year. Add year and parse."""
- journal_fmt = '%b %d %H:%M:%S.%f %Y'
- journal_stamp = 'Aug 08 17:15:50.606811'
+ journal_fmt = "%b %d %H:%M:%S.%f %Y"
+ journal_stamp = "Aug 08 17:15:50.606811"
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp))
+ float(dt.strftime("%s.%f")), parse_timestamp(journal_stamp)
+ )
@skipIf(not which("date"), "'date' command not available.")
def test_parse_unexpected_timestamp_format_with_date_command(self):
"""Dump sends unexpected timestamp formats to date for processing."""
- new_fmt = '%H:%M %m/%d %Y'
- new_stamp = '17:15 08/08'
+ new_fmt = "%H:%M %m/%d %Y"
+ new_stamp = "17:15 08/08"
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
@@ -55,15 +59,20 @@ class TestParseTimestamp(CiTestCase):
# use date(1)
with self.allow_subp(["date"]):
self.assertEqual(
- float(dt.strftime('%s.%f')), parse_timestamp(new_stamp))
+ float(dt.strftime("%s.%f")), parse_timestamp(new_stamp)
+ )
class TestParseCILogLine(CiTestCase):
-
def test_parse_logline_returns_none_without_separators(self):
"""When no separators are found, parse_ci_logline returns None."""
expected_parse_ignores = [
- '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT']
+ "",
+ "-",
+ "adsf-asdf",
+ "2017-05-22 18:02:01,088",
+ "CLOUDINIT",
+ ]
for parse_ignores in expected_parse_ignores:
self.assertIsNone(parse_ci_logline(parse_ignores))
@@ -72,79 +81,95 @@ class TestParseCILogLine(CiTestCase):
line = (
"2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
" running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
- " 6.26 seconds.")
+ " 6.26 seconds."
+ )
dt = datetime.strptime(
- '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f')
- timestamp = float(dt.strftime('%s.%f'))
+ "2017-08-08 20:05:07,147", "%Y-%m-%d %H:%M:%S,%f"
+ )
+ timestamp = float(dt.strftime("%s.%f"))
expected = {
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp}
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp,
+ }
self.assertEqual(expected, parse_ci_logline(line))
def test_parse_logline_returns_event_for_journalctl_logs(self):
"""parse_ci_logline returns an event parse from journalctl format."""
- line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
- " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
- " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.")
+ line = (
+ "Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
+ " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
+ " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds."
+ )
year = datetime.now().year
dt = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp = float(dt.strftime('%s.%f'))
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp = float(dt.strftime("%s.%f"))
expected = {
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp}
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp,
+ }
self.assertEqual(expected, parse_ci_logline(line))
@mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
- def test_parse_logline_returns_event_for_finish_events(self,
- m_parse_from_date):
+ def test_parse_logline_returns_event_for_finish_events(
+ self, m_parse_from_date
+ ):
"""parse_ci_logline returns a finish event for a parsed log line."""
- line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
- ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
- ' modules for final')
+ line = (
+ "2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]"
+ " handlers.py[DEBUG]: finish: modules-final: SUCCESS: running"
+ " modules for final"
+ )
expected = {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ }
m_parse_from_date.return_value = "1472594005.972"
self.assertEqual(expected, parse_ci_logline(line))
m_parse_from_date.assert_has_calls(
- [mock.call("2016-08-30 21:53:25.972325+00:00")])
+ [mock.call("2016-08-30 21:53:25.972325+00:00")]
+ )
def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
line = (
"Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
- " init-local/check-cache: attempting to read from cache [check]")
+ " init-local/check-cache: attempting to read from cache [check]"
+ )
# Generate the expected value using `datetime`, so that TZ
# determination is consistent with the code under test.
timestamp_dt = datetime.strptime(
"Apr 30 19:39:11", "%b %d %H:%M:%S"
).replace(year=datetime.now().year)
expected = {
- 'description': 'attempting to read from cache [check]',
- 'event_type': 'start',
- 'name': 'init-local/check-cache',
- 'origin': 'cloudinit',
- 'timestamp': timestamp_dt.timestamp()}
+ "description": "attempting to read from cache [check]",
+ "event_type": "start",
+ "name": "init-local/check-cache",
+ "origin": "cloudinit",
+ "timestamp": timestamp_dt.timestamp(),
+ }
self.assertEqual(expected, parse_ci_logline(line))
-SAMPLE_LOGS = dedent("""\
+SAMPLE_LOGS = dedent(
+ """\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
06:51:06 +0000. Up 1.0 seconds.
2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
modules-final: SUCCESS: running modules for final
-""")
+"""
+)
class TestDumpEvents(CiTestCase):
@@ -158,51 +183,65 @@ class TestDumpEvents(CiTestCase):
expected_data = SAMPLE_LOGS.splitlines()
self.assertEqual(
[mock.call("2016-08-30 21:53:25.972325+00:00")],
- m_parse_from_date.call_args_list)
+ m_parse_from_date.call_args_list,
+ )
self.assertEqual(expected_data, data)
year = datetime.now().year
dt1 = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp1 = float(dt1.strftime('%s.%f'))
- expected_events = [{
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp1}, {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}]
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp1 = float(dt1.strftime("%s.%f"))
+ expected_events = [
+ {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp1,
+ },
+ {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ },
+ ]
self.assertEqual(expected_events, events)
@mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
def test_dump_events_with_cisource(self, m_parse_from_date):
"""Cisource file is read and parsed into a tuple of events and data."""
- tmpfile = self.tmp_path('logfile')
+ tmpfile = self.tmp_path("logfile")
write_file(tmpfile, SAMPLE_LOGS)
m_parse_from_date.return_value = 1472594005.972
events, data = dump_events(cisource=open(tmpfile))
year = datetime.now().year
dt1 = datetime.strptime(
- 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
- timestamp1 = float(dt1.strftime('%s.%f'))
- expected_events = [{
- 'description': 'starting search for local datasources',
- 'event_type': 'start',
- 'name': 'init-local',
- 'origin': 'cloudinit',
- 'timestamp': timestamp1}, {
- 'description': 'running modules for final',
- 'event_type': 'finish',
- 'name': 'modules-final',
- 'origin': 'cloudinit',
- 'result': 'SUCCESS',
- 'timestamp': 1472594005.972}]
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp1 = float(dt1.strftime("%s.%f"))
+ expected_events = [
+ {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp1,
+ },
+ {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ },
+ ]
self.assertEqual(expected_events, events)
self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
m_parse_from_date.assert_has_calls(
- [mock.call("2016-08-30 21:53:25.972325+00:00")])
+ [mock.call("2016-08-30 21:53:25.972325+00:00")]
+ )
diff --git a/cloudinit/cmd/devel/tests/__init__.py b/tests/unittests/cloudinit/__init__py
index e69de29b..e69de29b 100644
--- a/cloudinit/cmd/devel/tests/__init__.py
+++ b/tests/unittests/cloudinit/__init__py
diff --git a/cloudinit/cmd/tests/__init__.py b/tests/unittests/cmd/__init__.py
index e69de29b..e69de29b 100644
--- a/cloudinit/cmd/tests/__init__.py
+++ b/tests/unittests/cmd/__init__.py
diff --git a/cloudinit/distros/tests/__init__.py b/tests/unittests/cmd/devel/__init__.py
index e69de29b..e69de29b 100644
--- a/cloudinit/distros/tests/__init__.py
+++ b/tests/unittests/cmd/devel/__init__.py
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
new file mode 100644
index 00000000..5ecb5969
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -0,0 +1,236 @@
+from collections import namedtuple
+from unittest import mock
+from unittest.mock import call
+
+import pytest
+
+from cloudinit.cmd.devel.hotplug_hook import handle_hotplug
+from cloudinit.distros import Distro
+from cloudinit.event import EventType
+from cloudinit.net.activators import NetworkActivator
+from cloudinit.net.network_state import NetworkState
+from cloudinit.sources import DataSource
+from cloudinit.stages import Init
+
+hotplug_args = namedtuple("hotplug_args", "udevaction, subsystem, devpath")
+FAKE_MAC = "11:22:33:44:55:66"
+
+
+@pytest.fixture
+def mocks():
+ m_init = mock.MagicMock(spec=Init)
+ m_distro = mock.MagicMock(spec=Distro)
+ m_datasource = mock.MagicMock(spec=DataSource)
+ m_datasource.distro = m_distro
+ m_init.datasource = m_datasource
+ m_init.fetch.return_value = m_datasource
+
+ read_sys_net = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe",
+ return_value=FAKE_MAC,
+ )
+
+ update_event_enabled = mock.patch(
+ "cloudinit.stages.update_event_enabled",
+ return_value=True,
+ )
+
+ m_network_state = mock.MagicMock(spec=NetworkState)
+ parse_net = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.parse_net_config_data",
+ return_value=m_network_state,
+ )
+
+ m_activator = mock.MagicMock(spec=NetworkActivator)
+ select_activator = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.activators.select_activator",
+ return_value=m_activator,
+ )
+
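+    # Patch time.sleep so handle_hotplug's retry waits do not slow the tests down.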
+ sleep = mock.patch("time.sleep")
+
+ read_sys_net.start()
+ update_event_enabled.start()
+ parse_net.start()
+ select_activator.start()
+ m_sleep = sleep.start()
+
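+    # Code after the yield is the fixture teardown; it stops every patch started above.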
+ yield namedtuple("mocks", "m_init m_network_state m_activator m_sleep")(
+ m_init=m_init,
+ m_network_state=m_network_state,
+ m_activator=m_activator,
+ m_sleep=m_sleep,
+ )
+
+ read_sys_net.stop()
+ update_event_enabled.stop()
+ parse_net.stop()
+ select_activator.stop()
+ sleep.stop()
+
+
+class TestUnsupportedActions:
+ def test_unsupported_subsystem(self, mocks):
+ with pytest.raises(
+ Exception, match="cannot handle events for subsystem: not_real"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ subsystem="not_real",
+ udevaction="add",
+ )
+
+ def test_unsupported_udevaction(self, mocks):
+ with pytest.raises(ValueError, match="Unknown action: not_real"):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="not_real",
+ subsystem="net",
+ )
+
+
+class TestHotplug:
+    def test_successful_add(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with(
+ [EventType.HOTPLUG]
+ )
+ mocks.m_activator.bring_up_interface.assert_called_once_with("fake")
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_successful_remove(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with(
+ [EventType.HOTPLUG]
+ )
+ mocks.m_activator.bring_down_interface.assert_called_once_with("fake")
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_update_event_disabled(self, mocks, caplog):
+ init = mocks.m_init
+ with mock.patch(
+ "cloudinit.stages.update_event_enabled", return_value=False
+ ):
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+ assert "hotplug not enabled for event of type" in caplog.text
+ init.datasource.update_metadata_if_supported.assert_not_called()
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_not_called()
+
+ def test_update_metadata_failed(self, mocks):
+ mocks.m_init.datasource.update_metadata_if_supported.return_value = (
+ False
+ )
+ with pytest.raises(
+ RuntimeError, match="Datasource .* not updated for event hotplug"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_detect_hotplugged_device_not_detected_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ with pytest.raises(
+ RuntimeError,
+ match="Failed to detect {} in updated metadata".format(FAKE_MAC),
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+
+ def test_detect_hotplugged_device_detected_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ with pytest.raises(
+ RuntimeError, match="Failed to detect .* in updated metadata"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_apply_failed_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ mocks.m_activator.bring_up_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match="Failed to bring up device: /dev/fake"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+
+ def test_apply_failed_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ mocks.m_activator.bring_down_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match="Failed to bring down device: /dev/fake"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_retry(self, mocks):
+ with pytest.raises(RuntimeError):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
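+        # Each failed attempt sleeps with a progressively longer backoff before retrying.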
+ assert mocks.m_sleep.call_count == 5
+ assert mocks.m_sleep.call_args_list == [
+ call(1),
+ call(3),
+ call(5),
+ call(10),
+ call(30),
+ ]
diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py
new file mode 100644
index 00000000..73ed3c65
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_logs.py
@@ -0,0 +1,213 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from datetime import datetime
+from io import StringIO
+
+from cloudinit.cmd.devel import logs
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import subp
+from cloudinit.util import ensure_dir, load_file, write_file
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ wrap_and_call,
+)
+
+
+@mock.patch("cloudinit.cmd.devel.logs.os.getuid")
+class TestCollectLogs(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestCollectLogs, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.run_dir = self.tmp_path("run", self.new_root)
+
+ def test_collect_logs_with_userdata_requires_root_user(self, m_getuid):
+ """collect-logs errors when non-root user collects userdata ."""
+ m_getuid.return_value = 100 # non-root
+ output_tarfile = self.tmp_path("logs.tgz")
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
+ self.assertEqual(
+ 1, logs.collect_logs(output_tarfile, include_userdata=True)
+ )
+ self.assertEqual(
+ "To include userdata, root user is required."
+ " Try sudo cloud-init collect-logs\n",
+ m_stderr.getvalue(),
+ )
+
+ def test_collect_logs_creates_tarfile(self, m_getuid):
+ """collect-logs creates a tarfile with all related cloud-init info."""
+ m_getuid.return_value = 100
+ log1 = self.tmp_path("cloud-init.log", self.new_root)
+ write_file(log1, "cloud-init-log")
+ log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ write_file(log2, "cloud-init-output-log")
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path("results.json", self.run_dir), "results")
+ write_file(
+ self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ "sensitive",
+ )
+ output_tarfile = self.tmp_path("logs.tgz")
+
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ date_logdir = "cloud-init-logs-{0}".format(date)
+
+ version_out = "/usr/bin/cloud-init 18.2fake\n"
+ expected_subp = {
+ (
+ "dpkg-query",
+ "--show",
+ "-f=${Version}\n",
+ "cloud-init",
+ ): "0.7fake\n",
+ ("cloud-init", "--version"): version_out,
+ ("dmesg",): "dmesg-out\n",
+ ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n",
+ ("tar", "czvf", output_tarfile, date_logdir): "",
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ "Unexpected command provided to subp: {0}".format(cmd)
+ )
+ if cmd == ["tar", "czvf", output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ""
+
+ fake_stderr = mock.MagicMock()
+
+ wrap_and_call(
+ "cloudinit.cmd.devel.logs",
+ {
+ "subp": {"side_effect": fake_subp},
+ "sys.stderr": {"new": fake_stderr},
+ "CLOUDINIT_LOGS": {"new": [log1, log2]},
+ "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
+ },
+ logs.collect_logs,
+ output_tarfile,
+ include_userdata=False,
+ )
+ # unpack the tarfile and check file contents
+ subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertFalse(
+ os.path.exists(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ ),
+ "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ self.assertEqual(
+ "0.7fake\n", load_file(os.path.join(out_logdir, "dpkg-version"))
+ )
+ self.assertEqual(
+ version_out, load_file(os.path.join(out_logdir, "version"))
+ )
+ self.assertEqual(
+ "cloud-init-log",
+ load_file(os.path.join(out_logdir, "cloud-init.log")),
+ )
+ self.assertEqual(
+ "cloud-init-output-log",
+ load_file(os.path.join(out_logdir, "cloud-init-output.log")),
+ )
+ self.assertEqual(
+ "dmesg-out\n", load_file(os.path.join(out_logdir, "dmesg.txt"))
+ )
+ self.assertEqual(
+ "journal-out\n", load_file(os.path.join(out_logdir, "journal.txt"))
+ )
+ self.assertEqual(
+ "results",
+ load_file(
+ os.path.join(out_logdir, "run", "cloud-init", "results.json")
+ ),
+ )
+ fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
+
+ def test_collect_logs_includes_optional_userdata(self, m_getuid):
+ """collect-logs include userdata when --include-userdata is set."""
+ m_getuid.return_value = 0
+ log1 = self.tmp_path("cloud-init.log", self.new_root)
+ write_file(log1, "cloud-init-log")
+ log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ write_file(log2, "cloud-init-output-log")
+ userdata = self.tmp_path("user-data.txt", self.new_root)
+ write_file(userdata, "user-data")
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path("results.json", self.run_dir), "results")
+ write_file(
+ self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ "sensitive",
+ )
+ output_tarfile = self.tmp_path("logs.tgz")
+
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ date_logdir = "cloud-init-logs-{0}".format(date)
+
+ version_out = "/usr/bin/cloud-init 18.2fake\n"
+ expected_subp = {
+ (
+ "dpkg-query",
+ "--show",
+ "-f=${Version}\n",
+ "cloud-init",
+ ): "0.7fake",
+ ("cloud-init", "--version"): version_out,
+ ("dmesg",): "dmesg-out\n",
+ ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n",
+ ("tar", "czvf", output_tarfile, date_logdir): "",
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ "Unexpected command provided to subp: {0}".format(cmd)
+ )
+ if cmd == ["tar", "czvf", output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ""
+
+ fake_stderr = mock.MagicMock()
+
+ wrap_and_call(
+ "cloudinit.cmd.devel.logs",
+ {
+ "subp": {"side_effect": fake_subp},
+ "sys.stderr": {"new": fake_stderr},
+ "CLOUDINIT_LOGS": {"new": [log1, log2]},
+ "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
+ "USER_DATA_FILE": {"new": userdata},
+ },
+ logs.collect_logs,
+ output_tarfile,
+ include_userdata=True,
+ )
+ # unpack the tarfile and check file contents
+ subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertEqual(
+ "user-data", load_file(os.path.join(out_logdir, "user-data.txt"))
+ )
+ self.assertEqual(
+ "sensitive",
+ load_file(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ ),
+ )
+ fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py
new file mode 100644
index 00000000..4afc64f0
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_render.py
@@ -0,0 +1,154 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+
+from cloudinit.cmd.devel import render
+from cloudinit.helpers import Paths
+from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.util import ensure_dir, write_file
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja
+
+
+class TestRender(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple("renderargs", "user_data instance_data debug")
+
+ def setUp(self):
+ super(TestRender, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_handle_args_error_on_missing_user_data(self):
+ """When user_data file path does not exist, log an error."""
+ absent_file = self.tmp_path("user-data", dir=self.tmp)
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, "{}")
+ args = self.args(
+ user_data=absent_file, instance_data=instance_data, debug=False
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "Missing user-data file: %s" % absent_file, self.logs.getvalue()
+ )
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ absent_file = self.tmp_path("instance-data", dir=self.tmp)
+ args = self.args(
+ user_data=user_data, instance_data=absent_file, debug=False
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "Missing instance-data.json file: %s" % absent_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ "Missing instance-data.json file: %s" % json_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_root_fallback_from_sensitive_instance_data(self):
+ """When root user defaults to sensitive.json."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ self.assertEqual(1, render.handle_args("anyname", args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ self.assertIn(
+ "WARNING: Missing root-readable %s. Using redacted %s"
+ % (json_sensitive, json_file),
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "ERROR: Missing instance-data.json file: %s" % json_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_root_uses_sensitive_instance_data(self):
+ """When root user, and no instance-data arg, use sensitive.json."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ write_file(json_sensitive, '{"my-var": "jinja worked"}')
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ self.assertEqual(0, render.handle_args("anyname", args))
+ self.assertIn("rendering: jinja worked", m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_renders_instance_data_vars_in_template(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_console_err:
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, render.handle_args("anyname", args))
+ self.assertIn(
+ "DEBUG: Converted jinja variables\n{", self.logs.getvalue()
+ )
+ self.assertIn(
+ "DEBUG: Converted jinja variables\n{", m_console_err.getvalue()
+ )
+ self.assertEqual("rendering: jinja worked", m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
+ """If user_data file has invalid jinja operations log warnings."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my-var }}")
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "WARNING: Ignoring jinja template for %s: Undefined jinja"
+ ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
+ ' "my_var"?' % user_data,
+ self.logs.getvalue(),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
new file mode 100644
index 00000000..7d12017e
--- /dev/null
+++ b/tests/unittests/cmd/test_clean.py
@@ -0,0 +1,211 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+
+from cloudinit.cmd import clean
+from cloudinit.util import ensure_dir, sym_link, write_file
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "cloud_dir")
+
+
+class TestClean(CiTestCase):
+ def setUp(self):
+ super(TestClean, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.artifact_dir = self.tmp_path("artifacts", self.new_root)
+ self.log1 = self.tmp_path("cloud-init.log", self.new_root)
+ self.log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+
+ class FakeInit(object):
+ cfg = {
+ "def_log_file": self.log1,
+ "output": {"all": "|tee -a {0}".format(self.log2)},
+ }
+ # Ensure cloud_dir has a trailing slash, to match real behaviour
+ paths = mypaths(cloud_dir="{}/".format(self.artifact_dir))
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test_remove_artifacts_removes_logs(self):
+ """remove_artifacts removes logs when remove_logs is True."""
+ write_file(self.log1, "cloud-init-log")
+ write_file(self.log2, "cloud-init-output-log")
+
+ self.assertFalse(
+ os.path.exists(self.artifact_dir), "Unexpected artifacts dir"
+ )
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=True,
+ )
+ self.assertFalse(os.path.exists(self.log1), "Unexpected file")
+ self.assertFalse(os.path.exists(self.log2), "Unexpected file")
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_preserves_logs(self):
+ """remove_artifacts leaves logs when remove_logs is False."""
+ write_file(self.log1, "cloud-init-log")
+ write_file(self.log2, "cloud-init-output-log")
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertTrue(os.path.exists(self.log1), "Missing expected file")
+ self.assertTrue(os.path.exists(self.log2), "Missing expected file")
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_removes_unlinks_symlinks(self):
+ """remove_artifacts cleans artifacts dir unlinking any symlinks."""
+ dir1 = os.path.join(self.artifact_dir, "dir1")
+ ensure_dir(dir1)
+ symlink = os.path.join(self.artifact_dir, "mylink")
+ sym_link(dir1, symlink)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(0, retcode)
+ for path in (dir1, symlink):
+ self.assertFalse(
+ os.path.exists(path), "Unexpected {0} dir".format(path)
+ )
+
+ def test_remove_artifacts_removes_artifacts_skipping_seed(self):
+ """remove_artifacts cleans artifacts dir with exception of seed dir."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, "seed"),
+ os.path.join(self.artifact_dir, "dir1"),
+ os.path.join(self.artifact_dir, "dir2"),
+ ]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(0, retcode)
+ for expected_dir in dirs[:2]:
+ self.assertTrue(
+ os.path.exists(expected_dir),
+ "Missing {0} dir".format(expected_dir),
+ )
+ for deleted_dir in dirs[2:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ "Unexpected {0} dir".format(deleted_dir),
+ )
+
+ def test_remove_artifacts_removes_artifacts_removes_seed(self):
+ """remove_artifacts removes seed dir when remove_seed is True."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, "seed"),
+ os.path.join(self.artifact_dir, "dir1"),
+ os.path.join(self.artifact_dir, "dir2"),
+ ]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ remove_seed=True,
+ )
+ self.assertEqual(0, retcode)
+ self.assertTrue(
+ os.path.exists(self.artifact_dir), "Missing artifact dir"
+ )
+ for deleted_dir in dirs[1:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ "Unexpected {0} dir".format(deleted_dir),
+ )
+
+ def test_remove_artifacts_returns_one_on_errors(self):
+ """remove_artifacts returns non-zero on failure and prints an error."""
+ ensure_dir(self.artifact_dir)
+ ensure_dir(os.path.join(self.artifact_dir, "dir1"))
+
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "del_dir": {"side_effect": OSError("oops")},
+ "Init": {"side_effect": self.init_class},
+ },
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual(
+ "Error:\nCould not remove %s/dir1: oops\n" % self.artifact_dir,
+ m_stderr.getvalue(),
+ )
+
+ def test_handle_clean_args_reboots(self):
+ """handle_clean_args_reboots when reboot arg is provided."""
+
+ called_cmds = []
+
+ def fake_subp(cmd, capture):
+ called_cmds.append((cmd, capture))
+ return "", ""
+
+ myargs = namedtuple("MyArgs", "remove_logs remove_seed reboot")
+ cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "subp": {"side_effect": fake_subp},
+ "Init": {"side_effect": self.init_class},
+ },
+ clean.handle_clean_args,
+ name="does not matter",
+ args=cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual([(["shutdown", "-r", "now"], False)], called_cmds)
+
+ def test_status_main(self):
+ """clean.main can be run as a standalone script."""
+ write_file(self.log1, "cloud-init-log")
+ with self.assertRaises(SystemExit) as context_manager:
+ wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "Init": {"side_effect": self.init_class},
+ "sys.argv": {"new": ["clean", "--logs"]},
+ },
+ clean.main,
+ )
+
+ self.assertEqual(0, context_manager.exception.code)
+ self.assertFalse(
+ os.path.exists(self.log1), "Unexpected log {0}".format(self.log1)
+ )
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py
new file mode 100644
index 00000000..907297a6
--- /dev/null
+++ b/tests/unittests/cmd/test_cloud_id.py
@@ -0,0 +1,187 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloud-id command line utility."""
+
+from collections import namedtuple
+
+import pytest
+
+from cloudinit import util
+from cloudinit.cmd import cloud_id
+from tests.unittests.helpers import mock
+
+M_PATH = "cloudinit.cmd.cloud_id."
+
+
+class TestCloudId:
+
+ args = namedtuple("cloudidargs", "instance_data json long")
+
+ def test_cloud_id_arg_parser_defaults(self):
+ """Validate the argument defaults when not provided by the end-user."""
+ cmd = ["cloud-id"]
+ with mock.patch("sys.argv", cmd):
+ args = cloud_id.get_parser().parse_args()
+ assert "/run/cloud-init/instance-data.json" == args.instance_data
+ assert False is args.long
+ assert False is args.json
+
+ def test_cloud_id_arg_parse_overrides(self, tmpdir):
+ """Override argument defaults by specifying values for each param."""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write("{}")
+ cmd = [
+ "cloud-id",
+ "--instance-data",
+ instance_data.strpath,
+ "--long",
+ "--json",
+ ]
+ with mock.patch("sys.argv", cmd):
+ args = cloud_id.get_parser().parse_args()
+ assert instance_data.strpath == args.instance_data
+ assert True is args.long
+ assert True is args.json
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_missing_instance_data_json(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Exit error when the provided instance-data.json does not exist."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert "Error:\nFile not found '%s'" % instance_data.strpath in err
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_non_json_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Exit error when the provided instance-data.json is not json."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ instance_data.write("{")
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert (
+ "Error:\nFile '%s' is not valid json." % instance_data.strpath
+ in err
+ )
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_from_cloud_name_in_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report canonical cloud-id from cloud_name in instance-data."""
+ instance_data = tmpdir.join("instance-data.json")
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data.write(
+ '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 0 == context_manager.value.code
+ out, _err = capsys.readouterr()
+ assert "mycloud\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_long_name_from_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report long cloud-id format from cloud_name and region."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--long"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ out, _err = capsys.readouterr()
+ assert 0 == context_manager.value.code
+ assert "mycloud\tsomereg\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_lookup_from_instance_data_region(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report discovered canonical cloud_id when region lookup matches."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
+ ' "platform": "ec2"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--long"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 0 == context_manager.value.code
+ out, _err = capsys.readouterr()
+ assert "aws-china\tcn-north-1\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report v1 instance-data content with cloud_id when --json set."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "unknown", "region": "dfw",'
+ ' "platform": "openstack", "public_ssh_keys": []}}',
+ )
+ expected = util.json_dumps(
+ {
+ "cloud_id": "openstack",
+ "cloud_name": "unknown",
+ "platform": "openstack",
+ "public_ssh_keys": [],
+ "region": "dfw",
+ }
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ out, _err = capsys.readouterr()
+ assert 0 == context_manager.value.code
+ assert expected + "\n" == out
+
+ @pytest.mark.parametrize(
+ "status, exit_code",
+ (
+ (cloud_id.UXAppStatus.DISABLED, 2),
+ (cloud_id.UXAppStatus.NOT_RUN, 3),
+ (cloud_id.UXAppStatus.RUNNING, 0),
+ ),
+ )
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_unique_exit_codes_for_status(
+ self, get_status_details, status, exit_code, tmpdir, capsys
+ ):
+ """cloud-id returns unique exit codes for status."""
+ get_status_details.return_value = status, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ if status == cloud_id.UXAppStatus.RUNNING:
+ instance_data.write("{}")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert exit_code == context_manager.value.code
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py
new file mode 100644
index 00000000..3e778b0b
--- /dev/null
+++ b/tests/unittests/cmd/test_main.py
@@ -0,0 +1,241 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+from collections import namedtuple
+from io import StringIO
+from unittest import mock
+
+import pytest
+
+from cloudinit import safeyaml
+from cloudinit.cmd import main
+from cloudinit.util import ensure_dir, load_file, write_file
+from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "run_dir")
+myargs = namedtuple("MyArgs", "debug files force local reporter subcommand")
+
+
+class TestMain(FilesystemMockingTestCase):
+ with_logs = True
+ allowed_subp = False
+
+ def setUp(self):
+ super(TestMain, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.cloud_dir = self.tmp_path("var/lib/cloud/", dir=self.new_root)
+ os.makedirs(self.cloud_dir)
+ self.replicateTestRoot("simple_ubuntu", self.new_root)
+ self.cfg = {
+ "datasource_list": ["None"],
+ "runcmd": ["ls /etc"], # test ALL_DISTROS
+ "system_info": {
+ "paths": {
+ "cloud_dir": self.cloud_dir,
+ "run_dir": self.new_root,
+ }
+ },
+ "write_files": [
+ {
+ "path": "/etc/blah.ini",
+ "content": "blah",
+ "permissions": 0o755,
+ },
+ ],
+ "cloud_init_modules": ["write-files", "runcmd"],
+ }
+ cloud_cfg = safeyaml.dumps(self.cfg)
+ ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ self.cloud_cfg_file = os.path.join(
+ self.new_root, "etc", "cloud", "cloud.cfg"
+ )
+ write_file(self.cloud_cfg_file, cloud_cfg)
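+        # patchOS / patchUtils rebase os and cloudinit.util file operations onto the temporary root.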
+ self.patchOS(self.new_root)
+ self.patchUtils(self.new_root)
+ self.stderr = StringIO()
+ self.patchStdoutAndStderr(stderr=self.stderr)
+
+ def test_main_init_run_net_stops_on_file_no_net(self):
+ """When no-net file is present, main_init does not process modules."""
+ stop_file = os.path.join(self.cloud_dir, "data", "no-net") # stop file
+ write_file(stop_file, "")
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ # We should not run write_files module
+ self.assertFalse(
+ os.path.exists(os.path.join(self.new_root, "etc/blah.ini")),
+ "Unexpected run of write_files module produced blah.ini",
+ )
+ self.assertEqual([], item2)
+        # Instancify should not be called, so no instance-id file is written
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertFalse(
+ os.path.exists(os.path.join(self.new_root, instance_id_path)),
+ "Unexpected call to datasource.instancify produced instance-id",
+ )
+ expected_logs = [
+ "Exiting. stop file ['{stop_file}'] existed\n".format(
+ stop_file=stop_file
+ ),
+ "my net debug info", # netinfo.debug_info
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+ def test_main_init_run_net_runs_modules(self):
+ """Modules like write_files are run in 'net' mode."""
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ self.assertEqual([], item2)
+ # Instancify is called
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertEqual(
+ "iid-datasource-none\n",
+ os.path.join(
+ load_file(os.path.join(self.new_root, instance_id_path))
+ ),
+ )
+ # modules are run (including write_files)
+ self.assertEqual(
+ "blah", load_file(os.path.join(self.new_root, "etc/blah.ini"))
+ )
+ expected_logs = [
+ "network config is disabled by fallback", # apply_network_config
+ "my net debug info", # netinfo.debug_info
+ "no previous run detected",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+ def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
+ """When local-hostname metadata is present, call cc_set_hostname."""
+ self.cfg["datasource"] = {
+ "None": {"metadata": {"local-hostname": "md-hostname"}}
+ }
+ cloud_cfg = safeyaml.dumps(self.cfg)
+ write_file(self.cloud_cfg_file, cloud_cfg)
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+
+ def set_hostname(name, cfg, cloud, log, args):
+ self.assertEqual("set-hostname", name)
+ updated_cfg = copy.deepcopy(self.cfg)
+ updated_cfg.update(
+ {
+ "def_log_file": "/var/log/cloud-init.log",
+ "log_cfgs": [],
+ "syslog_fix_perms": [
+ "syslog:adm",
+ "root:adm",
+ "root:wheel",
+ "root:root",
+ ],
+ "vendor_data": {"enabled": True, "prefix": []},
+ "vendor_data2": {"enabled": True, "prefix": []},
+ }
+ )
+ updated_cfg.pop("system_info")
+
+ self.assertEqual(updated_cfg, cfg)
+ self.assertEqual(main.LOG, log)
+ self.assertIsNone(args)
+
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "cc_set_hostname.handle": {"side_effect": set_hostname},
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ self.assertEqual([], item2)
+ # Instancify is called
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertEqual(
+ "iid-datasource-none\n",
+ os.path.join(
+ load_file(os.path.join(self.new_root, instance_id_path))
+ ),
+ )
+ # modules are run (including write_files)
+ self.assertEqual(
+ "blah", load_file(os.path.join(self.new_root, "etc/blah.ini"))
+ )
+ expected_logs = [
+ "network config is disabled by fallback", # apply_network_config
+ "my net debug info", # netinfo.debug_info
+ "no previous run detected",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+
+class TestShouldBringUpInterfaces:
+ @pytest.mark.parametrize(
+ "cfg_disable,args_local,expected",
+ [
+ (True, True, False),
+ (True, False, False),
+ (False, True, False),
+ (False, False, True),
+ ],
+ )
+ def test_should_bring_up_interfaces(
+ self, cfg_disable, args_local, expected
+ ):
+ init = mock.Mock()
+ init.cfg = {"disable_network_activation": cfg_disable}
+
+ args = mock.Mock()
+ args.local = args_local
+
+ result = main._should_bring_up_interfaces(init, args)
+ assert result == expected
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
new file mode 100644
index 00000000..03a73bb5
--- /dev/null
+++ b/tests/unittests/cmd/test_query.py
@@ -0,0 +1,537 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import errno
+import gzip
+import json
+import os
+from collections import namedtuple
+from io import BytesIO
+from textwrap import dedent
+
+import pytest
+
+from cloudinit.cmd import query
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
+from cloudinit.util import b64e, write_file
+from tests.unittests.helpers import mock
+
+
+def _gzip_data(data):
+ with BytesIO() as iobuf:
+ with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
+ gzfp.write(data)
+ return iobuf.getvalue()
+
+
+@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
+class TestQuery:
+
+ args = namedtuple(
+ "queryargs",
+ "debug dump_all format instance_data list_keys user_data vendor_data"
+ " varname",
+ )
+
+ def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
+ """Write userdata and vendordata into a tmpdir.
+
+ Return:
+ 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
+ """
+ if ud_val:
+ user_data = tmpdir.join("user-data")
+ write_file(user_data.strpath, ud_val)
+ else:
+ user_data = None
+ if vd_val:
+ vendor_data = tmpdir.join("vendor-data")
+ write_file(vendor_data.strpath, vd_val)
+ else:
+ vendor_data = None
+ run_dir = tmpdir.join("run_dir")
+ run_dir.ensure_dir()
+
+ cloud_dir = tmpdir.join("cloud_dir")
+ cloud_dir.ensure_dir()
+
+ return (
+ Paths(
+ {"cloud_dir": cloud_dir.strpath, "run_dir": run_dir.strpath}
+ ),
+ run_dir,
+ user_data,
+ vendor_data,
+ )
+
+ def test_handle_args_error_on_missing_param(self, caplog, capsys):
+ """Error when missing required parameters and print usage."""
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ) as m_cli_log:
+ assert 1 == query.handle_args("anyname", args)
+ expected_error = (
+ "Expected one of the options: --all, --format, --list-keys"
+ " or varname\n"
+ )
+ assert expected_error in caplog.text
+ out, _err = capsys.readouterr()
+ assert "usage: query" in out
+ assert 1 == m_cli_log.call_count
+
+ @pytest.mark.parametrize(
+ "inst_data,varname,expected_error",
+ (
+ (
+ '{"v1": {"key-2": "value-2"}}',
+ "v1.absent_leaf",
+ "instance-data 'v1' has no 'absent_leaf'\n",
+ ),
+ (
+ '{"v1": {"key-2": "value-2"}}',
+ "absent_key",
+ "Undefined instance-data key 'absent_key'\n",
+ ),
+ ),
+ )
+    def test_handle_args_error_on_invalid_varname_paths(
+ self, inst_data, varname, expected_error, caplog, tmpdir
+ ):
+ """Error when varname is not a valid instance-data variable path."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(inst_data)
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=varname,
+ )
+ paths, _, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ):
+ with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud:
+ m_lud.return_value = "ud"
+ assert 1 == query.handle_args("anyname", args)
+ assert expected_error in caplog.text
+
+ def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
+ """When instance_data file path does not exist, log an error."""
+ absent_fn = tmpdir.join("absent")
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=absent_fn.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ assert 1 == query.handle_args("anyname", args)
+
+ msg = "Missing instance-data file: %s" % absent_fn
+ assert msg in caplog.text
+
+ def test_handle_args_error_when_no_read_permission_instance_data(
+ self, caplog, tmpdir
+ ):
+ """When instance_data file is unreadable, log an error."""
+ noread_fn = tmpdir.join("unreadable")
+ noread_fn.write("thou shall not pass")
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=noread_fn.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.util.load_file") as m_load:
+ m_load.side_effect = OSError(errno.EACCES, "Not allowed")
+ assert 1 == query.handle_args("anyname", args)
+ msg = "No read permission on '%s'. Try sudo" % noread_fn
+ assert msg in caplog.text
+
+ def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
+ """When no instance_data argument, default to configured run_dir."""
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ assert 1 == query.handle_args("anyname", args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ msg = "Missing instance-data file: %s" % json_file.strpath
+ assert msg in caplog.text
+
+ def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
+ """When no instance_data argument, root falls back to redacted json."""
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 1 == query.handle_args("anyname", args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ msg = "Missing root-readable %s. Using redacted %s instead." % (
+ sensitive_file.strpath,
+ json_file.strpath,
+ )
+ assert msg in caplog.text
+
+ @pytest.mark.parametrize(
+ "ud_src,ud_expected,vd_src,vd_expected",
+ (
+ ("hi mom", "hi mom", "hi pops", "hi pops"),
+ ("ud".encode("utf-8"), "ud", "vd".encode("utf-8"), "vd"),
+ (_gzip_data(b"ud"), "ud", _gzip_data(b"vd"), "vd"),
+ (_gzip_data("ud".encode("utf-8")), "ud", _gzip_data(b"vd"), "vd"),
+ ),
+ )
+ def test_handle_args_root_processes_user_data(
+ self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
+ ):
+ """Support reading multiple user-data file content types"""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val=ud_src, vd_val=vd_src
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ cmd_output = json.loads(out)
+ assert "it worked" == cmd_output["my-var"]
+ if ud_expected == "ci-b64:":
+ ud_expected = "ci-b64:{}".format(b64e(ud_src))
+ if vd_expected == "ci-b64:":
+ vd_expected = "ci-b64:{}".format(b64e(vd_src))
+ assert ud_expected == cmd_output["userdata"]
+ assert vd_expected == cmd_output["vendordata"]
+
+ def test_handle_args_user_vendor_data_defaults_to_instance_link(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+
+ ud_path = os.path.join(paths.instance_link, "user-data.txt")
+ write_file(ud_path, "instance_link_ud")
+ vd_path = os.path.join(paths.instance_link, "vendor-data.txt")
+ write_file(vd_path, "instance_link_vd")
+
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid", return_value=0):
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n '
+ '"userdata": "instance_link_ud",\n '
+ '"vendordata": "instance_link_vd"\n}\n'
+ )
+ out, _ = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_root_uses_instance_sensitive_data(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val="ud", vd_val="vd"
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n '
+ '"userdata": "ud",\n "vendordata": "vd"\n}\n'
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
+ """When --all is specified query will dump all instance data vars."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n'
+ ' "vendordata": "<%s> file:vd"\n}\n'
+ % (REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE)
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
+ """When the argument varname is passed, report its value."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname="my_var",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert "it worked\n" == out
+
+ @pytest.mark.parametrize(
+ "inst_data,varname,expected",
+ (
+ (
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}',
+ "v1.key_2",
+ "value-2\n",
+ ),
+ # Assert no jinja underscore-delimited aliases are reported on CLI
+ (
+ '{"v1": {"something-hyphenated": {"no.underscores":"x",'
+ ' "no-alias": "y"}}, "my-var": "it worked"}',
+ "v1.something_hyphenated",
+ '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n',
+ ),
+ ),
+ )
+ def test_handle_args_returns_nested_varname(
+ self, inst_data, varname, expected, capsys, tmpdir
+ ):
+ """If user_data file is a jinja template render instance-data vars."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(inst_data)
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ user_data="ud",
+ vendor_data="vd",
+ list_keys=False,
+ varname=varname,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(
+ self, capsys, tmpdir
+ ):
+ """Any standardized vars under v# are promoted as top-level aliases."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}'
+ )
+ expected = dedent(
+ """\
+ {
+ "top": "gun",
+ "userdata": "<redacted for non-root user> file:ud",
+ "v1": {
+ "v1_1": "val1.1"
+ },
+ "v1_1": "val1.1",
+ "v2": {
+ "v2_2": "val2.2"
+ },
+ "v2_2": "val2.2",
+ "vendordata": "<redacted for non-root user> file:vd"
+ }
+ """
+ )
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ user_data="ud",
+ vendor_data="vd",
+ list_keys=False,
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
+ self, capsys, tmpdir
+ ):
+ """Sort all top-level keys when only --list-keys provided."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}'
+ )
+ expected = "top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(
+ self, capsys, tmpdir
+ ):
+ """Sort all nested keys of varname object when --list-keys provided."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":'
+ + ' {"v2_2": "val2.2"}, "top": "gun"}'
+ )
+ expected = "v1_1\nv1_2\n"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname="v1",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
+ self, caplog, tmpdir
+ ):
+ """Raise an error when --list-keys and varname specify a non-list."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": '
+ + '{"v2_2": "val2.2"}, "top": "gun"}'
+ )
+ expected_error = "--list-keys provided but 'top' is not a dict"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname="top",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 1 == query.handle_args("anyname", args)
+ assert expected_error in caplog.text
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
new file mode 100644
index 00000000..c5f424da
--- /dev/null
+++ b/tests/unittests/cmd/test_status.py
@@ -0,0 +1,548 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit.atomic_helper import write_json
+from cloudinit.cmd import status
+from cloudinit.util import ensure_file
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "run_dir")
+myargs = namedtuple("MyArgs", "long wait")
+
+
+class TestStatus(CiTestCase):
+ def setUp(self):
+ super(TestStatus, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.status_file = self.tmp_path("status.json", self.new_root)
+ self.disable_file = self.tmp_path("cloudinit-disable", self.new_root)
+ self.paths = mypaths(run_dir=self.new_root)
+
+ class FakeInit(object):
+ paths = self.paths
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test__is_cloudinit_disabled_false_on_sysvinit(self):
+ """When not in an environment using systemd, return False."""
+ ensure_file(self.disable_file) # Create the ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": False,
+ "get_cmdline": "root=/dev/my-root not-important",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(
+ is_disabled, "expected enabled cloud-init on sysvinit"
+ )
+ self.assertEqual("Cloud-init enabled on sysvinit", reason)
+
+ def test__is_cloudinit_disabled_true_on_disable_file(self):
+ """When using systemd and disable_file is present return disabled."""
+ ensure_file(self.disable_file) # Create observed disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "root=/dev/my-root not-important",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual(
+ "Cloud-init disabled by {0}".format(self.disable_file), reason
+ )
+
+ def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
+ """Not disabled when using systemd and enabled via commandline."""
+ ensure_file(self.disable_file) # Create ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "something cloud-init=enabled else",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(is_disabled, "expected enabled cloud-init")
+ self.assertEqual(
+ "Cloud-init enabled by kernel command line cloud-init=enabled",
+ reason,
+ )
+
+ def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
+ """When kernel command line disables cloud-init return True."""
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "something cloud-init=disabled else",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual(
+ "Cloud-init disabled by kernel parameter cloud-init=disabled",
+ reason,
+ )
+
+ def test__is_cloudinit_disabled_true_when_generator_disables(self):
+ """When cloud-init-generator writes disabled file return True."""
+ disabled_file = os.path.join(self.paths.run_dir, "disabled")
+ ensure_file(disabled_file)
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {"uses_systemd": True, "get_cmdline": "something"},
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual("Cloud-init disabled by cloud-init-generator", reason)
+
+ def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
+ """Report enabled when systemd generator creates the enabled file."""
+ enabled_file = os.path.join(self.paths.run_dir, "enabled")
+ ensure_file(enabled_file)
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {"uses_systemd": True, "get_cmdline": "something ignored"},
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(is_disabled, "expected enabled cloud-init")
+ self.assertEqual(
+ "Cloud-init enabled by systemd cloud-init-generator", reason
+ )
+
+ def test_status_returns_not_run(self):
+ """When status.json does not exist yet, return 'not run'."""
+ self.assertFalse(
+ os.path.exists(self.status_file), "Unexpected status.json found"
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: not run\n", m_stdout.getvalue())
+
+ def test_status_returns_disabled_long_on_presence_of_disable_file(self):
+ """When cloudinit is disabled, return disabled reason."""
+
+ checked_files = []
+
+ def fakeexists(filepath):
+ checked_files.append(filepath)
+ status_file = os.path.join(self.paths.run_dir, "status.json")
+ return bool(not filepath == status_file)
+
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "os.path.exists": {"side_effect": fakeexists},
+ "_is_cloudinit_disabled": (
+ True,
+ "disabled for some reason",
+ ),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual(
+ [os.path.join(self.paths.run_dir, "status.json")], checked_files
+ )
+ expected = dedent(
+ """\
+ status: disabled
+ detail:
+ disabled for some reason
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running_on_no_results_json(self):
+ """Report running when status.json exists but result.json does not."""
+ result_file = self.tmp_path("result.json", self.new_root)
+ write_json(self.status_file, {})
+ self.assertFalse(
+ os.path.exists(result_file), "Unexpected result.json found"
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+ def test_status_returns_running(self):
+ """Report running when status exists with an unfinished stage."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+ def test_status_returns_done(self):
+ """Report done results.json exists no stages are unfinished."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None, # No current stage running
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "blah": {"finished": 123.456},
+ "init": {
+ "errors": [],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: done\n", m_stdout.getvalue())
+
+ def test_status_returns_done_long(self):
+ """Long format of done status includes datasource info."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {"start": 124.567, "finished": 125.678},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ expected = dedent(
+ """\
+ status: done
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_on_errors(self):
+ """Reports error when any stage has errors."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "blah": {"errors": [], "finished": 123.456},
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual("status: error\n", m_stdout.getvalue())
+
+ def test_status_on_errors_long(self):
+ """Long format of error status includes all error messages."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {
+ "errors": ["error2", "error3"],
+ "start": 123.45,
+ "finished": 123.46,
+ },
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ expected = dedent(
+ """\
+ status: error
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ error1
+ error2
+ error3
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running_long_format(self):
+ """Long format reports the stage in which we are running."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ expected = dedent(
+ """\
+ status: running
+ time: Thu, 01 Jan 1970 00:02:04 +0000
+ detail:
+ Running in stage: init
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_done(self):
+ """Specifying wait will poll every 1/4 second until done state."""
+ running_json = {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+ done_json = {
+ "v1": {
+ "stage": None,
+ "init": {"start": 124.456, "finished": 125.678},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, done_json)
+ result_file = self.tmp_path("result.json", self.new_root)
+ ensure_file(result_file)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual("....\nstatus: done\n", m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_error(self):
+ """Specifying wait will poll every 1/4 second until error state."""
+ running_json = {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+ error_json = {
+ "v1": {
+ "stage": None,
+ "init": {
+ "errors": ["error1"],
+ "start": 124.456,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, error_json)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual("....\nstatus: error\n", m_stdout.getvalue())
+
+ def test_status_main(self):
+ """status.main can be run as a standalone script."""
+ write_json(
+ self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
+ )
+ with self.assertRaises(SystemExit) as context_manager:
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sys.argv": {"new": ["status"]},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.main,
+ )
+ self.assertEqual(0, context_manager.exception.code)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/net/tests/__init__.py b/tests/unittests/config/__init__.py
index e69de29b..e69de29b 100644
--- a/cloudinit/net/tests/__init__.py
+++ b/tests/unittests/config/__init__.py
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/config/test_apt_conf_v1.py
index 6a4b03ee..5a75cf0a 100644
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ b/tests/unittests/config/test_apt_conf_v1.py
@@ -1,16 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_apt_configure
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
import copy
import os
import re
import shutil
import tempfile
+from cloudinit import util
+from cloudinit.config import cc_apt_configure
+from tests.unittests.helpers import TestCase
+
class TestAptProxyConfig(TestCase):
def setUp(self):
@@ -23,10 +22,12 @@ class TestAptProxyConfig(TestCase):
def _search_apt_config(self, contents, ptype, value):
return re.search(
r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
- contents, flags=re.IGNORECASE)
+ contents,
+ flags=re.IGNORECASE,
+ )
def test_apt_proxy_written(self):
- cfg = {'proxy': 'myproxy'}
+ cfg = {"proxy": "myproxy"}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -36,7 +37,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
- cfg = {'http_proxy': 'myproxy'}
+ cfg = {"http_proxy": "myproxy"}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -46,14 +47,17 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
- cfg = {'http_proxy': 'myproxy_http_proxy',
- 'https_proxy': 'myproxy_https_proxy',
- 'ftp_proxy': 'myproxy_ftp_proxy'}
-
- values = {'http': cfg['http_proxy'],
- 'https': cfg['https_proxy'],
- 'ftp': cfg['ftp_proxy'],
- }
+ cfg = {
+ "http_proxy": "myproxy_http_proxy",
+ "https_proxy": "myproxy_https_proxy",
+ "ftp_proxy": "myproxy_ftp_proxy",
+ }
+
+ values = {
+ "http": cfg["http_proxy"],
+ "https": cfg["https_proxy"],
+ "ftp": cfg["ftp_proxy"],
+ }
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -73,15 +77,16 @@ class TestAptProxyConfig(TestCase):
def test_proxy_replaced(self):
util.write_file(self.cfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'proxy': "foo"},
- self.pfile, self.cfile)
+ cc_apt_configure.apply_apt_config(
+ {"proxy": "foo"}, self.pfile, self.cfile
+ )
self.assertTrue(os.path.isfile(self.pfile))
contents = util.load_file(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
- payload = 'this is my apt config'
- cfg = {'conf': payload}
+ payload = "this is my apt config"
+ cfg = {"conf": payload}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -92,8 +97,9 @@ class TestAptProxyConfig(TestCase):
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'conf': "foo"},
- self.pfile, self.cfile)
+ cc_apt_configure.apply_apt_config(
+ {"conf": "foo"}, self.pfile, self.cfile
+ )
self.assertTrue(os.path.isfile(self.cfile))
self.assertEqual(util.load_file(self.cfile), "foo")
@@ -109,21 +115,23 @@ class TestConversion(TestCase):
def test_convert_with_apt_mirror_as_empty_string(self):
# an empty apt_mirror is the same as no apt_mirror
empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
- {'apt_mirror': ''})
+ {"apt_mirror": ""}
+ )
default_found = cc_apt_configure.convert_to_v3_apt_format({})
self.assertEqual(default_found, empty_m_found)
def test_convert_with_apt_mirror(self):
- mirror = 'http://my.mirror/ubuntu'
- f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
- self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
+ mirror = "http://my.mirror/ubuntu"
+ f = cc_apt_configure.convert_to_v3_apt_format({"apt_mirror": mirror})
+ self.assertIn(mirror, set(m["uri"] for m in f["apt"]["primary"]))
def test_no_old_content(self):
- mirror = 'http://my.mirror/ubuntu'
- mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
+ mirror = "http://my.mirror/ubuntu"
+ mydata = {"apt": {"primary": {"arches": ["default"], "uri": mirror}}}
expected = copy.deepcopy(mydata)
- self.assertEqual(expected,
- cc_apt_configure.convert_to_v3_apt_format(mydata))
+ self.assertEqual(
+ expected, cc_apt_configure.convert_to_v3_apt_format(mydata)
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py
index 369480be..d4ade106 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py
@@ -9,19 +9,11 @@ import shutil
import tempfile
from unittest import mock
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import templater
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, templater, util
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.distros.debian import Distro
-
-from cloudinit.tests import helpers as t_help
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -44,8 +36,7 @@ apt_custom_sources_list: |
# FIND_SOMETHING_SPECIAL
"""
-EXPECTED_CONVERTED_CONTENT = (
- """## Note, this file is written by cloud-init on first boot of an instance
+EXPECTED_CONVERTED_CONTENT = """## Note, this file is written by cloud-init on first boot of an instance
## modifications made here will not survive a re-bundle.
## if you wish to make changes you can:
## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
@@ -58,13 +49,14 @@ EXPECTED_CONVERTED_CONTENT = (
deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
# FIND_SOMETHING_SPECIAL
-""")
+"""
class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList
Main Class to test sources list rendering
"""
+
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
self.subp = subp.subp
@@ -73,23 +65,13 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
rpatcher = mock.patch("cloudinit.util.lsb_release")
get_rel = rpatcher.start()
- get_rel.return_value = {'codename': "fakerelease"}
+ get_rel.return_value = {"codename": "fakerelease"}
self.addCleanup(rpatcher.stop)
apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
+ get_arch.return_value = "amd64"
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def apt_source_list(self, distro, mirror, mirrorcheck=None):
"""apt_source_list
Test rendering of a source.list from template for a given distro
@@ -98,47 +80,57 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
mirrorcheck = mirror
if isinstance(mirror, list):
- cfg = {'apt_mirror_search': mirror}
+ cfg = {"apt_mirror_search": mirror}
else:
- cfg = {'apt_mirror': mirror}
+ cfg = {"apt_mirror": mirror}
- mycloud = self._get_cloud(distro)
+ mycloud = get_cloud(distro)
- with mock.patch.object(util, 'write_file') as mockwf:
- with mock.patch.object(util, 'load_file',
- return_value="faketmpl") as mocklf:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mockisfile:
+ with mock.patch.object(util, "write_file") as mockwf:
+ with mock.patch.object(
+ util, "load_file", return_value="faketmpl"
+ ) as mocklf:
+ with mock.patch.object(
+ os.path, "isfile", return_value=True
+ ) as mockisfile:
with mock.patch.object(
- templater, 'render_string',
- return_value='fake') as mockrnd:
- with mock.patch.object(util, 'rename'):
- cc_apt_configure.handle("test", cfg, mycloud,
- LOG, None)
+ templater, "render_string", return_value="fake"
+ ) as mockrnd:
+ with mock.patch.object(util, "rename"):
+ cc_apt_configure.handle(
+ "test", cfg, mycloud, LOG, None
+ )
mockisfile.assert_any_call(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ )
mocklf.assert_any_call(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
- mockrnd.assert_called_once_with('faketmpl',
- {'RELEASE': 'fakerelease',
- 'PRIMARY': mirrorcheck,
- 'MIRROR': mirrorcheck,
- 'SECURITY': mirrorcheck,
- 'codename': 'fakerelease',
- 'primary': mirrorcheck,
- 'mirror': mirrorcheck,
- 'security': mirrorcheck})
- mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake',
- mode=0o644)
+ "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ )
+ mockrnd.assert_called_once_with(
+ "faketmpl",
+ {
+ "RELEASE": "fakerelease",
+ "PRIMARY": mirrorcheck,
+ "MIRROR": mirrorcheck,
+ "SECURITY": mirrorcheck,
+ "codename": "fakerelease",
+ "primary": mirrorcheck,
+ "mirror": mirrorcheck,
+ "security": mirrorcheck,
+ },
+ )
+ mockwf.assert_called_once_with(
+ "/etc/apt/sources.list", "fake", mode=0o644
+ )
def test_apt_v1_source_list_debian(self):
"""Test rendering of a source.list from template for debian"""
- self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
+ self.apt_source_list("debian", "http://httpredir.debian.org/debian")
def test_apt_v1_source_list_ubuntu(self):
"""Test rendering of a source.list from template for ubuntu"""
- self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
+ self.apt_source_list("ubuntu", "http://archive.ubuntu.com/ubuntu/")
@staticmethod
def myresolve(name):
@@ -152,43 +144,51 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v1_srcl_debian_mirrorfail(self):
"""Test rendering of a source.list from template for debian"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('debian',
- ['http://does.not.exist',
- 'http://httpredir.debian.org/debian'],
- 'http://httpredir.debian.org/debian')
+ with mock.patch.object(
+ util, "is_resolvable", side_effect=self.myresolve
+ ) as mockresolve:
+ self.apt_source_list(
+ "debian",
+ [
+ "http://does.not.exist",
+ "http://httpredir.debian.org/debian",
+ ],
+ "http://httpredir.debian.org/debian",
+ )
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("httpredir.debian.org")
def test_apt_v1_srcl_ubuntu_mirrorfail(self):
"""Test rendering of a source.list from template for ubuntu"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('ubuntu',
- ['http://does.not.exist',
- 'http://archive.ubuntu.com/ubuntu/'],
- 'http://archive.ubuntu.com/ubuntu/')
+ with mock.patch.object(
+ util, "is_resolvable", side_effect=self.myresolve
+ ) as mockresolve:
+ self.apt_source_list(
+ "ubuntu",
+ ["http://does.not.exist", "http://archive.ubuntu.com/ubuntu/"],
+ "http://archive.ubuntu.com/ubuntu/",
+ )
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("archive.ubuntu.com")
def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
- with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(subp, 'subp', self.subp):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
+ with mock.patch.object(util, "write_file") as mockwrite:
+ with mock.patch.object(subp, "subp", self.subp):
+ with mock.patch.object(
+ Distro, "get_primary_arch", return_value="amd64"
+ ):
+ cc_apt_configure.handle(
+ "notimportant", cfg, mycloud, LOG, None
+ )
mockwrite.assert_called_once_with(
- '/etc/apt/sources.list',
- EXPECTED_CONVERTED_CONTENT,
- mode=420)
+ "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=420
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py
index b96fd4d4..d9ec6f74 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py
@@ -7,21 +7,15 @@ import logging
import os
import shutil
import tempfile
+from contextlib import ExitStack
from unittest import mock
from unittest.mock import call
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.distros.debian import Distro
-
-from cloudinit.tests import helpers as t_help
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -69,30 +63,31 @@ deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
"""
-EXPECTED_BASE_CONTENT = ("""
+EXPECTED_BASE_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
-EXPECTED_MIRROR_CONTENT = ("""
+EXPECTED_MIRROR_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
-EXPECTED_PRIMSEC_CONTENT = ("""
+EXPECTED_PRIMSEC_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList - Class to test sources list rendering"""
+
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
self.subp = subp.subp
@@ -101,57 +96,60 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
rpatcher = mock.patch("cloudinit.util.lsb_release")
get_rel = rpatcher.start()
- get_rel.return_value = {'codename': "fakerel"}
+ get_rel.return_value = {"codename": "fakerel"}
self.addCleanup(rpatcher.stop)
apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
+ get_arch.return_value = "amd64"
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def _apt_source_list(self, distro, cfg, cfg_on_empty=False):
"""_apt_source_list - Test rendering from template (generic)"""
# entry at top level now, wrap in 'apt' key
- cfg = {'apt': cfg}
- mycloud = self._get_cloud(distro)
-
- with mock.patch.object(util, 'write_file') as mock_writefile:
- with mock.patch.object(util, 'load_file',
- return_value=MOCKED_APT_SRC_LIST
- ) as mock_loadfile:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mock_isfile:
- cfg_func = ('cloudinit.config.cc_apt_configure.' +
- '_should_configure_on_empty_apt')
- with mock.patch(cfg_func,
- return_value=(cfg_on_empty, "test")
- ) as mock_shouldcfg:
- cc_apt_configure.handle("test", cfg, mycloud, LOG,
- None)
-
- return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
+ cfg = {"apt": cfg}
+ mycloud = get_cloud(distro)
+
+ with ExitStack() as stack:
+ mock_writefile = stack.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_loadfile = stack.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=MOCKED_APT_SRC_LIST
+ )
+ )
+ mock_isfile = stack.enter_context(
+ mock.patch.object(os.path, "isfile", return_value=True)
+ )
+ stack.enter_context(mock.patch.object(util, "del_file"))
+ cfg_func = (
+ "cloudinit.config.cc_apt_configure."
+ "_should_configure_on_empty_apt"
+ )
+ mock_shouldcfg = stack.enter_context(
+ mock.patch(cfg_func, return_value=(cfg_on_empty, "test"))
+ )
+ cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+
+ return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
def test_apt_v3_source_list_debian(self):
"""test_apt_v3_source_list_debian - without custom sources or parms"""
cfg = {}
- distro = 'debian'
+ distro = "debian"
expected = EXPECTED_BASE_CONTENT
- mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ (
+ mock_writefile,
+ mock_load_file,
+ mock_isfile,
+ mock_shouldcfg,
+ ) = self._apt_source_list(distro, cfg, cfg_on_empty=True)
+
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
self.assertEqual(1, mock_shouldcfg.call_count)
@@ -159,15 +157,20 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_ubuntu(self):
"""test_apt_v3_source_list_ubuntu - without custom sources or parms"""
cfg = {}
- distro = 'ubuntu'
+ distro = "ubuntu"
expected = EXPECTED_BASE_CONTENT
- mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ (
+ mock_writefile,
+ mock_load_file,
+ mock_isfile,
+ mock_shouldcfg,
+ ) = self._apt_source_list(distro, cfg, cfg_on_empty=True)
+
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
self.assertEqual(1, mock_shouldcfg.call_count)
@@ -175,12 +178,13 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_ubuntu_snappy(self):
"""test_apt_v3_source_list_ubuntu_snappy - without custom sources or
parms"""
- cfg = {'apt': {}}
- mycloud = self._get_cloud('ubuntu')
+ cfg = {"apt": {}}
+ mycloud = get_cloud()
- with mock.patch.object(util, 'write_file') as mock_writefile:
- with mock.patch.object(util, 'system_is_snappy',
- return_value=True) as mock_issnappy:
+ with mock.patch.object(util, "write_file") as mock_writefile:
+ with mock.patch.object(
+ util, "system_is_snappy", return_value=True
+ ) as mock_issnappy:
cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
self.assertEqual(0, mock_writefile.call_count)
@@ -189,7 +193,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_centos(self):
"""test_apt_v3_source_list_centos - without custom sources or parms"""
cfg = {}
- distro = 'rhel'
+ distro = "rhel"
mock_writefile, _, _, _ = self._apt_source_list(distro, cfg)
@@ -197,41 +201,47 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_psm(self):
"""test_apt_v3_source_list_psm - Test specifying prim+sec mirrors"""
- pm = 'http://test.ubuntu.com/ubuntu/'
- sm = 'http://testsec.ubuntu.com/ubuntu/'
- cfg = {'preserve_sources_list': False,
- 'primary': [{'arches': ["default"],
- 'uri': pm}],
- 'security': [{'arches': ["default"],
- 'uri': sm}]}
- distro = 'ubuntu'
+ pm = "http://test.ubuntu.com/ubuntu/"
+ sm = "http://testsec.ubuntu.com/ubuntu/"
+ cfg = {
+ "preserve_sources_list": False,
+ "primary": [{"arches": ["default"], "uri": pm}],
+ "security": [{"arches": ["default"], "uri": sm}],
+ }
+ distro = "ubuntu"
expected = EXPECTED_PRIMSEC_CONTENT
- mock_writefile, mock_load_file, mock_isfile, _ = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
+ mock_writefile, mock_load_file, mock_isfile, _ = self._apt_source_list(
+ distro, cfg, cfg_on_empty=True
+ )
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
def test_apt_v3_srcl_custom(self):
"""test_apt_v3_srcl_custom - Test rendering a custom source template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
- with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(subp, 'subp', self.subp):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
-
- calls = [call('/etc/apt/sources.list',
- EXPECTED_CONVERTED_CONTENT,
- mode=0o644)]
+ with mock.patch.object(util, "write_file") as mockwrite:
+ with mock.patch.object(subp, "subp", self.subp):
+ with mock.patch.object(
+ Distro, "get_primary_arch", return_value="amd64"
+ ):
+ cc_apt_configure.handle(
+ "notimportant", cfg, mycloud, LOG, None
+ )
+
+ calls = [
+ call(
+ "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=0o644
+ )
+ ]
mockwrite.assert_has_calls(calls)
diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py
new file mode 100644
index 00000000..9fcf3039
--- /dev/null
+++ b/tests/unittests/config/test_apt_key.py
@@ -0,0 +1,124 @@
+import os
+from unittest import mock
+
+from cloudinit import subp, util
+from cloudinit.config import cc_apt_configure
+
+TEST_KEY_HUMAN = """
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+"""
+
+TEST_KEY_MACHINE = """
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+"""
+
+TEST_KEY_FINGERPRINT_HUMAN = (
+ "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85"
+)
+
+TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85"
+
+
+class TestAptKey:
+ """TestAptKey
+ Class to test apt-key commands
+ """
+
+ @mock.patch.object(subp, "subp", return_value=("fakekey", ""))
+ @mock.patch.object(util, "write_file")
+ def _apt_key_add_success_helper(self, directory, *args, hardened=False):
+ file = cc_apt_configure.apt_key(
+ "add", output_file="my-key", data="fakekey", hardened=hardened
+ )
+ assert file == directory + "/my-key.gpg"
+
+ def test_apt_key_add_success(self):
+ """Verify the right directory path gets returned for unhardened case"""
+ self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d")
+
+ def test_apt_key_add_success_hardened(self):
+ """Verify the right directory path gets returned for hardened case"""
+ self._apt_key_add_success_helper(
+ "/etc/apt/cloud-init.gpg.d", hardened=True
+ )
+
+ def test_apt_key_add_fail_no_file_name(self):
+ """Verify that null filename gets handled correctly"""
+ file = cc_apt_configure.apt_key("add", output_file=None, data="")
+ assert "/dev/null" == file
+
+ def _apt_key_fail_helper(self):
+ file = cc_apt_configure.apt_key(
+ "add", output_file="my-key", data="fakekey"
+ )
+ assert file == "/dev/null"
+
+ @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError)
+ def test_apt_key_add_fail_no_file_name_subproc(self, *args):
+ """Verify that bad key value gets handled correctly"""
+ self._apt_key_fail_helper()
+
+ @mock.patch.object(
+ subp, "subp", side_effect=UnicodeDecodeError("test", b"", 1, 1, "")
+ )
+ def test_apt_key_add_fail_no_file_name_unicode(self, *args):
+ """Verify that bad key encoding gets handled correctly"""
+ self._apt_key_fail_helper()
+
+ def _apt_key_list_success_helper(self, finger, key, human_output=True):
+ @mock.patch.object(os, "listdir", return_value=("/fake/dir/key.gpg",))
+ @mock.patch.object(subp, "subp", return_value=(key, ""))
+ def mocked_list(*a):
+
+ keys = cc_apt_configure.apt_key("list", human_output)
+ assert finger in keys
+
+ mocked_list()
+
+ def test_apt_key_list_success_human(self):
+ """Verify expected key output, human"""
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN
+ )
+
+ def test_apt_key_list_success_machine(self):
+ """Verify expected key output, machine"""
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_MACHINE, TEST_KEY_MACHINE, human_output=False
+ )
+
+ @mock.patch.object(os, "listdir", return_value=())
+ @mock.patch.object(subp, "subp", return_value=("", ""))
+ def test_apt_key_list_fail_no_keys(self, *args):
+ """Ensure falsy output for no keys"""
+ keys = cc_apt_configure.apt_key("list")
+ assert not keys
+
+ @mock.patch.object(os, "listdir", return_value="file_not_gpg_key.txt")
+ @mock.patch.object(subp, "subp", return_value=("", ""))
+ def test_apt_key_list_fail_no_keys_file(self, *args):
+ """Ensure non-gpg file is not returned.
+
+ apt-key used file extensions for this, so we do too
+ """
+ assert not cc_apt_configure.apt_key("list")
+
+ @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError)
+ @mock.patch.object(os, "listdir", return_value="bad_gpg_key.gpg")
+ def test_apt_key_list_fail_bad_key_file(self, *args):
+ """Ensure bad gpg key doesn't throw exeption."""
+ assert not cc_apt_configure.apt_key("list")
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
new file mode 100644
index 00000000..fbc2bf45
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -0,0 +1,852 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_handler_apt_source_v1
+Testing various config variations of the apt_source config
+This calls all things with v1 format to stress the conversion code on top of
+the actually tested code.
+"""
+import os
+import pathlib
+import re
+import shutil
+import tempfile
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit import gpg, subp, util
+from cloudinit.config import cc_apt_configure
+from tests.unittests.helpers import TestCase
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+
+class FakeDistro(object):
+ """Fake Distro helper object"""
+
+ def update_package_sources(self):
+ """Fake update_package_sources helper method"""
+ return
+
+
+class FakeDatasource:
+ """Fake Datasource helper object"""
+
+ def __init__(self):
+ self.region = "region"
+
+
+class FakeCloud(object):
+ """Fake Cloud helper object"""
+
+ def __init__(self):
+ self.distro = FakeDistro()
+ self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(TestCase):
+ """TestAptSourceConfig
+ Main Class to test apt_source configs
+ """
+
+ release = "fantastic"
+
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+ # mock fallback filename into writable tmp dir
+ self.fallbackfn = os.path.join(
+ self.tmp, "etc/apt/sources.list.d/", "cloud_config_sources.list"
+ )
+
+ self.fakecloud = FakeCloud()
+
+ rpatcher = mock.patch("cloudinit.util.lsb_release")
+ get_rel = rpatcher.start()
+ get_rel.return_value = {"codename": self.release}
+ self.addCleanup(rpatcher.stop)
+ apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
+ get_arch = apatcher.start()
+ get_arch.return_value = "amd64"
+ self.addCleanup(apatcher.stop)
+
+ def _get_default_params(self):
+ """get_default_params
+ Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params["RELEASE"] = self.release
+ params["MIRROR"] = "http://archive.ubuntu.com/ubuntu"
+ return params
+
+ def wrapv1conf(self, cfg):
+ params = self._get_default_params()
+ # old v1 list format under old keys, but callable to main handler
+ # disable source.list rendering and set mirror to avoid other code
+ return {
+ "apt_preserve_sources_list": True,
+ "apt_mirror": params["MIRROR"],
+ "apt_sources": cfg,
+ }
+
+ def myjoin(self, *args, **kwargs):
+ """myjoin - redir into writable tmpdir"""
+ if (
+ args[0] == "/etc/apt/sources.list.d/"
+ and args[1] == "cloud_config_sources.list"
+ and len(args) == 2
+ ):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def apt_src_basic(self, filename, cfg):
+ """apt_src_basic
+ Test Fix deb source string, has to overwrite mirror conf in params
+ """
+ cfg = self.wrapv1conf(cfg)
+
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_basic(self):
+ """Test deb source string, overwrite mirror and filename"""
+ cfg = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_basic(self.aptlistfile, [cfg])
+
+ def test_apt_src_basic_dict(self):
+ """Test deb source string, overwrite mirror and filename (dict)"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ }
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ def apt_src_basic_tri(self, cfg):
+ """apt_src_basic_tri
+ Test Fix three deb source string, has to overwrite mirror conf in
+ params. Test with filenames provided in config.
+ generic part to check three files with different content
+ """
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_basic_tri(self):
+ """Test Fix three deb source string with filenames"""
+ cfg1 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile3,
+ }
+ self.apt_src_basic_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_basic_dict_tri(self):
+ """Test Fix three deb source string with filenames (dict)"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile2: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ }
+ self.apt_src_basic_tri(cfg)
+
+ def test_apt_src_basic_nofn(self):
+ """Test Fix three deb source string without filenames (dict)"""
+ cfg = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_basic(self.fallbackfn, [cfg])
+
+ def apt_src_replacement(self, filename, cfg):
+ """apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ cfg = self.wrapv1conf(cfg)
+ params = self._get_default_params()
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_replace(self):
+ """Test Autoreplacement of MIRROR and RELEASE in source specs"""
+ cfg = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_replacement(self.aptlistfile, [cfg])
+
+ def apt_src_replace_tri(self, cfg):
+ """apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ generic part
+ """
+ self.apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "main"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "universe"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_replace_tri(self):
+ """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ self.apt_src_replace_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_replace_dict_tri(self):
+ """Test triple Autoreplacement in source specs (dict)"""
+ cfg = {
+ self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"},
+ "notused": {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"},
+ }
+ self.apt_src_replace_tri(cfg)
+
+ def test_apt_src_replace_nofn(self):
+ """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
+ cfg = {"source": "deb $MIRROR $RELEASE multiverse"}
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_replacement(self.fallbackfn, [cfg])
+
+ def apt_src_keyid(self, filename, cfg, keynum):
+ """apt_src_keyid
+ Test specification of a source + keyid
+ """
+ cfg = self.wrapv1conf(cfg)
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ # check if it added the right number of keys
+ calls = []
+ sources = cfg["apt"]["sources"]
+ for src in sources:
+ print(sources[src])
+ calls.append(call(sources[src], None))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_keyid(self):
+ """Test specification of a source + keyid with filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_keyid(self.aptlistfile, [cfg], 1)
+
+ def test_apt_src_keyid_tri(self):
+ """Test 3x specification of a source + keyid with filename being set"""
+ cfg1 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial universe"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial multiverse"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile3,
+ }
+
+ self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "universe",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "multiverse",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_keyid_nofn(self):
+ """Test specification of a source + keyid without filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_keyid(self.fallbackfn, [cfg], 1)
+
+ def apt_src_key(self, filename, cfg):
+ """apt_src_key
+ Test specification of a source + key
+ """
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ # check if it added the right number of keys
+ sources = cfg["apt"]["sources"]
+ calls = []
+ for src in sources:
+ print(sources[src])
+ calls.append(call(sources[src], None))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_key(self):
+ """Test specification of a source + key with filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "key": "fakekey 4321",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_key(self.aptlistfile, cfg)
+
+ def test_apt_src_key_nofn(self):
+ """Test specification of a source + key without filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "key": "fakekey 4321",
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_key(self.fallbackfn, cfg)
+
+ def test_apt_src_keyonly(self):
+ """Test specifying key without source"""
+ cfg = {"key": "fakekey 4242", "filename": self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4242",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyidonly(self):
+ """Test specification of a keyid without source"""
+ cfg = {"keyid": "03683F77", "filename": self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(
+ subp, "subp", return_value=("fakekey 1212", "")
+ ):
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ cc_apt_configure.handle(
+ "test", cfg, self.fakecloud, None, None
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 1212",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+ """apt_src_keyid_real
+ Test specification of a keyid without source including
+ up to addition of the key (add_apt_key_raw mocked to keep the
+ environment as is)
+ """
+ key = cfg["keyid"]
+ keyserver = cfg.get("keyserver", "keyserver.ubuntu.com")
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey:
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value=expectedkey
+ ) as mockgetkey:
+ cc_apt_configure.handle(
+ "test", cfg, self.fakecloud, None, None
+ )
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey, self.aptlistfile, hardened=is_hardened
+ )
+ else:
+ mockkey.assert_called_with(expectedkey, self.aptlistfile)
+ mockgetkey.assert_called_with(key, keyserver)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyid_real(self):
+ """test_apt_src_keyid_real - Test keyid including key add"""
+ keyid = "03683F77"
+ cfg = {"keyid": keyid, "filename": self.aptlistfile}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_longkeyid_real(self):
+ """test_apt_src_longkeyid_real - Test long keyid including key add"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {"keyid": keyid, "filename": self.aptlistfile}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_longkeyid_ks_real(self):
+ """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {
+ "keyid": keyid,
+ "keyserver": "keys.gnupg.net",
+ "filename": self.aptlistfile,
+ }
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_ppa(self):
+ """Test adding a ppa"""
+ cfg = {
+ "source": "ppa:smoser/cloud-init-test",
+ "filename": self.aptlistfile,
+ }
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ mockobj.assert_called_once_with(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=None
+ )
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_ppa_tri(self):
+ """Test adding three ppa's"""
+ cfg1 = {
+ "source": "ppa:smoser/cloud-init-test",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "ppa:smoser/cloud-init-test2",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "ppa:smoser/cloud-init-test3",
+ "filename": self.aptlistfile3,
+ }
+ cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
+
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ calls = [
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ target=None,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ target=None,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ target=None,
+ ),
+ ]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ def test_convert_to_new_format(self):
+ """Test the conversion of old to new format"""
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ cfg = {"apt_sources": [cfg1, cfg2, cfg3]}
+ checkcfg = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
+ self.assertEqual(newcfg["apt"]["sources"], checkcfg)
+
+ # convert again, should stay the same
+ newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
+ self.assertEqual(newcfg2["apt"]["sources"], checkcfg)
+
+ # should work without raising an exception
+ cc_apt_configure.convert_to_v3_apt_format({})
+
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format({"apt_sources": 5})
+
+ def test_convert_to_new_format_collision(self):
+ """Test the conversion of old to new format with collisions
+ That matches e.g. the MAAS case specifying old and new config"""
+ cfg_1_and_3 = {
+ "apt": {"proxy": "http://192.168.122.1:8000/"},
+ "apt_proxy": "http://192.168.122.1:8000/",
+ }
+ cfg_3_only = {"apt": {"proxy": "http://192.168.122.1:8000/"}}
+ cfgconflict = {
+ "apt": {"proxy": "http://192.168.122.1:8000/"},
+ "apt_proxy": "ftp://192.168.122.1:8000/",
+ }
+
+ # collision (equal)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # collision (equal, so ok to remove)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+ # collision (unequal)
+ match = "Old and New.*unequal.*apt_proxy"
+ with self.assertRaisesRegex(ValueError, match):
+ cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
+
+ def test_convert_to_new_format_dict_collision(self):
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ fullv3 = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+ cfg_3_only = {"apt": {"sources": fullv3}}
+ cfg_1_and_3 = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3.update(cfg_3_only)
+
+ # collision (equal, so ok to remove)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # no old spec (same result)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+
+ diff = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE DIFFERENTVERSE",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+ cfg_3_only = {"apt": {"sources": diff}}
+ cfg_1_and_3_different = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_different.update(cfg_3_only)
+
+ # collision (unequal by dict having a different entry)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
+
+ missing = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ }
+ }
+ cfg_3_only = {"apt": {"sources": missing}}
+ cfg_1_and_3_missing = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_missing.update(cfg_3_only)
+ # collision (unequal by dict missing an entry)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
new file mode 100644
index 00000000..75adc647
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -0,0 +1,1442 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""test_handler_apt_source_v3
+Testing various config variations of the apt_source custom config
+This exercises all of them in the new v3 format and focuses on new features
+"""
+import glob
+import os
+import pathlib
+import re
+import shutil
+import socket
+import tempfile
+from unittest import TestCase, mock
+from unittest.mock import call
+
+from cloudinit import gpg, subp, util
+from cloudinit.config import cc_apt_configure
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
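+# matches "type:value" specs such as "ppa:smoser/cloud-init-test"; sources that
+# match are handed to add-apt-repository instead of a sources.list file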
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
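+# target=None means "operate on the running system" rather than a chroot target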
+TARGET = None
+
+MOCK_LSB_RELEASE_DATA = {
+ "id": "Ubuntu",
+ "description": "Ubuntu 18.04.1 LTS",
+ "release": "18.04",
+ "codename": "bionic",
+}
+
+
+class FakeDatasource:
+ """Fake Datasource helper object"""
+
+ def __init__(self):
+ self.region = "region"
+
+
+class FakeCloud:
+ """Fake Cloud helper object"""
+
+ def __init__(self):
+ self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
+ """TestAptSourceConfig
+ Main Class to test apt configs
+ """
+
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+ self.add_patch(
+ "cloudinit.config.cc_apt_configure.util.lsb_release",
+ "m_lsb_release",
+ return_value=MOCK_LSB_RELEASE_DATA.copy(),
+ )
+
+ @staticmethod
+ def _add_apt_sources(*args, **kwargs):
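+ # keep update_packages mocked so the tests never trigger a real package
+ # index refresh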
+ with mock.patch.object(cc_apt_configure, "update_packages"):
+ cc_apt_configure.add_apt_sources(*args, **kwargs)
+
+ @staticmethod
+ def _get_default_params():
+ """get_default_params
+ Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params["RELEASE"] = MOCK_LSB_RELEASE_DATA["release"]
+ arch = "amd64"
+ params["MIRROR"] = cc_apt_configure.get_default_mirrors(arch)[
+ "PRIMARY"
+ ]
+ return params
+
+ def _myjoin(self, *args, **kwargs):
+ """_myjoin - redir into writable tmpdir"""
+ if (
+ args[0] == "/etc/apt/sources.list.d/"
+ and args[1] == "cloud_config_sources.list"
+ and len(args) == 2
+ ):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def _apt_src_basic(self, filename, cfg):
+ """_apt_src_basic
+ Test a fixed deb source string; has to overwrite mirror conf in params
+ """
+ params = self._get_default_params()
+
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_basic(self):
+ """test_apt_v3_src_basic - Test fix deb source string"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ }
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_basic_tri(self):
+ """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile2: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ }
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def _apt_src_replacement(self, filename, cfg):
+ """apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ params = self._get_default_params()
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_replace(self):
+ """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
+ cfg = {self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}}
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_replace_fn(self):
+ """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
+ cfg = {
+ "ignored": {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ }
+ # second file should overwrite the dict key
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def _apt_src_replace_tri(self, cfg):
+ """_apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with a
+ generic verification part
+ """
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "main"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "universe"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_replace_tri(self):
+ """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
+ cfg = {
+ self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"},
+ "notused": {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"},
+ }
+ self._apt_src_replace_tri(cfg)
+
+ def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None):
+ """_apt_src_keyid
+ Test specification of a source + keyid
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ # check if it added the right number of keys
+ calls = []
+ for key in cfg:
+ if is_hardened is not None:
+ calls.append(call(cfg[key], hardened=is_hardened))
+ else:
+ calls.append(call(cfg[key], TARGET))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_keyid(self):
+ """test_apt_v3_src_keyid - Test source + keyid with filename"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "filename": self.aptlistfile,
+ "keyid": "03683F77",
+ }
+ }
+ self._apt_src_keyid(self.aptlistfile, cfg, 1)
+
+ def test_apt_v3_src_keyid_tri(self):
+ """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ },
+ "ignored": {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial universe"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial multiverse"
+ ),
+ "filename": self.aptlistfile3,
+ "keyid": "03683F77",
+ },
+ }
+
+ self._apt_src_keyid(self.aptlistfile, cfg, 3)
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "universe",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "multiverse",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_key(self):
+ """test_apt_v3_src_key - Test source + key"""
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "filename": self.aptlistfile,
+ "key": "fakekey 4321",
+ }
+ }
+
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4321",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+ self.assertTrue(os.path.isfile(self.aptlistfile))
+
+ contents = util.load_file(self.aptlistfile)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_keyonly(self):
+ """test_apt_v3_src_keyonly - Test key without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"key": "fakekey 4242"}}
+
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4242",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyidonly(self):
+ """test_apt_v3_src_keyidonly - Test keyid without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"keyid": "03683F77"}}
+ with mock.patch.object(
+ subp, "subp", return_value=("fakekey 1212", "")
+ ):
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 1212",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+ """apt_src_keyid_real
+ Test specification of a keyid without source including
+ up to addition of the key (add_apt_key_raw mocked to keep the
+ environment as is)
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey:
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value=expectedkey
+ ) as mockgetkey:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ keycfg = cfg[self.aptlistfile]
+ mockgetkey.assert_called_with(
+ keycfg["keyid"], keycfg.get("keyserver", "keyserver.ubuntu.com")
+ )
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey, keycfg["keyfile"], hardened=is_hardened
+ )
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyid_real(self):
+ """test_apt_v3_src_keyid_real - Test keyid including key add"""
+ keyid = "03683F77"
+ cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_v3_src_longkeyid_real(self):
+ """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_v3_src_longkeyid_ks_real(self):
+ """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {
+ self.aptlistfile: {
+ "keyid": keyid,
+ "keyfile": self.aptlistfile,
+ "keyserver": "keys.gnupg.net",
+ }
+ }
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+ def test_apt_v3_src_keyid_keyserver(self):
+ """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
+ keyid = "03683F77"
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {
+ "keyid": keyid,
+ "keyfile": self.aptlistfile,
+ "keyserver": "test.random.com",
+ }
+ }
+
+ # in some test environments only *.ubuntu.com is reachable
+ # so mock the call and check if the config got there
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value="fakekey"
+ ) as mockgetkey:
+ with mock.patch.object(
+ cc_apt_configure, "add_apt_key_raw"
+ ) as mockadd:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ mockgetkey.assert_called_with("03683F77", "test.random.com")
+ mockadd.assert_called_with("fakekey", self.aptlistfile, hardened=False)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa(self):
+ """test_apt_v3_src_ppa - Test specification of a ppa"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}}
+
+ with mock.patch("cloudinit.subp.subp") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+ mockobj.assert_any_call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=TARGET
+ )
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa_tri(self):
+ """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's"""
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"},
+ self.aptlistfile2: {"source": "ppa:smoser/cloud-init-test2"},
+ self.aptlistfile3: {"source": "ppa:smoser/cloud-init-test3"},
+ }
+
+ with mock.patch("cloudinit.subp.subp") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+ calls = [
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ target=TARGET,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ target=TARGET,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ target=TARGET,
+ ),
+ ]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
+ """test_apt_v3_list_rename - Test find mirror and apt list renaming"""
+ pre = "/var/lib/apt/lists"
+ # filenames are archive dependent
+
+ arch = "s390x"
+ m_get_dpkg_architecture.return_value = arch
+ component = "ubuntu-ports"
+ archive = "ports.ubuntu.com"
+
+ cfg = {
+ "primary": [
+ {
+ "arches": ["default"],
+ "uri": "http://test.ubuntu.com/%s/" % component,
+ }
+ ],
+ "security": [
+ {
+ "arches": ["default"],
+ "uri": "http://testsec.ubuntu.com/%s/" % component,
+ }
+ ],
+ }
+ post = "%s_dists_%s-updates_InRelease" % (
+ component,
+ MOCK_LSB_RELEASE_DATA["codename"],
+ )
+ fromfn = "%s/%s_%s" % (pre, archive, post)
+ tofn = "%s/test.ubuntu.com_%s" % (pre, post)
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+
+ self.assertEqual(
+ mirrors["MIRROR"], "http://test.ubuntu.com/%s/" % component
+ )
+ self.assertEqual(
+ mirrors["PRIMARY"], "http://test.ubuntu.com/%s/" % component
+ )
+ self.assertEqual(
+ mirrors["SECURITY"], "http://testsec.ubuntu.com/%s/" % component
+ )
+
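+ # rename_apt_lists should move the cached list file from the old archive
+ # prefix (fromfn) to the new mirror's prefix (tofn)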
+ with mock.patch.object(os, "rename") as mockren:
+ with mock.patch.object(glob, "glob", return_value=[fromfn]):
+ cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
+
+ mockren.assert_any_call(fromfn, tofn)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
+ target = os.path.join(self.tmp, "rename_non_slash")
+ apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
+
+ arch = "amd64"
+ m_get_dpkg_architecture.return_value = arch
+
+ mirror_path = "some/random/path/"
+ primary = "http://test.ubuntu.com/" + mirror_path
+ security = "http://test-security.ubuntu.com/" + mirror_path
+ mirrors = {"PRIMARY": primary, "SECURITY": security}
+
+ # these match default archive prefixes
+ opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
+ osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
+ # this one won't match the default prefixes and should not be renamed
+ other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
+ # these are our new expected prefixes
+ npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
+ nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
+
+ files = [
+ # orig prefix, new prefix, suffix
+ (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
+ (other_pre, other_pre, "_main_binary-amd64_Packages"),
+ (other_pre, other_pre, "_Release"),
+ (other_pre, other_pre, "_Release.gpg"),
+ (osec_pre, nsec_pre, "_InRelease"),
+ (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
+ (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
+ ]
+
+ expected = sorted([npre + suff for opre, npre, suff in files])
+ # create files
+ for (opre, _npre, suff) in files:
+ fpath = os.path.join(apt_lists_d, opre + suff)
+ util.write_file(fpath, content=fpath)
+
+ cc_apt_configure.rename_apt_lists(mirrors, target, arch)
+ found = sorted(os.listdir(apt_lists_d))
+ self.assertEqual(expected, found)
+
+ @staticmethod
+ def test_apt_v3_proxy():
+ """test_apt_v3_proxy - Test apt_*proxy configuration"""
+ cfg = {
+ "proxy": "foobar1",
+ "http_proxy": "foobar2",
+ "ftp_proxy": "foobar3",
+ "https_proxy": "foobar4",
+ }
+
+ with mock.patch.object(util, "write_file") as mockobj:
+ cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
+
+ mockobj.assert_called_with(
+ "proxyfn",
+ 'Acquire::http::Proxy "foobar1";\n'
+ 'Acquire::http::Proxy "foobar2";\n'
+ 'Acquire::ftp::Proxy "foobar3";\n'
+ 'Acquire::https::Proxy "foobar4";\n',
+ )
+
+ def test_apt_v3_mirror(self):
+ """test_apt_v3_mirror - Test defining a mirror"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [{"arches": ["default"], "uri": pmir}],
+ "security": [{"arches": ["default"], "uri": smir}],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_default(self):
+ """test_apt_v3_mirror_default - Test without defining a mirror"""
+ arch = "amd64"
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = get_cloud()
+ mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_arches(self):
+ """test_apt_v3_mirror_arches - Test arches selection of mirror"""
+ pmir = "http://my-primary.ubuntu.com/ubuntu/"
+ smir = "http://my-security.ubuntu.com/ubuntu/"
+ arch = "ppc64el"
+ cfg = {
+ "primary": [
+ {"arches": ["default"], "uri": "notthis-primary"},
+ {"arches": [arch], "uri": pmir},
+ ],
+ "security": [
+ {"arches": ["default"], "uri": "nothis-security"},
+ {"arches": [arch], "uri": smir},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_arches_default(self):
+ """test_apt_v3_mirror_arches - Test falling back to default arch"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [
+ {"arches": ["default"], "uri": pmir},
+ {"arches": ["thisarchdoesntexist"], "uri": "notthis"},
+ ],
+ "security": [
+ {"arches": ["thisarchdoesntexist"], "uri": "nothat"},
+ {"arches": ["default"], "uri": smir},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_get_def_mir_non_intel_no_arch(
+ self, m_get_dpkg_architecture
+ ):
+ arch = "ppc64el"
+ m_get_dpkg_architecture.return_value = arch
+ expected = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+ }
+ self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
+
+ def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
+ found = cc_apt_configure.get_default_mirrors("ppc64el")
+
+ expected = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+ }
+ self.assertEqual(expected, found)
+
+ def test_apt_v3_mirror_arches_sysdefault(self):
+ """test_apt_v3_mirror_arches - Test arches fallback to sys default"""
+ arch = "amd64"
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = get_cloud()
+ cfg = {
+ "primary": [
+ {"arches": ["thisarchdoesntexist_64"], "uri": "notthis"},
+ {"arches": ["thisarchdoesntexist"], "uri": "notthiseither"},
+ ],
+ "security": [
+ {"arches": ["thisarchdoesntexist"], "uri": "nothat"},
+ {"arches": ["thisarchdoesntexist_64"], "uri": "nothateither"},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_search(self):
+ """test_apt_v3_mirror_search - Test searching mirrors in a list
+ Mock the checks to avoid relying on network connectivity"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [{"arches": ["default"], "search": ["pfailme", pmir]}],
+ "security": [{"arches": ["default"], "search": ["sfailme", smir]}],
+ }
+
+ with mock.patch.object(
+ cc_apt_configure.util,
+ "search_for_mirror",
+ side_effect=[pmir, smir],
+ ) as mocksearch:
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ calls = [call(["pfailme", pmir]), call(["sfailme", smir])]
+ mocksearch.assert_has_calls(calls)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_search_many2(self):
+ """test_apt_v3_mirror_search_many3 - Test both mirrors specs at once"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [
+ {
+ "arches": ["default"],
+ "uri": pmir,
+ "search": ["pfailme", "foo"],
+ }
+ ],
+ "security": [
+ {
+ "arches": ["default"],
+ "uri": smir,
+ "search": ["sfailme", "bar"],
+ }
+ ],
+ }
+
+ arch = "amd64"
+
+ # should be called only once per type, despite two mirror configs
+ mycloud = None
+ with mock.patch.object(
+ cc_apt_configure, "get_mirror", return_value="http://mocked/foo"
+ ) as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(cfg, "primary", arch, mycloud),
+ call(cfg, "security", arch, mycloud),
+ ]
+ mockgm.assert_has_calls(calls)
+
+ # should not be called, since primary is specified
+ with mock.patch.object(
+ cc_apt_configure.util, "search_for_mirror"
+ ) as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), arch
+ )
+ mockse.assert_not_called()
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_url_resolvable(self):
+ """test_apt_v3_url_resolvable - Test resolving urls"""
+
+ with mock.patch.object(util, "is_resolvable") as mockresolve:
+ util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mockresolve.assert_called_with("1.2.3.4")
+
+ with mock.patch.object(util, "is_resolvable") as mockresolve:
+ util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ mockresolve.assert_called_with("us.archive.ubuntu.com")
+
+ # former tests can leave this set (or not if the test is run directly)
+ # do a hard reset to ensure a stable result
+ util._DNS_REDIRECT_IP = None
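+ # fake getaddrinfo() results in (family, type, proto, canonname, sockaddr) form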
+ bad = [(None, None, None, "badname", ["10.3.2.1"])]
+ good = [(None, None, None, "goodname", ["10.2.3.4"])]
+ with mock.patch.object(
+ socket, "getaddrinfo", side_effect=[bad, bad, bad, good, good]
+ ) as mocksock:
+ ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mocksock.assert_any_call(
+ "does-not-exist.example.com.", None, 0, 0, 1, 2
+ )
+ mocksock.assert_any_call("example.invalid.", None, 0, 0, 1, 2)
+ mocksock.assert_any_call("us.archive.ubuntu.com", None)
+ mocksock.assert_any_call("1.2.3.4", None)
+
+ self.assertTrue(ret)
+ self.assertTrue(ret2)
+
+ # the side_effect only needs the bad result after the initial call
+ with mock.patch.object(
+ socket, "getaddrinfo", side_effect=[bad]
+ ) as mocksock:
+ ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
+ calls = [call("failme.com", None)]
+ mocksock.assert_has_calls(calls)
+ self.assertFalse(ret3)
+
+ def test_apt_v3_disable_suites(self):
+ """test_disable_suites - disable_suites with many configurations"""
+ release = "xenial"
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+
+ # disable nothing
+ disabled = []
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable release suite
+ disabled = ["$RELEASE"]
+ expect = """\
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable other suite
+ disabled = ["$RELEASE-updates"]
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
+ """ xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi disable
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi line disable (same suite multiple times in input)
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # comment in input
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable custom suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ foobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable non existing suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with option
+ disabled = ["$RELEASE-updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
+ """xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with more options and auto $RELEASE expansion
+ disabled = ["updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b c=d] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b c=d] \
+http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite while options at others
+ disabled = ["$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ def test_disable_suites_blank_lines(self):
+ """test_disable_suites_blank_lines - ensure blank lines allowed"""
+ lines = [
+ "deb %(repo)s %(rel)s main universe",
+ "",
+ "deb %(repo)s %(rel)s-updates main universe",
+ " # random comment",
+ "#comment here",
+ "",
+ ]
+ rel = "trusty"
+ repo = "http://example.com/mirrors/ubuntu"
+ orig = "\n".join(lines) % {"repo": repo, "rel": rel}
+ self.assertEqual(
+ orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)
+ )
+
+ @mock.patch("cloudinit.util.get_hostname", return_value="abc.localdomain")
+ def test_apt_v3_mirror_search_dns(self, m_get_hostname):
+ """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
+ pmir = "phit"
+ smir = "shit"
+ arch = "amd64"
+ mycloud = get_cloud("ubuntu")
+ cfg = {
+ "primary": [{"arches": ["default"], "search_dns": True}],
+ "security": [{"arches": ["default"], "search_dns": True}],
+ }
+
+ with mock.patch.object(
+ cc_apt_configure, "get_mirror", return_value="http://mocked/foo"
+ ) as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(cfg, "primary", arch, mycloud),
+ call(cfg, "security", arch, mycloud),
+ ]
+ mockgm.assert_has_calls(calls)
+
+ with mock.patch.object(
+ cc_apt_configure,
+ "search_for_mirror_dns",
+ return_value="http://mocked/foo",
+ ) as mocksdns:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(True, "primary", cfg, mycloud),
+ call(True, "security", cfg, mycloud),
+ ]
+ mocksdns.assert_has_calls(calls)
+
+ # first return is for the non-dns call before
+ with mock.patch.object(
+ cc_apt_configure.util,
+ "search_for_mirror",
+ side_effect=[None, pmir, None, smir],
+ ) as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ calls = [
+ call(None),
+ call(
+ [
+ "http://ubuntu-mirror.localdomain/ubuntu",
+ "http://ubuntu-mirror/ubuntu",
+ ]
+ ),
+ call(None),
+ call(
+ [
+ "http://ubuntu-security-mirror.localdomain/ubuntu",
+ "http://ubuntu-security-mirror/ubuntu",
+ ]
+ ),
+ ]
+ mockse.assert_has_calls(calls)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_add_mirror_keys(self):
+ """test_apt_v3_add_mirror_keys - Test adding key for mirrors"""
+ arch = "amd64"
+ cfg = {
+ "primary": [
+ {
+ "arches": [arch],
+ "uri": "http://test.ubuntu.com/",
+ "filename": "primary",
+ "key": "fakekey_primary",
+ }
+ ],
+ "security": [
+ {
+ "arches": [arch],
+ "uri": "http://testsec.ubuntu.com/",
+ "filename": "security",
+ "key": "fakekey_security",
+ }
+ ],
+ }
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd:
+ cc_apt_configure.add_mirror_keys(cfg, TARGET)
+ calls = [
+ mock.call("fakekey_primary", "primary", hardened=False),
+ mock.call("fakekey_security", "security", hardened=False),
+ ]
+ mockadd.assert_has_calls(calls, any_order=True)
+
+
+class TestDebconfSelections(TestCase):
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_set_sel_appends_newline_if_absent(self, m_subp):
+ """Automatically append a newline to debconf-set-selections config."""
+ selections = b"some/setting boolean true"
+ cc_apt_configure.debconf_set_selections(selections=selections)
+ cc_apt_configure.debconf_set_selections(selections=selections + b"\n")
+ m_call = mock.call(
+ ["debconf-set-selections"],
+ data=selections + b"\n",
+ capture=True,
+ target=None,
+ )
+ self.assertEqual([m_call, m_call], m_subp.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ def test_no_set_sel_if_none_to_set(self, m_set_sel):
+ cc_apt_configure.apply_debconf_selections({"foo": "bar"})
+ m_set_sel.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
+ data = {
+ "set1": "pkga pkga/q1 mybool false",
+ "set2": (
+ "pkgb\tpkgb/b1\tstr\tthis is a string\n"
+ "pkgc\tpkgc/ip\tstring\t10.0.0.1"
+ ),
+ }
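+ # flatten the multi-line selection strings into individual lines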
+ lines = "\n".join(data.values()).split("\n")
+
+ m_get_inst.return_value = ["adduser", "apparmor"]
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_set_sel.call_count, 1)
+
+ # assumes debconf_set_selections was called with the data as a positional arg
+ selections = m_set_sel.call_args_list[0][0][0].decode()
+
+ missing = [
+ line for line in lines if line not in selections.splitlines()
+ ]
+ self.assertEqual([], missing)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_reconfigure_if_intersection(
+ self, m_get_inst, m_set_sel, m_dpkg_r
+ ):
+ data = {
+ "set1": "pkga pkga/q1 mybool false",
+ "set2": (
+ "pkgb\tpkgb/b1\tstr\tthis is a string\n"
+ "pkgc\tpkgc/ip\tstring\t10.0.0.1"
+ ),
+ "cloud-init": "cloud-init cloud-init/datasourcesmultiselect MAAS",
+ }
+
+ m_set_sel.return_value = None
+ m_get_inst.return_value = [
+ "adduser",
+ "apparmor",
+ "pkgb",
+ "cloud-init",
+ "zdog",
+ ]
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+
+ # reconfigure should be called with the intersection
+ # of (packages in config, packages installed)
+ self.assertEqual(m_dpkg_r.call_count, 1)
+ # assumes called with *args (dpkg_reconfigure([a,b,c], target=))
+ packages = m_dpkg_r.call_args_list[0][0][0]
+ self.assertEqual(set(["cloud-init", "pkgb"]), set(packages))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_reconfigure_if_no_intersection(
+ self, m_get_inst, m_set_sel, m_dpkg_r
+ ):
+ data = {"set1": "pkga pkga/q1 mybool false"}
+
+ m_get_inst.return_value = [
+ "adduser",
+ "apparmor",
+ "pkgb",
+ "cloud-init",
+ "zdog",
+ ]
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_dpkg_r.call_count, 0)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
+ target = "/foo-target"
+
+ # due to the way the cleaners are called (via dictionary reference)
+ # mocking clean_cloud_init directly does not work. So we mock
+ # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
+ ci_cleaner = mock.MagicMock()
+ with mock.patch.dict(
+ "cloudinit.config.cc_apt_configure.CONFIG_CLEANERS",
+ values={"cloud-init": ci_cleaner},
+ clear=True,
+ ):
+ cc_apt_configure.dpkg_reconfigure(
+ ["pkga", "cloud-init"], target=target
+ )
+ # cloud-init is actually the only package we have a cleaner for
+ # so for now, it's the only one that should be reconfigured
+ self.assertTrue(m_subp.called)
+ ci_cleaner.assert_called_with(target)
+ self.assertEqual(m_subp.call_count, 1)
+ found = m_subp.call_args_list[0][0][0]
+ expected = [
+ "dpkg-reconfigure",
+ "--frontend=noninteractive",
+ "cloud-init",
+ ]
+ self.assertEqual(expected, found)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure([])
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure(["pkgfoo", "pkgbar"])
+ m_subp.assert_not_called()
+
+
+#
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py
new file mode 100644
index 00000000..85dd028f
--- /dev/null
+++ b/tests/unittests/config/test_cc_apk_configure.py
@@ -0,0 +1,410 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_apk_configure
+Test creation of repositories file
+"""
+
+import logging
+import os
+import re
+import textwrap
+
+import pytest
+
+from cloudinit import cloud, helpers, util
+from cloudinit.config import cc_apk_configure
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+REPO_FILE = "/etc/apk/repositories"
+DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
+CC_APK = "cloudinit.config.cc_apk_configure"
+
+
+class TestNoConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestNoConfig, self).setUp()
+ self.add_patch(CC_APK + "._write_repositories_file", "m_write_repos")
+ self.name = "apk-configure"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def test_no_config(self):
+ """
+ Test that nothing is done if no apk-configure
+ configuration is provided.
+ """
+ config = util.get_builtin_cfg()
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ self.assertEqual(0, self.m_write_repos.call_count)
+
+
+class TestConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.new_root = self.reRoot(root=self.new_root)
+ for dirname in ["tmp", "etc/apk"]:
+ util.ensure_dir(os.path.join(self.new_root, dirname))
+ self.paths = helpers.Paths({"templates_dir": self.new_root})
+ self.name = "apk-configure"
+ self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ @mock.patch(CC_APK + "._write_repositories_file")
+ def test_no_repo_settings(self, m_write_repos):
+ """
+ Test that nothing is written if the 'alpine-repo' key
+ is not present.
+ """
+ config = {"apk_repos": {}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ @mock.patch(CC_APK + "._write_repositories_file")
+ def test_empty_repo_settings(self, m_write_repos):
+ """
+ Test that nothing is written if 'alpine_repo' list is empty.
+ """
+ config = {"apk_repos": {"alpine_repo": []}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ def test_only_main_repo(self):
+ """
+ Test when only details of the main repo are written to the file.
+ """
+ alpine_version = "v3.12"
+ config = {"apk_repos": {"alpine_repo": {"version": alpine_version}}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_and_community_repos(self):
+ """
+ Test when only details of main and community repos are
+ written to file.
+ """
+ alpine_version = "edge"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ are written to file.
+ """
+ alpine_version = "v3.12"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = "edge"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/{1}/testing
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and
+ local repos are written to file.
+ """
+ alpine_version = "v3.12"
+ local_repo_url = "http://some.mirror/whereever"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ },
+ "local_repo_base_url": local_repo_url,
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version, local_repo_url
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and local repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = "edge"
+ local_repo_url = "http://some.mirror/whereever"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ },
+ "local_repo_base_url": local_repo_url,
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version, local_repo_url
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+
+class TestApkConfigureSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas
+ ({"apk_repos": {"preserve_repositories": True}}, None),
+ ({"apk_repos": {"alpine_repo": None}}, None),
+ ({"apk_repos": {"alpine_repo": {"version": "v3.21"}}}, None),
+ (
+ {
+ "apk_repos": {
+ "alpine_repo": {
+ "base_url": "http://yep",
+ "community_enabled": True,
+ "testing_enabled": True,
+ "version": "v3.21",
+ }
+ }
+ },
+ None,
+ ),
+ ({"apk_repos": {"local_repo_base_url": "http://some"}}, None),
+ # Invalid schemas
+ (
+ {"apk_repos": {"alpine_repo": {"version": False}}},
+ "apk_repos.alpine_repo.version: False is not of type"
+ " 'string'",
+ ),
+ (
+ {
+ "apk_repos": {
+ "alpine_repo": {"version": "v3.12", "bogus": 1}
+ }
+ },
+ re.escape(
+ "apk_repos.alpine_repo: Additional properties are not"
+ " allowed ('bogus' was unexpected)"
+ ),
+ ),
+ (
+ {"apk_repos": {"alpine_repo": {}}},
+ "apk_repos.alpine_repo: 'version' is a required property,"
+ " apk_repos.alpine_repo: {} does not have enough properties",
+ ),
+ (
+ {"apk_repos": {"alpine_repo": True}},
+ "apk_repos.alpine_repo: True is not of type 'object', 'null'",
+ ),
+ (
+ {"apk_repos": {"preserve_repositories": "wrongtype"}},
+ "apk_repos.preserve_repositories: 'wrongtype' is not of type"
+ " 'boolean'",
+ ),
+ (
+ {"apk_repos": {}},
+ "apk_repos: {} does not have enough properties",
+ ),
+ (
+ {"apk_repos": {"local_repo_base_url": None}},
+ "apk_repos.local_repo_base_url: None is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
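For a quick sanity check outside pytest, the same schema path can be exercised directly. A minimal sketch, assuming cloud-init 22.1 and the jsonschema package are importable (the same precondition skipUnlessJsonSchema guards above):

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    # Mirrors the "v3.12 main repo" case exercised by the apk tests above.
    config = {"apk_repos": {"alpine_repo": {"version": "v3.12"}}}
    try:
        validate_cloudconfig_schema(config, get_schema(), strict=True)
        print("apk_repos config accepted")
    except SchemaValidationError as err:
        print("apk_repos config rejected:", err)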
diff --git a/tests/unittests/config/test_cc_apt_configure.py b/tests/unittests/config/test_cc_apt_configure.py
new file mode 100644
index 00000000..bd1bb963
--- /dev/null
+++ b/tests/unittests/config/test_cc_apt_configure.py
@@ -0,0 +1,202 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" Tests for cc_apt_configure module """
+
+import re
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestAPTConfigureSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Supplement valid schemas from examples tested in test_schema
+ ({"apt": {"preserve_sources_list": True}}, None),
+ # Invalid schemas
+ (
+ {"apt": "nonobject"},
+ "apt: 'nonobject' is not of type 'object",
+ ),
+ (
+ {"apt": {"boguskey": True}},
+ re.escape(
+ "apt: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ ({"apt": {}}, "apt: {} does not have enough properties"),
+ (
+ {"apt": {"preserve_sources_list": 1}},
+ "apt.preserve_sources_list: 1 is not of type 'boolean'",
+ ),
+ (
+ {"apt": {"disable_suites": 1}},
+ "apt.disable_suites: 1 is not of type 'array'",
+ ),
+ (
+ {"apt": {"disable_suites": []}},
+ re.escape("apt.disable_suites: [] is too short"),
+ ),
+ (
+ {"apt": {"disable_suites": [1]}},
+ "apt.disable_suites.0: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"disable_suites": ["a", "a"]}},
+ re.escape(
+ "apt.disable_suites: ['a', 'a'] has non-unique elements"
+ ),
+ ),
+ # All apt: primary tests are applicable for "security" key too.
+ # Those apt:security tests are exercised in the unittest below
+ (
+ {"apt": {"primary": "nonlist"}},
+ "apt.primary: 'nonlist' is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": []}},
+ re.escape("apt.primary: [] is too short"),
+ ),
+ (
+ {"apt": {"primary": ["nonobj"]}},
+ "apt.primary.0: 'nonobj' is not of type 'object'",
+ ),
+ (
+ {"apt": {"primary": [{}]}},
+ "apt.primary.0: 'arches' is a required property",
+ ),
+ (
+ {"apt": {"primary": [{"boguskey": True}]}},
+ re.escape(
+ "apt.primary.0: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"apt": {"primary": [{"arches": True}]}},
+ "apt.primary.0.arches: True is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": [{"uri": True}]}},
+ "apt.primary.0.uri: True is not of type 'string'",
+ ),
+ (
+ {
+ "apt": {
+ "primary": [
+ {"arches": ["amd64"], "search": "non-array"}
+ ]
+ }
+ },
+ "apt.primary.0.search: 'non-array' is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "search": []}]}},
+ re.escape("apt.primary.0.search: [] is too short"),
+ ),
+ (
+ {
+ "apt": {
+ "primary": [{"arches": ["amd64"], "search_dns": "a"}]
+ }
+ },
+ "apt.primary.0.search_dns: 'a' is not of type 'boolean'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "keyid": 1}]}},
+ "apt.primary.0.keyid: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "key": 1}]}},
+ "apt.primary.0.key: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "keyserver": 1}]}},
+ "apt.primary.0.keyserver: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"add_apt_repo_match": True}},
+ "apt.add_apt_repo_match: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"debconf_selections": True}},
+ "apt.debconf_selections: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"debconf_selections": {}}},
+ "apt.debconf_selections: {} does not have enough properties",
+ ),
+ (
+ {"apt": {"sources_list": True}},
+ "apt.sources_list: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"conf": True}},
+ "apt.conf: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"http_proxy": True}},
+ "apt.http_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"https_proxy": True}},
+ "apt.https_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"proxy": True}},
+ "apt.proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"ftp_proxy": True}},
+ "apt.ftp_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"sources": True}},
+ "apt.sources: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": True}}},
+ "apt.sources.opaquekey: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": {}}}},
+ "apt.sources.opaquekey: {} does not have enough properties",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": {"boguskey": True}}}},
+ re.escape(
+ "apt.sources.opaquekey: Additional properties are not"
+ " allowed ('boguskey' was unexpected)"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+        # Note apt['primary'] and apt['security'] have the same definition.
+        # Avoid duplicating test setup by re-running the same test with 'security'.
+ if isinstance(config.get("apt"), dict) and config["apt"].get(
+ "primary"
+ ):
+ # To exercise security schema, rename test key from primary
+ config["apt"]["security"] = config["apt"].pop("primary")
+ error_msg = error_msg.replace("primary", "security")
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
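The test above notes that apt['primary'] and apt['security'] share one definition. A hedged stand-alone sketch of that symmetry (the mirror URI below is a placeholder, not taken from the diff):

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    schema = get_schema()
    # 'arches' is required; 'uri' is one of the optional string properties.
    entry = {"arches": ["amd64"], "uri": "http://archive.example.com/ubuntu"}
    for key in ("primary", "security"):
        try:
            validate_cloudconfig_schema({"apt": {key: [entry]}}, schema, strict=True)
            print("apt.%s accepted" % key)
        except SchemaValidationError as err:
            print("apt.%s rejected: %s" % (key, err))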
diff --git a/tests/unittests/config/test_cc_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py
new file mode 100644
index 00000000..0f72d32b
--- /dev/null
+++ b/tests/unittests/config/test_cc_apt_pipelining.py
@@ -0,0 +1,65 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_apt_pipelining handler"""
+
+import pytest
+
+import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import mock, skipUnlessJsonSchema
+
+
+class TestAptPipelining:
+ @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
+ def test_not_disabled_by_default(self, m_write_file):
+ """ensure that default behaviour is to not disable pipelining"""
+ cc_apt_pipelining.handle("foo", {}, None, mock.MagicMock(), None)
+ assert 0 == m_write_file.call_count
+
+ @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
+ def test_false_disables_pipelining(self, m_write_file):
+ """ensure that pipelining can be disabled with correct config"""
+ cc_apt_pipelining.handle(
+ "foo", {"apt_pipelining": "false"}, None, mock.MagicMock(), None
+ )
+ assert 1 == m_write_file.call_count
+ args, _ = m_write_file.call_args
+ assert cc_apt_pipelining.DEFAULT_FILE == args[0]
+ assert 'Pipeline-Depth "0"' in args[1]
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas
+ ({}, None),
+ ({"apt_pipelining": 1}, None),
+ ({"apt_pipelining": True}, None),
+ ({"apt_pipelining": False}, None),
+ ({"apt_pipelining": "none"}, None),
+ ({"apt_pipelining": "unchanged"}, None),
+ ({"apt_pipelining": "os"}, None),
+ # Invalid schemas
+ (
+ {"apt_pipelining": "bogus"},
+ "Cloud config schema errors: apt_pipelining: 'bogus' is not"
+ " valid under any of the given schema",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
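A short sketch, assuming the same environment as above, that loops over exactly the valid apt_pipelining values enumerated in the parametrized cases (an invalid value such as "bogus" would raise SchemaValidationError instead):

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    schema = get_schema()
    # Values taken from the "Valid schemas" cases in TestAptPipelining above.
    for value in (1, True, False, "none", "unchanged", "os"):
        validate_cloudconfig_schema({"apt_pipelining": value}, schema, strict=True)
        print("accepted:", value)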
diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py
new file mode 100644
index 00000000..34b16b85
--- /dev/null
+++ b/tests/unittests/config/test_cc_bootcmd.py
@@ -0,0 +1,165 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import tempfile
+
+import pytest
+
+from cloudinit import subp, util
+from cloudinit.config.cc_bootcmd import handle
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class FakeExtendedTempFile(object):
+ def __init__(self, suffix):
+ self.suffix = suffix
+ self.handle = tempfile.NamedTemporaryFile(
+ prefix="ci-%s." % self.__class__.__name__, delete=False
+ )
+
+ def __enter__(self):
+ return self.handle
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.handle.close()
+ util.del_file(self.handle.name)
+
+
+class TestBootcmd(CiTestCase):
+
+ with_logs = True
+
+ _etmpfile_path = (
+ "cloudinit.config.cc_bootcmd.temp_utils.ExtendedTemporaryFile"
+ )
+
+ def setUp(self):
+ super(TestBootcmd, self).setUp()
+ self.subp = subp.subp
+ self.new_root = self.tmp_dir()
+
+ def test_handler_skip_if_no_bootcmd(self):
+ """When the provided config doesn't contain bootcmd, skip it."""
+ cfg = {}
+ mycloud = get_cloud()
+ handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "Skipping module named notimportant, no 'bootcmd' key",
+ self.logs.getvalue(),
+ )
+
+ def test_handler_invalid_command_set(self):
+ """Commands which can't be converted to shell will raise errors."""
+ invalid_config = {"bootcmd": 1}
+ cc = get_cloud()
+ with self.assertRaises(TypeError) as context_manager:
+ handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ self.assertIn("Failed to shellify bootcmd", self.logs.getvalue())
+ self.assertEqual(
+ "Input to shellify was type 'int'. Expected list or tuple.",
+ str(context_manager.exception),
+ )
+
+ invalid_config = {
+ "bootcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}]
+ }
+ cc = get_cloud()
+ with self.assertRaises(TypeError) as context_manager:
+ handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ logs = self.logs.getvalue()
+ self.assertIn("Failed to shellify", logs)
+ self.assertEqual(
+ "Unable to shellify type 'int'. Expected list, string, tuple. "
+ "Got: 20",
+ str(context_manager.exception),
+ )
+
+ def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
+ """Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
+ cc = get_cloud()
+ out_file = self.tmp_path("bootcmd.out", self.new_root)
+ my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
+ valid_config = {
+ "bootcmd": ["echo {0} $INSTANCE_ID > {1}".format(my_id, out_file)]
+ }
+
+ with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+ with self.allow_subp(["/bin/sh"]):
+ handle("cc_bootcmd", valid_config, cc, LOG, [])
+ self.assertEqual(
+ my_id + " iid-datasource-none\n", util.load_file(out_file)
+ )
+
+ def test_handler_runs_bootcmd_script_with_error(self):
+ """When a valid script generates an error, that error is raised."""
+ cc = get_cloud()
+ valid_config = {"bootcmd": ["exit 1"]} # Script with error
+
+ with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+ with self.allow_subp(["/bin/sh"]):
+ with self.assertRaises(subp.ProcessExecutionError) as ctxt:
+ handle("does-not-matter", valid_config, cc, LOG, [])
+ self.assertIn(
+ "Unexpected error while running command.\nCommand: ['/bin/sh',",
+ str(ctxt.exception),
+ )
+ self.assertIn(
+ "Failed to run bootcmd module does-not-matter",
+ self.logs.getvalue(),
+ )
+
+
+@skipUnlessJsonSchema()
+class TestBootCMDSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"bootcmd": 1},
+ "Cloud config schema errors: bootcmd: 1 is not of type"
+ " 'array'",
+ ),
+ ({"bootcmd": []}, re.escape("bootcmd: [] is too short")),
+ (
+ {"bootcmd": []},
+ re.escape(
+ "Cloud config schema errors: bootcmd: [] is too short"
+ ),
+ ),
+ (
+ {
+ "bootcmd": [
+ "ls /",
+ 20,
+ ["wget", "http://stuff/blah"],
+ {"a": "n"},
+ ]
+ },
+ "Cloud config schema errors: bootcmd.1: 20 is not valid under"
+ " any of the given schemas, bootcmd.3: {'a': 'n'} is not"
+ " valid under any of the given schemas",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
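The handler tests show bootcmd accepting both plain strings and argv-style lists, and the schema tests show scalars being rejected. A minimal sketch of that contrast, assuming the same imports as above (the command strings are illustrative placeholders):

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    schema = get_schema()
    # A string entry and an argv-style list entry, as in the handler test above.
    good = {"bootcmd": ["echo $INSTANCE_ID > /run/bootcmd.out", ["ls", "/"]]}
    bad = {"bootcmd": "echo not-a-list"}  # must be an array per the schema cases
    validate_cloudconfig_schema(good, schema, strict=True)
    try:
        validate_cloudconfig_schema(bad, schema, strict=True)
    except SchemaValidationError as err:
        print("rejected as expected:", err)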
diff --git a/tests/unittests/config/test_cc_byobu.py b/tests/unittests/config/test_cc_byobu.py
new file mode 100644
index 00000000..fbdf3403
--- /dev/null
+++ b/tests/unittests/config/test_cc_byobu.py
@@ -0,0 +1,51 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestByobuSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Supplement valid schemas tested by meta.examples in test_schema
+ ({"byobu_by_default": "enable"}, None),
+ # Invalid schemas
+ (
+ {"byobu_by_default": 1},
+ "byobu_by_default: 1 is not of type 'string'",
+ ),
+ (
+ {"byobu_by_default": "bogusenum"},
+ re.escape(
+ "byobu_by_default: 'bogusenum' is not one of"
+ " ['enable-system', 'enable-user', 'disable-system',"
+ " 'disable-user', 'enable', 'disable',"
+ " 'user', 'system']"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
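The expected error message above enumerates the accepted byobu_by_default values; a small hedged loop confirms each of them validates under the same schema:

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    schema = get_schema()
    # The enum values quoted in the expected error message above.
    for value in ("enable-system", "enable-user", "disable-system",
                  "disable-user", "enable", "disable", "user", "system"):
        validate_cloudconfig_schema({"byobu_by_default": value}, schema, strict=True)
    print("all byobu_by_default enum values accepted")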
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
new file mode 100644
index 00000000..39614635
--- /dev/null
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -0,0 +1,507 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import shutil
+import tempfile
+import unittest
+from contextlib import ExitStack
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, helpers, subp, util
+from cloudinit.config import cc_ca_certs
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+
+class TestNoConfig(unittest.TestCase):
+ def setUp(self):
+ super(TestNoConfig, self).setUp()
+ self.name = "ca-certs"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def test_no_config(self):
+ """
+ Test that nothing is done if no ca-certs configuration is provided.
+ """
+ config = util.get_builtin_cfg()
+ with ExitStack() as mocks:
+ util_mock = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ certs_mock = mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "update_ca_certs")
+ )
+
+ cc_ca_certs.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ self.assertEqual(util_mock.call_count, 0)
+ self.assertEqual(certs_mock.call_count, 0)
+
+
+class TestConfig(TestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.name = "ca-certs"
+ self.paths = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def _mock_init(self):
+ self.mocks = ExitStack()
+ self.addCleanup(self.mocks.close)
+
+ # Mock out the functions that actually modify the system
+ self.mock_add = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "add_ca_certs")
+ )
+ self.mock_update = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "update_ca_certs")
+ )
+ self.mock_remove = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "remove_default_ca_certs")
+ )
+
+ def test_no_trusted_list(self):
+ """
+ Test that no certificates are written if the 'trusted' key is not
+ present.
+ """
+ config = {"ca-certs": {}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_empty_trusted_list(self):
+ """Test that no certificate are written if 'trusted' list is empty."""
+ config = {"ca-certs": {"trusted": []}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_single_trusted(self):
+ """Test that a single cert gets passed to add_ca_certs."""
+ config = {"ca-certs": {"trusted": ["CERT1"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_multiple_trusted(self):
+ """Test that multiple certs get passed to add_ca_certs."""
+ config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1", "CERT2"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_remove_default_ca_certs(self):
+ """Test remove_defaults works as expected."""
+ config = {"ca_certs": {"remove_defaults": True}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
+
+ def test_no_remove_defaults_if_false(self):
+ """Test remove_defaults is not called when config value is False."""
+ config = {"ca_certs": {"remove_defaults": False}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_correct_order_for_remove_then_add(self):
+ """Test remove_defaults is not called when config value is False."""
+ config = {"ca_certs": {"remove_defaults": True, "trusted": ["CERT1"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
+
+
+class TestAddCaCerts(TestCase):
+ def setUp(self):
+ super(TestAddCaCerts, self).setUp()
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ self.paths = helpers.Paths(
+ {
+ "cloud_dir": tmpdir,
+ }
+ )
+ self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat")
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def test_no_certs_in_list(self):
+ """Test that no certificate are written if not provided."""
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(util, "write_file") as mockobj:
+ cc_ca_certs.add_ca_certs(conf, [])
+ self.assertEqual(mockobj.call_count, 0)
+
+ def test_single_cert_trailing_cr(self):
+ """Test adding a single certificate to the trusted CAs
+        when the existing ca-certificates file has a trailing newline"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
+ expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ mock_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"], expected, omode="wb"
+ )
+ ]
+ )
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+ def test_single_cert_no_trailing_cr(self):
+ """Test adding a single certificate to the trusted CAs
+        when the existing ca-certificates file has no trailing newline"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ ca_certs_content = "line1\nline2\nline3"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ mock_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"],
+ "%s\n%s\n"
+ % (ca_certs_content, conf["ca_cert_filename"]),
+ omode="wb",
+ )
+ ]
+ )
+
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+ def test_single_cert_to_empty_existing_ca_file(self):
+ """Test adding a single certificate to the trusted CAs
+ when existing ca-certificates.conf is empty"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ expected = "cloud-init-ca-certs.crt\n"
+
+ self.m_stat.return_value.st_size = 0
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(
+ util, "write_file", autospec=True
+ ) as m_write:
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ m_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ m_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"], expected, omode="wb"
+ )
+ ]
+ )
+
+ def test_multiple_certs(self):
+ """Test adding multiple certificates to the trusted CAs."""
+ certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
+ expected_cert_file = "\n".join(certs)
+ ca_certs_content = "line1\nline2\nline3"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, certs)
+
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_full_path"],
+ expected_cert_file,
+ mode=0o644,
+ )
+ ]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"],
+ "%s\n%s\n"
+ % (ca_certs_content, conf["ca_cert_filename"]),
+ omode="wb",
+ )
+ ]
+ )
+
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+
+class TestUpdateCaCerts(unittest.TestCase):
+ def test_commands(self):
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_ca_certs.update_ca_certs(conf)
+ mockobj.assert_called_once_with(
+ conf["ca_cert_update_cmd"], capture=False
+ )
+
+
+class TestRemoveDefaultCaCerts(TestCase):
+ def setUp(self):
+ super(TestRemoveDefaultCaCerts, self).setUp()
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ self.paths = helpers.Paths(
+ {
+ "cloud_dir": tmpdir,
+ }
+ )
+
+ def test_commands(self):
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_delete = mocks.enter_context(
+ mock.patch.object(util, "delete_dir_contents")
+ )
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_subp = mocks.enter_context(
+ mock.patch.object(subp, "subp")
+ )
+
+ cc_ca_certs.remove_default_ca_certs(distro_name, conf)
+
+ mock_delete.assert_has_calls(
+ [
+ mock.call(conf["ca_cert_path"]),
+ mock.call(conf["ca_cert_system_path"]),
+ ]
+ )
+
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_called_once_with(
+ conf["ca_cert_config"], "", mode=0o644
+ )
+
+ if distro_name in ["debian", "ubuntu"]:
+ mock_subp.assert_called_once_with(
+ ("debconf-set-selections", "-"),
+ "ca-certificates ca-certificates/trust_new_crts"
+ " select no",
+ )
+
+
+class TestCACertsSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid, yet deprecated schemas
+ ({"ca-certs": {"remove-defaults": True}}, None),
+ # Invalid schemas
+ (
+ {"ca_certs": 1},
+ "ca_certs: 1 is not of type 'object'",
+ ),
+ (
+ {"ca_certs": {}},
+ re.escape("ca_certs: {} does not have enough properties"),
+ ),
+ (
+ {"ca_certs": {"boguskey": 1}},
+ re.escape(
+ "ca_certs: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"ca_certs": {"remove_defaults": 1}},
+ "ca_certs.remove_defaults: 1 is not of type 'boolean'",
+ ),
+ (
+ {"ca_certs": {"trusted": [1]}},
+ "ca_certs.trusted.0: 1 is not of type 'string'",
+ ),
+ (
+ {"ca_certs": {"trusted": []}},
+ re.escape("ca_certs.trusted: [] is too short"),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+ @mock.patch.object(cc_ca_certs, "update_ca_certs")
+ def test_deprecate_key_warnings(self, update_ca_certs, caplog):
+ """Assert warnings are logged for deprecated keys."""
+ log = logging.getLogger("CALogTest")
+ cloud = get_cloud("ubuntu")
+ cc_ca_certs.handle(
+ "IGNORE", {"ca-certs": {"remove-defaults": False}}, cloud, log, []
+ )
+ expected_warnings = [
+ "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
+ " instead.",
+ "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
+ " Use 'ca_certs.remove_defaults' instead.",
+ ]
+ for warning in expected_warnings:
+ assert warning in caplog.text
+ assert 1 == update_ca_certs.call_count
+
+ @mock.patch.object(cc_ca_certs, "update_ca_certs")
+ def test_duplicate_keys(self, update_ca_certs, caplog):
+ """Assert warnings are logged for deprecated keys."""
+ log = logging.getLogger("CALogTest")
+ cloud = get_cloud("ubuntu")
+ cc_ca_certs.handle(
+ "IGNORE",
+ {
+ "ca-certs": {"remove-defaults": True},
+ "ca_certs": {"remove_defaults": False},
+ },
+ cloud,
+ log,
+ [],
+ )
+ expected_warning = (
+ "Found both ca-certs (deprecated) and ca_certs config keys."
+ " Ignoring ca-certs."
+ )
+ assert expected_warning in caplog.text
+ assert 1 == update_ca_certs.call_count
+
+
+# vi: ts=4 expandtab
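As the deprecation tests above show, the hyphenated ca-certs spelling still validates but logs warnings, so new configs should use the underscore form. A minimal sketch of the current spelling (the certificate string is a placeholder, not real PEM data):

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    schema = get_schema()
    config = {
        "ca_certs": {
            "remove_defaults": True,
            "trusted": ["-----BEGIN CERTIFICATE-----\n...placeholder...\n"],
        }
    }
    validate_cloudconfig_schema(config, schema, strict=True)
    print("ca_certs config accepted")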
diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
new file mode 100644
index 00000000..f86be293
--- /dev/null
+++ b/tests/unittests/config/test_cc_chef.py
@@ -0,0 +1,464 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import logging
+import os
+import re
+
+import httpretty
+import pytest
+
+from cloudinit import util
+from cloudinit.config import cc_chef
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ HttprettyTestCase,
+ cloud_init_project_dir,
+ mock,
+ skipIf,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
+
+# This is adjusted to use http because using https causes issues
+# in some openssl/httpretty combinations.
+# https://github.com/gabrielfalcao/HTTPretty/issues/242
+# We saw the issue in opensuse 42.3 with
+# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
+OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
+
+
+class TestInstallChefOmnibus(HttprettyTestCase):
+ def setUp(self):
+ super(TestInstallChefOmnibus, self).setUp()
+ self.new_root = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ def test_install_chef_from_omnibus_runs_chef_url_content(self):
+ """install_chef_from_omnibus calls subp_blob_in_tempfile."""
+ response = b'#!/bin/bash\necho "Hi Mom"'
+ httpretty.register_uri(
+ httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200
+ )
+ ret = (None, None) # stdout, stderr but capture=False
+
+ with mock.patch(
+ "cloudinit.config.cc_chef.subp_blob_in_tempfile", return_value=ret
+ ) as m_subp_blob:
+ cc_chef.install_chef_from_omnibus()
+ # admittedly whitebox, but assuming subp_blob_in_tempfile works
+ # this should be fine.
+ self.assertEqual(
+ [
+ mock.call(
+ blob=response,
+ args=[],
+ basename="chef-omnibus-install",
+ capture=False,
+ )
+ ],
+ m_subp_blob.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_chef.url_helper.readurl")
+ @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile")
+ def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
+ """install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
+
+ class FakeURLResponse(object):
+ contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format(
+ self.new_root
+ )
+
+ m_rdurl.return_value = FakeURLResponse()
+
+ cc_chef.install_chef_from_omnibus()
+ expected_kwargs = {
+ "retries": cc_chef.OMNIBUS_URL_RETRIES,
+ "url": cc_chef.OMNIBUS_URL,
+ }
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
+ cc_chef.install_chef_from_omnibus(retries=10)
+ expected_kwargs = {"retries": 10, "url": cc_chef.OMNIBUS_URL}
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
+ expected_subp_kwargs = {
+ "args": ["-v", "2.0"],
+ "basename": "chef-omnibus-install",
+ "blob": m_rdurl.return_value.contents,
+ "capture": False,
+ }
+ self.assertCountEqual(
+ expected_subp_kwargs, m_subp_blob.call_args_list[0][1]
+ )
+
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile")
+ def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
+ """install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
+ chef_outfile = self.tmp_path("chef.out", self.new_root)
+ response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
+ httpretty.register_uri(
+ httpretty.GET, cc_chef.OMNIBUS_URL, body=response
+ )
+ cc_chef.install_chef_from_omnibus(omnibus_version="2.0")
+
+ called_kwargs = m_subp_blob.call_args_list[0][1]
+ expected_kwargs = {
+ "args": ["-v", "2.0"],
+ "basename": "chef-omnibus-install",
+ "blob": response,
+ "capture": False,
+ }
+ self.assertCountEqual(expected_kwargs, called_kwargs)
+
+
+class TestChef(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestChef, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_config(self):
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ cfg = {}
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ for d in cc_chef.CHEF_DIRS:
+ self.assertFalse(os.path.isdir(d))
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_basic_config(self):
+ """
+ test basic config looks sane
+
+ # This should create a file of the format...
+ # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
+ chef_license "accept"
+ log_level :info
+ ssl_verify_mode :verify_none
+ log_location "/var/log/chef/client.log"
+ validation_client_name "bob"
+ validation_key "/etc/chef/validation.pem"
+ client_key "/etc/chef/client.pem"
+ chef_server_url "localhost"
+ environment "_default"
+ node_name "iid-datasource-none"
+ json_attribs "/etc/chef/firstboot.json"
+ file_cache_path "/var/cache/chef"
+ file_backup_path "/var/backups/chef"
+ pid_file "/var/run/chef/client.pid"
+ Chef::Log::Formatter.show_time = true
+ encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret"
+ """
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ cfg = {
+ "chef": {
+ "chef_license": "accept",
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": "/etc/chef/vkey.pem",
+ "validation_cert": "this is my cert",
+ "encrypted_data_bag_secret": (
+ "/etc/chef/encrypted_data_bag_secret"
+ ),
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ for d in cc_chef.CHEF_DIRS:
+ self.assertTrue(os.path.isdir(d))
+ c = util.load_file(cc_chef.CHEF_RB_PATH)
+
+ # the content of these keys is not expected to be rendered to tmpl
+ unrendered_keys = ("validation_cert",)
+ for k, v in cfg["chef"].items():
+ if k in unrendered_keys:
+ continue
+ self.assertIn(v, c)
+ for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
+ if k in unrendered_keys:
+ continue
+ # the value from the cfg overrides that in the default
+ val = cfg["chef"].get(k, v)
+ if isinstance(val, str):
+ self.assertIn(val, c)
+ c = util.load_file(cc_chef.CHEF_FB_PATH)
+ self.assertEqual({}, json.loads(c))
+
+ def test_firstboot_json(self):
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "run_list": ["a", "b", "c"],
+ "initial_attributes": {
+ "c": "d",
+ },
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ c = util.load_file(cc_chef.CHEF_FB_PATH)
+ self.assertEqual(
+ {
+ "run_list": ["a", "b", "c"],
+ "c": "d",
+ },
+ json.loads(c),
+ )
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_template_deletes(self):
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "json_attribs": None,
+ "show_time": None,
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ c = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertNotIn("json_attribs", c)
+ self.assertNotIn("Formatter.show_time", c)
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_validation_cert_and_validation_key(self):
+ # test validation_cert content is written to validation_key path
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ v_path = "/etc/chef/vkey.pem"
+ v_cert = "this is my cert"
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": v_path,
+ "validation_cert": v_cert,
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ content = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertIn(v_path, content)
+ util.load_file(v_path)
+ self.assertEqual(v_cert, util.load_file(v_path))
+
+ def test_validation_cert_with_system(self):
+ # test validation_cert content is not written over system file
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ v_path = "/etc/chef/vkey.pem"
+ v_cert = "system"
+ expected_cert = "this is the system file certificate"
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": v_path,
+ "validation_cert": v_cert,
+ },
+ }
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ util.write_file(v_path, expected_cert)
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ content = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertIn(v_path, content)
+ util.load_file(v_path)
+ self.assertEqual(expected_cert, util.load_file(v_path))
+
+
+@skipUnlessJsonSchema()
+class TestChefSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"chef": 1},
+ "chef: 1 is not of type 'object'",
+ ),
+ (
+ {"chef": {}},
+ re.escape(" chef: {} does not have enough properties"),
+ ),
+ (
+ {"chef": {"boguskey": True}},
+ re.escape(
+ "chef: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"chef": {"directories": 1}},
+ "chef.directories: 1 is not of type 'array'",
+ ),
+ (
+ {"chef": {"directories": []}},
+ re.escape("chef.directories: [] is too short"),
+ ),
+ (
+ {"chef": {"directories": [1]}},
+ "chef.directories.0: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"directories": ["a", "a"]}},
+ re.escape(
+ "chef.directories: ['a', 'a'] has non-unique elements"
+ ),
+ ),
+ (
+ {"chef": {"validation_cert": 1}},
+ "chef.validation_cert: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"validation_key": 1}},
+ "chef.validation_key: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"firstboot_path": 1}},
+ "chef.firstboot_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"client_key": 1}},
+ "chef.client_key: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"encrypted_data_bag_secret": 1}},
+ "chef.encrypted_data_bag_secret: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"environment": 1}},
+ "chef.environment: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"file_backup_path": 1}},
+ "chef.file_backup_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"file_cache_path": 1}},
+ "chef.file_cache_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"json_attribs": 1}},
+ "chef.json_attribs: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"log_level": 1}},
+ "chef.log_level: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"log_location": 1}},
+ "chef.log_location: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"node_name": 1}},
+ "chef.node_name: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"omnibus_url": 1}},
+ "chef.omnibus_url: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"omnibus_url_retries": "one"}},
+ "chef.omnibus_url_retries: 'one' is not of type 'integer'",
+ ),
+ (
+ {"chef": {"omnibus_version": 1}},
+ "chef.omnibus_version: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"omnibus_version": 1}},
+ "chef.omnibus_version: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"pid_file": 1}},
+ "chef.pid_file: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"server_url": 1}},
+ "chef.server_url: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"show_time": 1}},
+ "chef.show_time: 1 is not of type 'boolean'",
+ ),
+ (
+ {"chef": {"ssl_verify_mode": 1}},
+ "chef.ssl_verify_mode: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"validation_name": 1}},
+ "chef.validation_name: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"force_install": 1}},
+ "chef.force_install: 1 is not of type 'boolean'",
+ ),
+ (
+ {"chef": {"initial_attributes": 1}},
+ "chef.initial_attributes: 1 is not of type 'object'",
+ ),
+ (
+ {"chef": {"install_type": 1}},
+ "chef.install_type: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"install_type": "bogusenum"}},
+ re.escape(
+ "chef.install_type: 'bogusenum' is not one of"
+ " ['packages', 'gems', 'omnibus']"
+ ),
+ ),
+ (
+ {"chef": {"run_list": 1}},
+ "chef.run_list: 1 is not of type 'array'",
+ ),
+ (
+ {"chef": {"chef_license": 1}},
+ "chef.chef_license: 1 is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
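A hedged sketch contrasting a minimal chef config, modeled on the server_url/validation_name dict used by the handler tests above, with the install_type enum rejection (the server URL is a placeholder):

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    schema = get_schema()
    ok = {"chef": {"server_url": "https://chef.example.com", "validation_name": "bob"}}
    validate_cloudconfig_schema(ok, schema, strict=True)
    try:
        # install_type only accepts 'packages', 'gems' or 'omnibus'.
        validate_cloudconfig_schema(
            {"chef": {"install_type": "bogusenum"}}, schema, strict=True
        )
    except SchemaValidationError as err:
        print("rejected as expected:", err)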
diff --git a/tests/unittests/config/test_cc_debug.py b/tests/unittests/config/test_cc_debug.py
new file mode 100644
index 00000000..fc8d43dc
--- /dev/null
+++ b/tests/unittests/config/test_cc_debug.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import shutil
+import tempfile
+
+import pytest
+
+from cloudinit import util
+from cloudinit.config import cc_debug
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+@mock.patch("cloudinit.distros.debian.read_system_locale")
+class TestDebug(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestDebug, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.patchUtils(self.new_root)
+
+ def test_debug_write(self, m_locale):
+ m_locale.return_value = "en_US.UTF-8"
+ cfg = {
+ "abc": "123",
+ "c": "\u20a0",
+ "debug": {
+ "verbose": True,
+ # Does not actually write here due to mocking...
+ "output": "/var/log/cloud-init-debug.log",
+ },
+ }
+ cc = get_cloud()
+ cc_debug.handle("cc_debug", cfg, cc, LOG, [])
+ contents = util.load_file("/var/log/cloud-init-debug.log")
+ # Some basic sanity tests...
+ self.assertNotEqual(0, len(contents))
+ for k in cfg.keys():
+ self.assertIn(k, contents)
+
+ def test_debug_no_write(self, m_locale):
+ m_locale.return_value = "en_US.UTF-8"
+ cfg = {
+ "abc": "123",
+ "debug": {
+ "verbose": False,
+ # Does not actually write here due to mocking...
+ "output": "/var/log/cloud-init-debug.log",
+ },
+ }
+ cc = get_cloud()
+ cc_debug.handle("cc_debug", cfg, cc, LOG, [])
+ self.assertRaises(
+ IOError, util.load_file, "/var/log/cloud-init-debug.log"
+ )
+
+
+@skipUnlessJsonSchema()
+class TestDebugSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ ({"debug": 1}, "debug: 1 is not of type 'object'"),
+ (
+ {"debug": {}},
+ re.escape("debug: {} does not have enough properties"),
+ ),
+ (
+ {"debug": {"boguskey": True}},
+ re.escape(
+ "Additional properties are not allowed ('boguskey' was"
+ " unexpected)"
+ ),
+ ),
+ (
+ {"debug": {"verbose": 1}},
+ "debug.verbose: 1 is not of type 'boolean'",
+ ),
+ (
+ {"debug": {"output": 1}},
+ "debug.output: 1 is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
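A tiny sketch of a debug config that satisfies the schema checked above (verbose must be boolean, output must be a string); the output path mirrors the one used in the handler tests:

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    config = {"debug": {"verbose": True, "output": "/var/log/cloud-init-debug.log"}}
    validate_cloudconfig_schema(config, get_schema(), strict=True)
    print("debug config accepted")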
diff --git a/tests/unittests/config/test_cc_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py
new file mode 100644
index 00000000..5755e29e
--- /dev/null
+++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py
@@ -0,0 +1,81 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import logging
+
+import pytest
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {"disable_ec2_metadata": "true"}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_ifconfig(self, m_subp, m_which):
+ """Set the route if ifconfig command is available"""
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ["route", "add", "-host", "169.254.169.254", "reject"],
+ capture=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_ip(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ["ip", "route", "add", "prohibit", "169.254.169.254"],
+ capture=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_no_tool(self, m_subp, m_which):
+ """Log error when neither route nor ip commands are available"""
+ m_which.return_value = None # Find neither ifconfig nor ip
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ self.assertEqual(
+ [mock.call("ip"), mock.call("ifconfig")], m_which.call_args_list
+ )
+ m_subp.assert_not_called()
+
+
+@skipUnlessJsonSchema()
+class TestDisableEc2MetadataSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"disable_ec2_metadata": 1},
+ "disable_ec2_metadata: 1 is not of type 'boolean'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
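Note that the handler tests above pass the string "true", which the handler tolerates, while the schema expects a real boolean. A minimal sketch of the schema-clean form:

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    # A boolean, not the string "true", per the schema case above.
    validate_cloudconfig_schema(
        {"disable_ec2_metadata": True}, get_schema(), strict=True
    )
    print("disable_ec2_metadata config accepted")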
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
new file mode 100644
index 00000000..f2796e83
--- /dev/null
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -0,0 +1,333 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import random
+import re
+
+import pytest
+
+from cloudinit.config import cc_disk_setup
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ TestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+
+class TestIsDiskUsed(TestCase):
+ def setUp(self):
+ super(TestIsDiskUsed, self).setUp()
+ self.patches = ExitStack()
+ mod_name = "cloudinit.config.cc_disk_setup"
+ self.enumerate_disk = self.patches.enter_context(
+ mock.patch("{0}.enumerate_disk".format(mod_name))
+ )
+ self.check_fs = self.patches.enter_context(
+ mock.patch("{0}.check_fs".format(mod_name))
+ )
+
+ def tearDown(self):
+ super(TestIsDiskUsed, self).tearDown()
+ self.patches.close()
+
+ def test_multiple_child_nodes_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_valid_filesystem_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (
+ mock.MagicMock(),
+ "ext4",
+ mock.MagicMock(),
+ )
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_one_child_nodes_and_no_fs_returns_false(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+
+class TestGetMbrHddSize(TestCase):
+ def setUp(self):
+ super(TestGetMbrHddSize, self).setUp()
+ self.patches = ExitStack()
+ self.subp = self.patches.enter_context(
+ mock.patch.object(cc_disk_setup.subp, "subp")
+ )
+
+ def tearDown(self):
+ super(TestGetMbrHddSize, self).tearDown()
+ self.patches.close()
+
+ def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes):
+ def _subp(cmd, *args, **kwargs):
+ self.assertEqual(3, len(cmd))
+ if "--getsize64" in cmd:
+ return hdd_size_in_bytes, None
+ elif "--getss" in cmd:
+ return sector_size_in_bytes, None
+ raise Exception("Unexpected blockdev command called")
+
+ self.subp.side_effect = _subp
+
+ def _test_for_sector_size(self, sector_size):
+ size_in_bytes = random.randint(10000, 10000000) * 512
+ size_in_sectors = size_in_bytes / sector_size
+ self._configure_subp_mock(size_in_bytes, sector_size)
+ self.assertEqual(
+ size_in_sectors, cc_disk_setup.get_hdd_size("/dev/sda1")
+ )
+
+ def test_size_for_512_byte_sectors(self):
+ self._test_for_sector_size(512)
+
+ def test_size_for_1024_byte_sectors(self):
+ self._test_for_sector_size(1024)
+
+ def test_size_for_2048_byte_sectors(self):
+ self._test_for_sector_size(2048)
+
+ def test_size_for_4096_byte_sectors(self):
+ self._test_for_sector_size(4096)
+
+
+class TestGetPartitionMbrLayout(TestCase):
+ def test_single_partition_using_boolean(self):
+ self.assertEqual(
+ "0,", cc_disk_setup.get_partition_mbr_layout(1000, True)
+ )
+
+ def test_single_partition_using_list(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ self.assertEqual(
+ ",,83", cc_disk_setup.get_partition_mbr_layout(disk_size, [100])
+ )
+
+ def test_half_and_half(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ expected_partition_size = int(float(disk_size) / 2)
+ self.assertEqual(
+ ",{0},83\n,,83".format(expected_partition_size),
+ cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]),
+ )
+
+ def test_thirds_with_different_partition_type(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ expected_partition_size = int(float(disk_size) * 0.33)
+ self.assertEqual(
+ ",{0},83\n,,82".format(expected_partition_size),
+ cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]),
+ )
+
+
+class TestUpdateFsSetupDevices(TestCase):
+ def test_regression_1634678(self):
+ # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
+ fs_setup = {
+ "partition": "auto",
+ "device": "/dev/xvdb1",
+ "overwrite": False,
+ "label": "test",
+ "filesystem": "ext4",
+ }
+
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+
+ self.assertEqual(
+ {
+ "_origname": "/dev/xvdb1",
+ "partition": "auto",
+ "device": "/dev/xvdb1",
+ "overwrite": False,
+ "label": "test",
+ "filesystem": "ext4",
+ },
+ fs_setup,
+ )
+
+ def test_dotted_devname(self):
+ fs_setup = {
+ "partition": "auto",
+ "device": "ephemeral0.0",
+ "label": "test2",
+ "filesystem": "xfs",
+ }
+
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+
+ self.assertEqual(
+ {
+ "_origname": "ephemeral0.0",
+ "_partition": "auto",
+ "partition": "0",
+ "device": "ephemeral0",
+ "label": "test2",
+ "filesystem": "xfs",
+ },
+ fs_setup,
+ )
+
+ def test_dotted_devname_populates_partition(self):
+ fs_setup = {
+ "device": "ephemeral0.1",
+ "label": "test2",
+ "filesystem": "xfs",
+ }
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+ self.assertEqual(
+ {
+ "_origname": "ephemeral0.1",
+ "device": "ephemeral0",
+ "partition": "1",
+ "label": "test2",
+ "filesystem": "xfs",
+ },
+ fs_setup,
+ )
+
+
+@mock.patch(
+ "cloudinit.config.cc_disk_setup.assert_and_settle_device",
+ return_value=None,
+)
+@mock.patch(
+ "cloudinit.config.cc_disk_setup.find_device_node",
+ return_value=("/dev/xdb1", False),
+)
+@mock.patch("cloudinit.config.cc_disk_setup.device_type", return_value=None)
+@mock.patch("cloudinit.config.cc_disk_setup.subp.subp", return_value=("", ""))
+class TestMkfsCommandHandling(CiTestCase):
+
+ with_logs = True
+
+ def test_with_cmd(self, subp, *args):
+ """mkfs honors cmd and logs warnings when extra_opts or overwrite are
+ provided."""
+ cc_disk_setup.mkfs(
+ {
+ "cmd": "mkfs -t %(filesystem)s -L %(label)s %(device)s",
+ "filesystem": "ext4",
+ "device": "/dev/xdb1",
+ "label": "with_cmd",
+ "extra_opts": ["should", "generate", "warning"],
+ "overwrite": "should generate warning too",
+ }
+ )
+
+ self.assertIn(
+ "extra_opts "
+ + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd "
+ + "/dev/xdb1",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "overwrite "
+ + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd "
+ + "/dev/xdb1",
+ self.logs.getvalue(),
+ )
+
+ subp.assert_called_once_with(
+ "mkfs -t ext4 -L with_cmd /dev/xdb1", shell=True
+ )
+
+ @mock.patch("cloudinit.config.cc_disk_setup.subp.which")
+ def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
+ """mkfs observes extra_opts and overwrite settings when cmd is not
+ present."""
+ m_which.side_effect = lambda p: {"mkfs.ext4": "/sbin/mkfs.ext4"}[p]
+ cc_disk_setup.mkfs(
+ {
+ "filesystem": "ext4",
+ "device": "/dev/xdb1",
+ "label": "without_cmd",
+ "extra_opts": ["are", "added"],
+ "overwrite": True,
+ }
+ )
+
+ subp.assert_called_once_with(
+ [
+ "/sbin/mkfs.ext4",
+ "/dev/xdb1",
+ "-L",
+ "without_cmd",
+ "-F",
+ "are",
+ "added",
+ ],
+ shell=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disk_setup.subp.which")
+ def test_mkswap(self, m_which, subp, *args):
+ """mkfs observes extra_opts and overwrite settings when cmd is not
+ present."""
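+ # subp.which is consulted twice: "mkfs.swap" is not found (None), then
+ # "mkswap" resolves to /sbin/mkswap, which is the tool actually invoked.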
+ m_which.side_effect = iter([None, "/sbin/mkswap"])
+ cc_disk_setup.mkfs(
+ {
+ "filesystem": "swap",
+ "device": "/dev/xdb1",
+ "label": "swap",
+ "overwrite": True,
+ }
+ )
+
+ self.assertEqual(
+ [mock.call("mkfs.swap"), mock.call("mkswap")],
+ m_which.call_args_list,
+ )
+ subp.assert_called_once_with(
+ ["/sbin/mkswap", "/dev/xdb1", "-L", "swap", "-f"], shell=False
+ )
+
+
+@skipUnlessJsonSchema()
+class TestDebugSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ ({"disk_setup": 1}, "disk_setup: 1 is not of type 'object'"),
+ ({"fs_setup": 1}, "fs_setup: 1 is not of type 'array'"),
+ (
+ {"device_aliases": 1},
+ "device_aliases: 1 is not of type 'object'",
+ ),
+ (
+ {"debug": {"boguskey": True}},
+ re.escape(
+ "Additional properties are not allowed ('boguskey' was"
+ " unexpected)"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_final_message.py b/tests/unittests/config/test_cc_final_message.py
index 46ba99b2..46ba99b2 100644
--- a/cloudinit/config/tests/test_final_message.py
+++ b/tests/unittests/config/test_cc_final_message.py
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/config/test_cc_growpart.py
index 7f039b79..ba66f136 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import cloud
-from cloudinit.config import cc_growpart
-from cloudinit import subp
-
-from cloudinit.tests.helpers import TestCase
-
import errno
import logging
import os
import re
+import shutil
+import stat
import unittest
from contextlib import ExitStack
from unittest import mock
+from cloudinit import cloud, subp, temp_utils
+from cloudinit.config import cc_growpart
+from tests.unittests.helpers import TestCase
+
# growpart:
# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
@@ -58,6 +58,33 @@ usage: gpart add -t type [-a alignment] [-b start] <SNIP> geom
"""
+class Dir:
+ """Stub object"""
+
+ def __init__(self, name):
+ self.name = name
+ self.st_mode = name
+
+ def is_dir(self, *args, **kwargs):
+ return True
+
+ def stat(self, *args, **kwargs):
+ return self
+
+
+class Scanner:
+ """Stub object"""
+
+ def __enter__(self):
+ return (
+ Dir(""),
+ Dir(""),
+ )
+
+ def __exit__(self, *args):
+ pass
+
+
class TestDisabled(unittest.TestCase):
def setUp(self):
super(TestDisabled, self).setUp()
@@ -72,11 +99,12 @@ class TestDisabled(unittest.TestCase):
# Test that nothing is done if mode is off.
# this really only verifies that resizer_factory isn't called
- config = {'growpart': {'mode': 'off'}}
+ config = {"growpart": {"mode": "off"}}
- with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj:
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
+ with mock.patch.object(cc_growpart, "resizer_factory") as mockobj:
+ self.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
self.assertEqual(mockobj.call_count, 0)
@@ -91,79 +119,154 @@ class TestConfig(TestCase):
self.cloud_init = None
self.handle = cc_growpart.handle
+ self.tmppath = "/tmp/cloudinit-test-file"
+ self.tmpdir = os.scandir("/tmp")
+ self.tmpfile = open(self.tmppath, "w")
+
+ def tearDown(self):
+ self.tmpfile.close()
+ os.remove(self.tmppath)
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_auto_is_fine(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
-
- config = {'growpart': {'mode': 'auto'}}
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- mockobj.assert_has_calls([
- mock.call(['growpart', '--help'], env={'LANG': 'C'}),
- mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
+ subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "")
+ ) as mockobj:
+
+ config = {"growpart": {"mode": "auto"}}
+ self.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ mockobj.assert_has_calls(
+ [
+ mock.call(["growpart", "--help"], env={"LANG": "C"}),
+ mock.call(
+ ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1]
+ ),
+ ]
+ )
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
- config = {'growpart': {'mode': "growpart"}}
+ subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "")
+ ) as mockobj:
+ config = {"growpart": {"mode": "growpart"}}
self.assertRaises(
- ValueError, self.handle, self.name, config,
- self.cloud_init, self.log, self.args)
+ ValueError,
+ self.handle,
+ self.name,
+ config,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
+ ["growpart", "--help"], env={"LANG": "C"}
+ )
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_prefers_growpart(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_RESIZE, "")) as mockobj:
+ subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
+ ) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
+ ["growpart", "--help"], env={"LANG": "C"}
+ )
+
+ @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True)
+ @mock.patch.object(temp_utils, "mkdtemp", return_value="/tmp/much-random")
+ @mock.patch.object(stat, "S_ISDIR", return_value=False)
+ @mock.patch.object(os.path, "samestat", return_value=True)
+ @mock.patch.object(os.path, "join", return_value="/tmp")
+ @mock.patch.object(os, "scandir", return_value=Scanner())
+ @mock.patch.object(os, "mkdir")
+ @mock.patch.object(os, "unlink")
+ @mock.patch.object(os, "rmdir")
+ @mock.patch.object(os, "open", return_value=1)
+ @mock.patch.object(os, "close")
+ @mock.patch.object(shutil, "rmtree")
+ @mock.patch.object(os, "lseek", return_value=1024)
+ @mock.patch.object(os, "lstat", return_value="interesting metadata")
+ def test_force_lang_check_tempfile(self, *args, **kwargs):
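+ # The stacked mock.patch decorators above stub out all filesystem access
+ # so resize() can run; the asserts below check that growpart is invoked
+ # with LANG=C and a forced TMPDIR even though LANG is set to a UTF-8
+ # locale in the test environment.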
+ with mock.patch.object(
+ subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
+ ) as mockobj:
- @mock.patch.dict("os.environ", clear=True)
+ ret = cc_growpart.resizer_factory(mode="auto")
+ self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
+ diskdev = "/dev/sdb"
+ partnum = 1
+ partdev = "/dev/sdb"
+ ret.resize(diskdev, partnum, partdev)
+ mockobj.assert_has_calls(
+ [
+ mock.call(
+ ["growpart", "--dry-run", diskdev, partnum],
+ env={"LANG": "C", "TMPDIR": "/tmp"},
+ ),
+ mock.call(
+ ["growpart", diskdev, partnum],
+ env={"LANG": "C", "TMPDIR": "/tmp"},
+ ),
+ ]
+ )
+
+ @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True)
def test_mode_auto_falls_back_to_gpart(self):
with mock.patch.object(
- subp, 'subp',
- return_value=("", HELP_GPART)) as mockobj:
+ subp, "subp", return_value=("", HELP_GPART)
+ ) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGpart)
- mockobj.assert_has_calls([
- mock.call(['growpart', '--help'], env={'LANG': 'C'}),
- mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
+ mockobj.assert_has_calls(
+ [
+ mock.call(["growpart", "--help"], env={"LANG": "C"}),
+ mock.call(
+ ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1]
+ ),
+ ]
+ )
def test_handle_with_no_growpart_entry(self):
# if no 'growpart' entry in config, then mode=auto should be used
myresizer = object()
- retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),)
+ retval = (
+ (
+ "/",
+ cc_growpart.RESIZE.CHANGED,
+ "my-message",
+ ),
+ )
with ExitStack() as mocks:
factory = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resizer_factory',
- return_value=myresizer))
+ mock.patch.object(
+ cc_growpart, "resizer_factory", return_value=myresizer
+ )
+ )
rsdevs = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resize_devices',
- return_value=retval))
+ mock.patch.object(
+ cc_growpart, "resize_devices", return_value=retval
+ )
+ )
mocks.enter_context(
- mock.patch.object(cc_growpart, 'RESIZERS',
- (('mysizer', object),)
- ))
+ mock.patch.object(
+ cc_growpart, "RESIZERS", (("mysizer", object),)
+ )
+ )
self.handle(self.name, {}, self.cloud_init, self.log, self.args)
- factory.assert_called_once_with('auto')
- rsdevs.assert_called_once_with(myresizer, ['/'])
+ factory.assert_called_once_with("auto")
+ rsdevs.assert_called_once_with(myresizer, ["/"])
class TestResize(unittest.TestCase):
@@ -177,9 +280,18 @@ class TestResize(unittest.TestCase):
# this patches out devent2dev, os.stat, and device_part_info
# so in the end, doesn't test a lot
devs = ["/dev/XXda1", "/dev/YYda2"]
- devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5,
- st_nlink=1, st_uid=0, st_gid=6, st_size=0,
- st_atime=0, st_mtime=0, st_ctime=0)
+ devstat_ret = Bunch(
+ st_mode=25008,
+ st_ino=6078,
+ st_dev=5,
+ st_nlink=1,
+ st_uid=0,
+ st_gid=6,
+ st_size=0,
+ st_atime=0,
+ st_mtime=0,
+ st_ctime=0,
+ )
enoent = ["/dev/NOENT"]
real_stat = os.stat
resize_calls = []
@@ -213,12 +325,15 @@ class TestResize(unittest.TestCase):
return f
return None
- self.assertEqual(cc_growpart.RESIZE.NOCHANGE,
- find("/dev/XXda1", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.CHANGED,
- find("/dev/YYda2", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.SKIPPED,
- find(enoent[0], resized)[1])
+ self.assertEqual(
+ cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1]
+ )
+ self.assertEqual(
+ cc_growpart.RESIZE.CHANGED, find("/dev/YYda2", resized)[1]
+ )
+ self.assertEqual(
+ cc_growpart.RESIZE.SKIPPED, find(enoent[0], resized)[1]
+ )
# self.assertEqual(resize_calls,
# [("/dev/XXda", "1", "/dev/XXda1"),
# ("/dev/YYda", "2", "/dev/YYda2")])
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index 99c05bb5..5151a7b5 100644
--- a/cloudinit/config/tests/test_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -1,11 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from logging import Logger
+from unittest import mock
+
import pytest
-from unittest import mock
-from logging import Logger
-from cloudinit.subp import ProcessExecutionError
from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+from cloudinit.subp import ProcessExecutionError
class TestFetchIdevs:
@@ -21,73 +22,78 @@ class TestFetchIdevs:
ProcessExecutionError(reason=FileNotFoundError()),
False,
mock.call("'grub-probe' not found in $PATH"),
- '',
- '',
+ "",
+ "",
),
# Inside a container, grub installed
(
ProcessExecutionError(stderr="failed to get canonical path"),
False,
mock.call("grub-probe 'failed to get canonical path'"),
- '',
- '',
+ "",
+ "",
),
# KVM Instance
(
- ['/dev/vda'],
+ ["/dev/vda"],
True,
None,
(
- '/dev/disk/by-path/pci-0000:00:00.0 ',
- '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
+ "/dev/disk/by-path/pci-0000:00:00.0 ",
+ "/dev/disk/by-path/virtio-pci-0000:00:00.0 ",
),
- '/dev/vda',
+ "/dev/vda",
),
# Xen Instance
(
- ['/dev/xvda'],
+ ["/dev/xvda"],
True,
None,
- '',
- '/dev/xvda',
+ "",
+ "/dev/xvda",
),
# NVMe Hardware Instance
(
- ['/dev/nvme1n1'],
+ ["/dev/nvme1n1"],
True,
None,
(
- '/dev/disk/by-id/nvme-Company_hash000 ',
- '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
- '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
+ "/dev/disk/by-id/nvme-Company_hash000 ",
+ "/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ",
+ "/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ",
),
- '/dev/disk/by-id/nvme-Company_hash000',
+ "/dev/disk/by-id/nvme-Company_hash000",
),
# SCSI Hardware Instance
(
- ['/dev/sda'],
+ ["/dev/sda"],
True,
None,
(
- '/dev/disk/by-id/company-user-1 ',
- '/dev/disk/by-id/scsi-0Company_user-1 ',
- '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
+ "/dev/disk/by-id/company-user-1 ",
+ "/dev/disk/by-id/scsi-0Company_user-1 ",
+ "/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ",
),
- '/dev/disk/by-id/company-user-1',
+ "/dev/disk/by-id/company-user-1",
),
],
)
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
- def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
- path_exists, expected_log_call, udevadm_output,
- expected_idevs):
+ def test_fetch_idevs(
+ self,
+ m_subp,
+ m_exists,
+ m_logexc,
+ grub_output,
+ path_exists,
+ expected_log_call,
+ udevadm_output,
+ expected_idevs,
+ ):
"""Tests outputs from grub-probe and udevadm info against grub-dpkg"""
- m_subp.side_effect = [
- grub_output,
- ["".join(udevadm_output)]
- ]
+ m_subp.side_effect = [grub_output, ["".join(udevadm_output)]]
m_exists.return_value = path_exists
log = mock.Mock(spec=Logger)
idevs = fetch_idevs(log)
@@ -106,67 +112,72 @@ class TestHandle:
# No configuration
None,
None,
- '/dev/disk/by-id/nvme-Company_hash000',
+ "/dev/disk/by-id/nvme-Company_hash000",
(
"Setting grub debconf-set-selections with ",
- "'/dev/disk/by-id/nvme-Company_hash000','false'"
+ "'/dev/disk/by-id/nvme-Company_hash000','false'",
),
),
(
# idevs set, idevs_empty unset
- '/dev/sda',
+ "/dev/sda",
None,
- '/dev/sda',
+ "/dev/sda",
(
"Setting grub debconf-set-selections with ",
- "'/dev/sda','false'"
+ "'/dev/sda','false'",
),
),
(
# idevs unset, idevs_empty set
None,
- 'true',
- '/dev/xvda',
+ "true",
+ "/dev/xvda",
(
"Setting grub debconf-set-selections with ",
- "'/dev/xvda','true'"
+ "'/dev/xvda','true'",
),
),
(
# idevs set, idevs_empty set
- '/dev/vda',
- 'false',
- '/dev/disk/by-id/company-user-1',
+ "/dev/vda",
+ "false",
+ "/dev/disk/by-id/company-user-1",
(
"Setting grub debconf-set-selections with ",
- "'/dev/vda','false'"
+ "'/dev/vda','false'",
),
),
(
# idevs set, idevs_empty set
# Respect what the user defines, even if it's logically wrong
- '/dev/nvme0n1',
- 'true',
- '',
+ "/dev/nvme0n1",
+ "true",
+ "",
(
"Setting grub debconf-set-selections with ",
- "'/dev/nvme0n1','true'"
+ "'/dev/nvme0n1','true'",
),
- )
+ ),
],
)
@mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
- def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
- cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
- expected_log_output):
+ def test_handle(
+ self,
+ m_subp,
+ m_logexc,
+ m_get_cfg_str,
+ m_fetch_idevs,
+ cfg_idevs,
+ cfg_idevs_empty,
+ fetch_idevs_output,
+ expected_log_output,
+ ):
"""Test setting of correct debconf database entries"""
- m_get_cfg_str.side_effect = [
- cfg_idevs,
- cfg_idevs_empty
- ]
+ m_get_cfg_str.side_effect = [cfg_idevs, cfg_idevs_empty]
m_fetch_idevs.return_value = fetch_idevs_output
log = mock.Mock(spec=Logger)
handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py
new file mode 100644
index 00000000..e67fce60
--- /dev/null
+++ b/tests/unittests/config/test_cc_install_hotplug.py
@@ -0,0 +1,129 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from collections import namedtuple
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_install_hotplug import (
+ HOTPLUG_UDEV_PATH,
+ HOTPLUG_UDEV_RULES_TEMPLATE,
+ handle,
+)
+from cloudinit.event import EventScope, EventType
+
+
+@pytest.fixture()
+def mocks():
+ m_update_enabled = mock.patch("cloudinit.stages.update_event_enabled")
+ m_write = mock.patch("cloudinit.util.write_file", autospec=True)
+ m_del = mock.patch("cloudinit.util.del_file", autospec=True)
+ m_subp = mock.patch("cloudinit.subp.subp")
+ m_which = mock.patch("cloudinit.subp.which", return_value=None)
+ m_path_exists = mock.patch("os.path.exists", return_value=False)
+
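+ # Hand every started patch to the test as a single namedtuple of mocks.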
+ yield namedtuple(
+ "Mocks", "m_update_enabled m_write m_del m_subp m_which m_path_exists"
+ )(
+ m_update_enabled.start(),
+ m_write.start(),
+ m_del.start(),
+ m_subp.start(),
+ m_which.start(),
+ m_path_exists.start(),
+ )
+
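+ # Code after the yield is the fixture teardown: stop every patch once
+ # the test using this fixture has finished.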
+ m_update_enabled.stop()
+ m_write.stop()
+ m_del.stop()
+ m_subp.stop()
+ m_which.stop()
+ m_path_exists.stop()
+
+
+class TestInstallHotplug:
+ @pytest.mark.parametrize("libexec_exists", [True, False])
+ def test_rules_installed_when_supported_and_enabled(
+ self, mocks, libexec_exists
+ ):
+ mocks.m_which.return_value = "udevadm"
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ if libexec_exists:
+ libexecdir = "/usr/libexec/cloud-init"
+ else:
+ libexecdir = "/usr/lib/cloud-init"
+ with mock.patch("os.path.exists", return_value=libexec_exists):
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_write.assert_called_once_with(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(
+ libexecdir=libexecdir
+ ),
+ )
+ assert mocks.m_subp.call_args_list == [
+ mock.call(
+ [
+ "udevadm",
+ "control",
+ "--reload-rules",
+ ]
+ )
+ ]
+ assert mocks.m_del.call_args_list == []
+
+ def test_rules_not_installed_when_unsupported(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_not_installed_when_disabled(self, mocks):
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_uninstalled_when_disabled(self, mocks):
+ mocks.m_path_exists.return_value = True
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH)
+ assert mocks.m_subp.call_args_list == [
+ mock.call(
+ [
+ "udevadm",
+ "control",
+ "--reload-rules",
+ ]
+ )
+ ]
+ assert mocks.m_write.call_args_list == []
+
+ def test_rules_not_installed_when_no_udevadm(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py
new file mode 100644
index 00000000..9efc2b48
--- /dev/null
+++ b/tests/unittests/config/test_cc_keys_to_console.py
@@ -0,0 +1,40 @@
+"""Tests for cc_keys_to_console."""
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_keys_to_console
+
+
+class TestHandle:
+ """Tests for cloudinit.config.cc_keys_to_console.handle.
+
+ TODO: These tests only cover the emit_keys_to_console config option; they
+ should be expanded to cover the full functionality.
+ """
+
+ @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log")
+ @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists")
+ @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp")
+ @pytest.mark.parametrize(
+ "cfg,subp_called",
+ [
+ ({}, True), # Default to emitting keys
+ ({"ssh": {}}, True), # Default even if we have the parent key
+ (
+ {"ssh": {"emit_keys_to_console": True}},
+ True,
+ ), # Explicitly enabled
+ ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled
+ ],
+ )
+ def test_emit_keys_to_console_config(
+ self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called
+ ):
+ # Ensure we always find the helper
+ m_path_exists.return_value = True
+ m_subp.return_value = ("", "")
+
+ cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
+
+ assert subp_called == (m_subp.call_count == 1)
diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py
new file mode 100644
index 00000000..efddc1b6
--- /dev/null
+++ b/tests/unittests/config/test_cc_landscape.py
@@ -0,0 +1,170 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_landscape
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ wrap_and_call,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLandscape(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestLandscape, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.conf = self.tmp_path("client.conf", self.new_root)
+ self.default_file = self.tmp_path("default_landscape", self.new_root)
+ self.patchUtils(self.new_root)
+ self.add_patch(
+ "cloudinit.distros.ubuntu.Distro.install_packages",
+ "m_install_packages",
+ )
+
+ def test_handler_skips_empty_landscape_cloudconfig(self):
+ """Empty landscape cloud-config section does no work."""
+ mycloud = get_cloud("ubuntu")
+ mycloud.distro = mock.MagicMock()
+ cfg = {"landscape": {}}
+ cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertFalse(mycloud.distro.install_packages.called)
+
+ def test_handler_error_on_invalid_landscape_type(self):
+ """Raise an error when landscape configuraiton option is invalid."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": "wrongtype"}
+ with self.assertRaises(RuntimeError) as context_manager:
+ cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "'landscape' key existed in config, but not a dict",
+ str(context_manager.exception),
+ )
+
+ @mock.patch("cloudinit.config.cc_landscape.subp")
+ def test_handler_restarts_landscape_client(self, m_subp):
+ """handler restarts lansdscape-client after install."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(
+ [mock.call(["service", "landscape-client", "restart"])],
+ m_subp.subp.call_args_list,
+ )
+
+ def test_handler_installs_client_and_creates_config_file(self):
+ """Write landscape client.conf and install landscape-client."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ }
+ }
+ mycloud.distro = mock.MagicMock()
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {
+ "LSC_CLIENT_CFG_FILE": {"new": self.conf},
+ "LS_DEFAULT_FILE": {"new": self.default_file},
+ },
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(
+ [mock.call("landscape-client")],
+ mycloud.distro.install_packages.call_args,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
+ default_content = util.load_file(self.default_file)
+ self.assertEqual("RUN=1\n", default_content)
+
+ def test_handler_writes_merged_client_config_file_with_defaults(self):
+ """Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
+ # Write existing sparse client.conf file
+ util.write_file(self.conf, "[client]\ncomputer_title = My PC\n")
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ "computer_title": "My PC",
+ }
+ }
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
+
+ def test_handler_writes_merged_provided_cloudconfig_with_defaults(self):
+ """Merge and write options from cloud-config options with defaults."""
+ # Write empty sparse client.conf file
+ util.write_file(self.conf, "")
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {"computer_title": "My PC"}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ "computer_title": "My PC",
+ }
+ }
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py
new file mode 100644
index 00000000..7190bc68
--- /dev/null
+++ b/tests/unittests/config/test_cc_locale.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import shutil
+import tempfile
+from io import BytesIO
+from unittest import mock
+
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_locale
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLocale(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestLocale, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.patchUtils(self.new_root)
+
+ def test_set_locale_arch(self):
+ locale = "en_GB.UTF-8"
+ locale_configfile = "/etc/invalid-locale-path"
+ cfg = {
+ "locale": locale,
+ "locale_configfile": locale_configfile,
+ }
+ cc = get_cloud("arch")
+
+ with mock.patch("cloudinit.distros.arch.subp.subp") as m_subp:
+ with mock.patch("cloudinit.distros.arch.LOG.warning") as m_LOG:
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_LOG.assert_called_with(
+ "Invalid locale_configfile %s, "
+ "only supported value is "
+ "/etc/locale.conf",
+ locale_configfile,
+ )
+
+ contents = util.load_file(cc.distro.locale_gen_fn)
+ self.assertIn("%s UTF-8" % locale, contents)
+ m_subp.assert_called_with(
+ ["localectl", "set-locale", locale], capture=False
+ )
+
+ def test_set_locale_sles(self):
+
+ cfg = {
+ "locale": "My.Locale",
+ }
+ cc = get_cloud("sles")
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ if cc.distro.uses_systemd():
+ locale_conf = cc.distro.systemd_locale_conf_fn
+ else:
+ locale_conf = cc.distro.locale_conf_fn
+ contents = util.load_file(locale_conf, decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ if cc.distro.uses_systemd():
+ self.assertEqual({"LANG": cfg["locale"]}, dict(n_cfg))
+ else:
+ self.assertEqual({"RC_LANG": cfg["locale"]}, dict(n_cfg))
+
+ def test_set_locale_sles_default(self):
+ cfg = {}
+ cc = get_cloud("sles")
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+
+ if cc.distro.uses_systemd():
+ locale_conf = cc.distro.systemd_locale_conf_fn
+ keyname = "LANG"
+ else:
+ locale_conf = cc.distro.locale_conf_fn
+ keyname = "RC_LANG"
+
+ contents = util.load_file(locale_conf, decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({keyname: "en_US.UTF-8"}, dict(n_cfg))
+
+ def test_locale_update_config_if_different_than_default(self):
+ """Test cc_locale writes updates conf if different than default"""
+ locale_conf = os.path.join(self.new_root, "etc/default/locale")
+ util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
+ cfg = {"locale": "C.UTF-8"}
+ cc = get_cloud("ubuntu")
+ with mock.patch("cloudinit.distros.debian.subp.subp") as m_subp:
+ with mock.patch(
+ "cloudinit.distros.debian.LOCALE_CONF_FN", locale_conf
+ ):
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_subp.assert_called_with(
+ [
+ "update-locale",
+ "--locale-file=%s" % locale_conf,
+ "LANG=C.UTF-8",
+ ],
+ capture=False,
+ )
+
+ def test_locale_rhel_defaults_en_us_utf8(self):
+ """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
+ cfg = {}
+ cc = get_cloud("rhel")
+ update_sysconfig = "cloudinit.distros.rhel_util.update_sysconfig_file"
+ with mock.patch.object(cc.distro, "uses_systemd") as m_use_sd:
+ m_use_sd.return_value = True
+ with mock.patch(update_sysconfig) as m_update_syscfg:
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_update_syscfg.assert_called_with(
+ "/etc/locale.conf", {"LANG": "en_US.UTF-8"}
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
new file mode 100644
index 00000000..720274d6
--- /dev/null
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -0,0 +1,272 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+from cloudinit.config import cc_lxd
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+
+class TestLxd(t_help.CiTestCase):
+
+ with_logs = True
+
+ lxd_cfg = {
+ "lxd": {
+ "init": {
+ "network_address": "0.0.0.0",
+ "storage_backend": "zfs",
+ "storage_pool": "poolname",
+ }
+ }
+ }
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_init(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ mock_subp.which.return_value = True
+ m_maybe_clean.return_value = None
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertTrue(mock_subp.which.called)
+ # no bridge config, so maybe_cleanup should not be called.
+ self.assertFalse(m_maybe_clean.called)
+ self.assertEqual(
+ [
+ mock.call(["lxd", "waitready", "--timeout=300"]),
+ mock.call(
+ [
+ "lxd",
+ "init",
+ "--auto",
+ "--network-address=0.0.0.0",
+ "--storage-backend=zfs",
+ "--storage-pool=poolname",
+ ]
+ ),
+ ],
+ mock_subp.subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_install(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ mock_subp.which.return_value = None
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertNotIn("WARN", self.logs.getvalue())
+ self.assertTrue(cc.distro.install_packages.called)
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertFalse(m_maybe_clean.called)
+ install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
+ self.assertEqual(sorted(install_pkg), ["lxd", "zfsutils-linux"])
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, self.logger, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_subp.subp.called)
+ self.assertFalse(m_maybe_clean.called)
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle("cc_lxd", {"package_update": True}, cc, self.logger, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_subp.subp.called)
+ self.assertFalse(m_maybe_clean.called)
+
+ def test_lxd_debconf_new_full(self):
+ data = {
+ "mode": "new",
+ "name": "testbr0",
+ "ipv4_address": "10.0.8.1",
+ "ipv4_netmask": "24",
+ "ipv4_dhcp_first": "10.0.8.2",
+ "ipv4_dhcp_last": "10.0.8.254",
+ "ipv4_dhcp_leases": "250",
+ "ipv4_nat": "true",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ "domain": "lxd",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "true",
+ "lxd/bridge-name": "testbr0",
+ "lxd/bridge-ipv4": "true",
+ "lxd/bridge-ipv4-address": "10.0.8.1",
+ "lxd/bridge-ipv4-netmask": "24",
+ "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
+ "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
+ "lxd/bridge-ipv4-dhcp-leases": "250",
+ "lxd/bridge-ipv4-nat": "true",
+ "lxd/bridge-ipv6": "true",
+ "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
+ "lxd/bridge-ipv6-netmask": "64",
+ "lxd/bridge-ipv6-nat": "true",
+ "lxd/bridge-domain": "lxd",
+ },
+ )
+
+ def test_lxd_debconf_new_partial(self):
+ data = {
+ "mode": "new",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "true",
+ "lxd/bridge-ipv6": "true",
+ "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
+ "lxd/bridge-ipv6-netmask": "64",
+ "lxd/bridge-ipv6-nat": "true",
+ },
+ )
+
+ def test_lxd_debconf_existing(self):
+ data = {"mode": "existing", "name": "testbr0"}
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "false",
+ "lxd/use-existing-bridge": "true",
+ "lxd/bridge-name": "testbr0",
+ },
+ )
+
+ def test_lxd_debconf_none(self):
+ data = {"mode": "none"}
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {"lxd/setup-bridge": "false", "lxd/bridge-name": ""},
+ )
+
+ def test_lxd_cmd_new_full(self):
+ data = {
+ "mode": "new",
+ "name": "testbr0",
+ "ipv4_address": "10.0.8.1",
+ "ipv4_netmask": "24",
+ "ipv4_dhcp_first": "10.0.8.2",
+ "ipv4_dhcp_last": "10.0.8.254",
+ "ipv4_dhcp_leases": "250",
+ "ipv4_nat": "true",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ "domain": "lxd",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ [
+ "network",
+ "create",
+ "testbr0",
+ "ipv4.address=10.0.8.1/24",
+ "ipv4.nat=true",
+ "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
+ "ipv6.address=fd98:9e0:3744::1/64",
+ "ipv6.nat=true",
+ "dns.domain=lxd",
+ ],
+ ["network", "attach-profile", "testbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_new_partial(self):
+ data = {
+ "mode": "new",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ [
+ "network",
+ "create",
+ "lxdbr0",
+ "ipv4.address=none",
+ "ipv6.address=fd98:9e0:3744::1/64",
+ "ipv6.nat=true",
+ ],
+ ["network", "attach-profile", "lxdbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_existing(self):
+ data = {"mode": "existing", "name": "testbr0"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ None,
+ ["network", "attach-profile", "testbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_none(self):
+ data = {"mode": "none"}
+ self.assertEqual(cc_lxd.bridge_to_cmd(data), (None, None))
+
+
+class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
+ """Test the implementation of maybe_cleanup_default."""
+
+ defnet = cc_lxd._DEFAULT_NETWORK_NAME
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_other_than_default_not_deleted(self, m_lxc):
+ """deletion or removal should only occur if bridge is default."""
+ cc_lxd.maybe_cleanup_default(
+ net_name="lxdbr1", did_init=True, create=True, attach=True
+ )
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_did_init_false_does_not_delete(self, m_lxc):
+ """deletion or removal should only occur if did_init is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=False, create=True, attach=True
+ )
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_deleted_if_create_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=True, create=True, attach=False
+ )
+ m_lxc.assert_called_with(["network", "delete", self.defnet])
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_device_removed_if_attach_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ nic_name = "my_nic"
+ profile = "my_profile"
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet,
+ did_init=True,
+ create=False,
+ attach=True,
+ profile=profile,
+ nic_name=nic_name,
+ )
+ m_lxc.assert_called_once_with(
+ ["profile", "device", "remove", profile, nic_name]
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
index 6891e15f..5cbdeb76 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -1,18 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import (cloud, distros, helpers, util)
-from cloudinit.config import cc_mcollective
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-import configobj
import logging
import os
import shutil
import tempfile
from io import BytesIO
+import configobj
+
+from cloudinit import util
+from cloudinit.config import cc_mcollective
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -48,108 +47,112 @@ class TestConfig(t_help.FilesystemMockingTestCase):
self.addCleanup(shutil.rmtree, self.tmp)
# "./": make os.path.join behave correctly with abs path as second arg
self.server_cfg = os.path.join(
- self.tmp, "./" + cc_mcollective.SERVER_CFG)
+ self.tmp, "./" + cc_mcollective.SERVER_CFG
+ )
self.pubcert_file = os.path.join(
- self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
+ self.tmp, "./" + cc_mcollective.PUBCERT_FILE
+ )
self.pricert_file = os.path.join(
- self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE)
+ self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE
+ )
def test_basic_config(self):
cfg = {
- 'mcollective': {
- 'conf': {
- 'loglevel': 'debug',
- 'connector': 'rabbitmq',
- 'logfile': '/var/log/mcollective.log',
- 'ttl': '4294957',
- 'collectives': 'mcollective',
- 'main_collective': 'mcollective',
- 'securityprovider': 'psk',
- 'daemonize': '1',
- 'factsource': 'yaml',
- 'direct_addressing': '1',
- 'plugin.psk': 'unset',
- 'libdir': '/usr/share/mcollective/plugins',
- 'identity': '1',
+ "mcollective": {
+ "conf": {
+ "loglevel": "debug",
+ "connector": "rabbitmq",
+ "logfile": "/var/log/mcollective.log",
+ "ttl": "4294957",
+ "collectives": "mcollective",
+ "main_collective": "mcollective",
+ "securityprovider": "psk",
+ "daemonize": "1",
+ "factsource": "yaml",
+ "direct_addressing": "1",
+ "plugin.psk": "unset",
+ "libdir": "/usr/share/mcollective/plugins",
+ "identity": "1",
},
},
}
- expected = cfg['mcollective']['conf']
+ expected = cfg["mcollective"]["conf"]
self.patchUtils(self.tmp)
- cc_mcollective.configure(cfg['mcollective']['conf'])
+ cc_mcollective.configure(cfg["mcollective"]["conf"])
contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
contents = configobj.ConfigObj(BytesIO(contents))
self.assertEqual(expected, dict(contents))
def test_existing_config_is_saved(self):
- cfg = {'loglevel': 'warn'}
+ cfg = {"loglevel": "warn"}
util.write_file(self.server_cfg, STOCK_CONFIG)
cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
self.assertTrue(os.path.exists(self.server_cfg))
self.assertTrue(os.path.exists(self.server_cfg + ".old"))
- self.assertEqual(util.load_file(self.server_cfg + ".old"),
- STOCK_CONFIG)
+ self.assertEqual(
+ util.load_file(self.server_cfg + ".old"), STOCK_CONFIG
+ )
def test_existing_updated(self):
- cfg = {'loglevel': 'warn'}
+ cfg = {"loglevel": "warn"}
util.write_file(self.server_cfg, STOCK_CONFIG)
cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
cfgobj = configobj.ConfigObj(self.server_cfg)
- self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
+ self.assertEqual(cfg["loglevel"], cfgobj["loglevel"])
def test_certificats_written(self):
# check public-cert and private-cert keys in config get written
- cfg = {'loglevel': 'debug',
- 'public-cert': "this is my public-certificate",
- 'private-cert': "secret private certificate"}
+ cfg = {
+ "loglevel": "debug",
+ "public-cert": "this is my public-certificate",
+ "private-cert": "secret private certificate",
+ }
cc_mcollective.configure(
- config=cfg, server_cfg=self.server_cfg,
- pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
+ config=cfg,
+ server_cfg=self.server_cfg,
+ pricert_file=self.pricert_file,
+ pubcert_file=self.pubcert_file,
+ )
found = configobj.ConfigObj(self.server_cfg)
# make sure these didn't get written in
- self.assertFalse('public-cert' in found)
- self.assertFalse('private-cert' in found)
+ self.assertFalse("public-cert" in found)
+ self.assertFalse("private-cert" in found)
# these need updating to the specified paths
- self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
- self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file)
+ self.assertEqual(found["plugin.ssl_server_public"], self.pubcert_file)
+ self.assertEqual(found["plugin.ssl_server_private"], self.pricert_file)
# and the security provider should be ssl
- self.assertEqual(found['securityprovider'], 'ssl')
+ self.assertEqual(found["securityprovider"], "ssl")
self.assertEqual(
- util.load_file(self.pricert_file), cfg['private-cert'])
- self.assertEqual(
- util.load_file(self.pubcert_file), cfg['public-cert'])
+ util.load_file(self.pricert_file), cfg["private-cert"]
+ )
+ self.assertEqual(util.load_file(self.pubcert_file), cfg["public-cert"])
class TestHandler(t_help.TestCase):
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
@t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
@t_help.mock.patch("cloudinit.config.cc_mcollective.util")
def test_mcollective_install(self, mock_util, mock_subp):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
- mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
- cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
+ mycfg = {"mcollective": {"conf": {"loglevel": "debug"}}}
+ cc_mcollective.handle("cc_mcollective", mycfg, cc, LOG, [])
self.assertTrue(cc.distro.install_packages.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(install_pkg, ('mcollective',))
+ self.assertEqual(install_pkg, ("mcollective",))
self.assertTrue(mock_subp.subp.called)
- self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
- ['service', 'mcollective', 'restart'])
+ self.assertEqual(
+ mock_subp.subp.call_args_list[0][0][0],
+ ["service", "mcollective", "restart"],
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
new file mode 100644
index 00000000..084faacd
--- /dev/null
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -0,0 +1,522 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os.path
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_mounts
+from cloudinit.config.cc_mounts import create_swapfile
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests import helpers as test_helpers
+
+M_PATH = "cloudinit.config.cc_mounts."
+
+
+class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestSanitizeDevname, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ def _touch(self, path):
+ path = os.path.join(self.new_root, path.lstrip("/"))
+ basedir = os.path.dirname(path)
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ open(path, "a").close()
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def mock_existence_of_disk(self, disk_path):
+ self._touch(disk_path)
+ self._makedirs(os.path.join("/sys/block", disk_path.split("/")[-1]))
+
+ def mock_existence_of_partition(self, disk_path, partition_number):
+ self.mock_existence_of_disk(disk_path)
+ self._touch(disk_path + str(partition_number))
+ disk_name = disk_path.split("/")[-1]
+ self._makedirs(
+ os.path.join(
+ "/sys/block", disk_name, disk_name + str(partition_number)
+ )
+ )
+
+ def test_existent_full_disk_path_is_returned(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_path, lambda x: None, mock.Mock()),
+ )
+
+ def test_existent_disk_name_returns_full_path(self):
+ disk_name = "sda"
+ disk_path = "/dev/" + disk_name
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_name, lambda x: None, mock.Mock()),
+ )
+
+ def test_existent_meta_disk_is_returned(self):
+ actual_disk_path = "/dev/sda"
+ self.mock_existence_of_disk(actual_disk_path)
+ self.assertEqual(
+ actual_disk_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: actual_disk_path, mock.Mock()
+ ),
+ )
+
+ def test_existent_meta_partition_is_returned(self):
+ disk_name, partition_part = "/dev/sda", "1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_existent_meta_partition_with_p_is_returned(self):
+ disk_name, partition_part = "/dev/sda", "p1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_first_partition_returned_if_existent_disk_is_partitioned(self):
+ disk_name, partition_part = "/dev/sda", "1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_nth_partition_returned_if_requested(self):
+ disk_name, partition_part = "/dev/sda", "3"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.3", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_transformer_returning_none_returns_none(self):
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: None, mock.Mock()
+ )
+ )
+
+ def test_missing_device_returns_none(self):
+ self.assertIsNone(
+ cc_mounts.sanitize_devname("/dev/sda", None, mock.Mock())
+ )
+
+ def test_missing_sys_returns_none(self):
+ disk_path = "/dev/sda"
+ self._makedirs(disk_path)
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ )
+
+ def test_existent_disk_but_missing_partition_returns_none(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_path, mock.Mock()
+ )
+ )
+
+ def test_network_device_returns_network_device(self):
+ disk_path = "netdevice:/path"
+ self.assertEqual(
+ disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ )
+
+ def test_device_aliases_remapping(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(
+ "mydata", lambda x: None, mock.Mock(), {"mydata": disk_path}
+ ),
+ )
+
+
+class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestSwapFileCreation, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, "etc/fstab")
+ self.swap_path = os.path.join(self.new_root, "swap.img")
+ self._makedirs("/etc")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.FSTAB_PATH",
+ "mock_fstab_path",
+ self.fstab_path,
+ autospec=False,
+ )
+
+ self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.util.mounts",
+ "mock_util_mounts",
+ return_value={
+ "/dev/sda1": {
+ "fstype": "ext4",
+ "mountpoint": "/",
+ "opts": "rw,relatime,discard",
+ }
+ },
+ )
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ self.cc = {
+ "swap": {
+ "filename": self.swap_path,
+ "size": "512",
+ "maxsize": "512",
+ }
+ }
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def device_name_to_device(self, path):
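+ # Minimal stand-in for cloud.device_name_to_device: only the "swap"
+ # alias resolves to a device; every other name maps to None.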
+ if path == "swap":
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
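+ # create_swapfile chooses between fallocate and dd depending on the
+ # filesystem and kernel version; the four tests below cover xfs on new
+ # and old kernels, btrfs, and ext4.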
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_fallocate_on_xfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ ["fallocate", "-l", "0M", self.swap_path], capture=True
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_xfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (3, 18)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "dd",
+ "if=/dev/zero",
+ "of=" + self.swap_path,
+ "bs=1M",
+ "count=0",
+ ],
+ capture=True,
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_btrfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "btrfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "dd",
+ "if=/dev/zero",
+ "of=" + self.swap_path,
+ "bs=1M",
+ "count=0",
+ ],
+ capture=True,
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_ext4(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (5, 14)
+ m_get_mount_info.return_value = ["", "ext4"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ ["fallocate", "-l", "0M", self.swap_path], capture=True
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+
+class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
+
+ swap_path = "/dev/sdb1"
+
+ def setUp(self):
+ super(TestFstabHandling, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, "etc/fstab")
+ self._makedirs("/etc")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.FSTAB_PATH",
+ "mock_fstab_path",
+ self.fstab_path,
+ autospec=False,
+ )
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts._is_block_device",
+ "mock_is_block_device",
+ return_value=True,
+ )
+
+ self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.util.mounts",
+ "mock_util_mounts",
+ return_value={
+ "/dev/sda1": {
+ "fstype": "ext4",
+ "mountpoint": "/",
+ "opts": "rw,relatime,discard",
+ }
+ },
+ )
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def device_name_to_device(self, path):
+ if path == "swap":
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
+ def test_no_fstab(self):
+ """Handle images which do not include an fstab."""
+ self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
+ fstab_expected_content = (
+ "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_swap_integrity(self):
+ """Ensure that the swap file is correctly created and can
+ swapon successfully. Fixing the corner case of:
+ kernel: swapon: swapfile has holes"""
+
+ fstab = "/swap.img swap swap defaults 0 0\n"
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab)
+ cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]}
+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+
+ def test_fstab_no_swap_device(self):
+ """Ensure that cloud-init adds a discovered swap partition
+ to /etc/fstab."""
+
+ fstab_original_content = ""
+ fstab_expected_content = (
+ "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_same_swap_device_already_configured(self):
+ """Ensure that cloud-init will not add a swap device if the same
+ device already exists in /etc/fstab."""
+
+ fstab_original_content = "%s swap swap defaults 0 0\n" % (
+ self.swap_path,
+ )
+ fstab_expected_content = fstab_original_content
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_alternate_swap_device_already_configured(self):
+ """Ensure that cloud-init will add a discovered swap device to
+ /etc/fstab even when there exists a swap definition on another
+ device."""
+
+ fstab_original_content = "/dev/sdc1 swap swap defaults 0 0\n"
+ fstab_expected_content = (
+ fstab_original_content
+ + "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_no_change_fstab_sets_needs_mount_all(self):
+ """verify unchanged fstab entries are mounted if not call mount -a"""
+ fstab_original_content = (
+ "LABEL=cloudimg-rootfs / ext4 defaults 0 0\n"
+ "LABEL=UEFI /boot/efi vfat defaults 0 0\n"
+ "/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n"
+ )
+ fstab_expected_content = fstab_original_content
+ cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]}
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(["mount", "-a"]),
+ mock.call(["systemctl", "daemon-reload"]),
+ ]
+ )
+
+
+class TestCreateSwapfile:
+ @pytest.mark.parametrize("fstype", ("xfs", "btrfs", "ext4", "other"))
+ @mock.patch(M_PATH + "util.get_mount_info")
+ @mock.patch(M_PATH + "subp.subp")
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to subp.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write("")
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, "")
+ assert mock.call(["mkswap", fname]) in m_subp.call_args_list
+
+ @mock.patch(M_PATH + "util.get_mount_info")
+ @mock.patch(M_PATH + "subp.subp")
+ def test_fallback_from_fallocate_to_dd(
+ self, m_subp, m_get_mount_info, caplog, tmpdir
+ ):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ def subp_side_effect(cmd, *args, **kwargs):
+ # Mock fallocate failing, to initiate fallback
+ if cmd[0] == "fallocate":
+ raise ProcessExecutionError()
+
+ m_subp.side_effect = subp_side_effect
+ # Use ext4 so both fallocate and dd are valid swap creation methods
+ m_get_mount_info.return_value = (mock.ANY, "ext4")
+
+ create_swapfile(fname, "")
+
+ cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
+ assert "fallocate" in cmds, "fallocate was not called"
+ assert "dd" in cmds, "fallocate failure did not fallback to dd"
+
+ assert cmds.index("dd") > cmds.index(
+ "fallocate"
+ ), "dd ran before fallocate"
+
+ assert mock.call(["mkswap", fname]) in m_subp.call_args_list
+
+ msg = "fallocate swap creation failed, will attempt with dd"
+ assert msg in caplog.text
+
+
+# vi: ts=4 expandtab
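The TestCreateSwapfile cases above exercise an allocate-with-fallocate, fall-back-to-dd pattern when creating a swap file. A minimal standalone sketch of that pattern (not the cc_mounts.create_swapfile implementation itself; the function name, size argument, and logger here are illustrative):

import logging
import subprocess

LOG = logging.getLogger(__name__)


def create_swapfile_sketch(fname, size_mb):
    """Allocate a swap file, preferring fallocate and falling back to dd."""
    try:
        subprocess.run(["fallocate", "-l", "%dM" % size_mb, fname], check=True)
    except (OSError, subprocess.CalledProcessError):
        # Mirrors the log message asserted in test_fallback_from_fallocate_to_dd
        LOG.warning("fallocate swap creation failed, will attempt with dd")
        subprocess.run(
            ["dd", "if=/dev/zero", "of=" + fname, "bs=1M",
             "count=%d" % size_mb],
            check=True,
        )
    subprocess.run(["mkswap", fname], check=True)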
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/config/test_cc_ntp.py
index 6b9c8377..fba141aa 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -1,17 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_ntp
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
-
-
import copy
import os
-from os.path import dirname
import shutil
+from functools import partial
+from os.path import dirname
+
+from cloudinit import helpers, util
+from cloudinit.config import cc_ntp
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
NTP_TEMPLATE = """\
## template: jinja
@@ -35,25 +37,18 @@ class TestNtp(FilesystemMockingTestCase):
def setUp(self):
super(TestNtp, self).setUp()
self.new_root = self.tmp_dir()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
self.m_snappy.return_value = False
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
-
- def _get_cloud(self, distro, sys_cfg=None):
- self.new_root = self.reRoot(root=self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- if not sys_cfg:
- sys_cfg = {}
- mydist = cls(distro, sys_cfg, paths)
- myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths)
- return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
+ self.new_root = self.reRoot()
+ self._get_cloud = partial(
+ get_cloud, paths=helpers.Paths({"templates_dir": self.new_root})
+ )
def _get_template_path(self, template_name, distro, basepath=None):
# ntp.conf.{distro} -> ntp.conf.debian.tmpl
- template_fn = '{0}.tmpl'.format(
- template_name.replace('{distro}', distro))
+ template_fn = "{0}.tmpl".format(
+ template_name.replace("{distro}", distro)
+ )
if not basepath:
basepath = self.new_root
path = os.path.join(basepath, template_fn)
@@ -62,25 +57,25 @@ class TestNtp(FilesystemMockingTestCase):
def _generate_template(self, template=None):
if not template:
template = NTP_TEMPLATE
- confpath = os.path.join(self.new_root, 'client.conf')
- template_fn = os.path.join(self.new_root, 'client.conf.tmpl')
+ confpath = os.path.join(self.new_root, "client.conf")
+ template_fn = os.path.join(self.new_root, "client.conf.tmpl")
util.write_file(template_fn, content=template)
return (confpath, template_fn)
def _mock_ntp_client_config(self, client=None, distro=None):
if not client:
- client = 'ntp'
+ client = "ntp"
if not distro:
- distro = 'ubuntu'
+ distro = "ubuntu"
dcfg = cc_ntp.distro_ntp_client_configs(distro)
- if client == 'systemd-timesyncd':
+ if client == "systemd-timesyncd":
template = TIMESYNCD_TEMPLATE
else:
template = NTP_TEMPLATE
(confpath, _template_fn) = self._generate_template(template=template)
ntpconfig = copy.deepcopy(dcfg[client])
- ntpconfig['confpath'] = confpath
- ntpconfig['template_name'] = os.path.basename(confpath)
+ ntpconfig["confpath"] = confpath
+ ntpconfig["template_name"] = os.path.basename(confpath)
return ntpconfig
@mock.patch("cloudinit.config.cc_ntp.subp")
@@ -88,19 +83,21 @@ class TestNtp(FilesystemMockingTestCase):
"""ntp_install_client runs install_func when check_exe is absent."""
mock_subp.which.return_value = None # check_exe not found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func,
- packages=['ntpx'], check_exe='ntpdx')
- mock_subp.which.assert_called_with('ntpdx')
- install_func.assert_called_once_with(['ntpx'])
+ cc_ntp.install_ntp_client(
+ install_func, packages=["ntpx"], check_exe="ntpdx"
+ )
+ mock_subp.which.assert_called_with("ntpdx")
+ install_func.assert_called_once_with(["ntpx"])
@mock.patch("cloudinit.config.cc_ntp.subp")
def test_ntp_install_not_needed(self, mock_subp):
"""ntp_install_client doesn't install when check_exe is found."""
- client = 'chrony'
+ client = "chrony"
mock_subp.which.return_value = [client] # check_exe found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func, packages=[client],
- check_exe=client)
+ cc_ntp.install_ntp_client(
+ install_func, packages=[client], check_exe=client
+ )
install_func.assert_not_called()
@mock.patch("cloudinit.config.cc_ntp.subp")
@@ -108,26 +105,11 @@ class TestNtp(FilesystemMockingTestCase):
"""ntp_install_client runs install_func with empty list"""
mock_subp.which.return_value = None # check_exe not found
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func, packages=[],
- check_exe='timesyncd')
+ cc_ntp.install_ntp_client(
+ install_func, packages=[], check_exe="timesyncd"
+ )
install_func.assert_called_once_with([])
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_defaults(self, mock_subp):
- """Test service is restarted/reloaded (defaults)"""
- service = 'ntp_service_name'
- cmd = ['service', service, 'restart']
- cc_ntp.reload_ntp(service)
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_systemd(self, mock_subp):
- """Test service is restarted/reloaded (systemd)"""
- service = 'ntp_service_name'
- cc_ntp.reload_ntp(service, systemd=True)
- cmd = ['systemctl', 'reload-or-restart', service]
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
@@ -147,18 +129,22 @@ class TestNtp(FilesystemMockingTestCase):
def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
"""write_ntp_config_template reads from $client.conf.distro.tmpl"""
servers = []
- pools = ['10.0.0.1', '10.0.0.2']
+ pools = ["10.0.0.1", "10.0.0.2"]
(confpath, template_fn) = self._generate_template()
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.write_ntp_config_template('ubuntu',
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ "ubuntu",
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
"servers []\npools ['10.0.0.1', '10.0.0.2']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
"""write_ntp_config_template defaults pools servers upon empty config.
@@ -166,20 +152,23 @@ class TestNtp(FilesystemMockingTestCase):
When both pools and servers are empty, default NR_POOL_SERVERS get
configured.
"""
- distro = 'ubuntu'
+ distro = "ubuntu"
pools = cc_ntp.generate_server_names(distro)
servers = []
(confpath, template_fn) = self._generate_template()
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.write_ntp_config_template(distro,
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
- "servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
+ "servers []\npools {0}\n".format(pools), util.load_file(confpath)
+ )
def test_defaults_pools_empty_lists_sles(self):
"""write_ntp_config_template defaults opensuse pools upon empty config.
@@ -187,39 +176,50 @@ class TestNtp(FilesystemMockingTestCase):
When both pools and servers are empty, default NR_POOL_SERVERS get
configured.
"""
- distro = 'sles'
+ distro = "sles"
default_pools = cc_ntp.generate_server_names(distro)
(confpath, template_fn) = self._generate_template()
- cc_ntp.write_ntp_config_template(distro,
- servers=[], pools=[],
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=[],
+ pools=[],
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
for pool in default_pools:
- self.assertIn('opensuse', pool)
+ self.assertIn("opensuse", pool)
self.assertEqual(
"servers []\npools {0}\n".format(default_pools),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
self.assertIn(
"Adding distro default ntp pool servers: {0}".format(
- ",".join(default_pools)),
- self.logs.getvalue())
+ ",".join(default_pools)
+ ),
+ self.logs.getvalue(),
+ )
def test_timesyncd_template(self):
"""Test timesycnd template is correct"""
- pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
- servers = ['192.168.23.3', '192.168.23.4']
+ pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"]
+ servers = ["192.168.23.3", "192.168.23.4"]
(confpath, template_fn) = self._generate_template(
- template=TIMESYNCD_TEMPLATE)
- cc_ntp.write_ntp_config_template('ubuntu',
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ template=TIMESYNCD_TEMPLATE
+ )
+ cc_ntp.write_ntp_config_template(
+ "ubuntu",
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
"[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
def test_distro_ntp_client_configs(self):
"""Test we have updated ntp client configs on different distros"""
@@ -236,55 +236,62 @@ class TestNtp(FilesystemMockingTestCase):
result = cc_ntp.distro_ntp_client_configs(distro)
for client in delta[distro].keys():
for key in delta[distro][client].keys():
- self.assertEqual(delta[distro][client][key],
- result[client][key])
+ self.assertEqual(
+ delta[distro][client][key], result[client][key]
+ )
def _get_expected_pools(self, pools, distro, client):
- if client in ['ntp', 'chrony']:
- if client == 'ntp' and distro == 'alpine':
+ if client in ["ntp", "chrony"]:
+ if client == "ntp" and distro == "alpine":
# NTP for Alpine Linux is Busybox's ntp which does not
# support 'pool' lines in its configuration file.
expected_pools = []
else:
expected_pools = [
- 'pool {0} iburst'.format(pool) for pool in pools]
- elif client == 'systemd-timesyncd':
+ "pool {0} iburst".format(pool) for pool in pools
+ ]
+ elif client == "systemd-timesyncd":
expected_pools = " ".join(pools)
return expected_pools
def _get_expected_servers(self, servers, distro, client):
- if client in ['ntp', 'chrony']:
- if client == 'ntp' and distro == 'alpine':
+ if client in ["ntp", "chrony"]:
+ if client == "ntp" and distro == "alpine":
# NTP for Alpine Linux is Busybox's ntp which only supports
# 'server' lines without iburst option.
expected_servers = [
- 'server {0}'.format(srv) for srv in servers]
+ "server {0}".format(srv) for srv in servers
+ ]
else:
expected_servers = [
- 'server {0} iburst'.format(srv) for srv in servers]
- elif client == 'systemd-timesyncd':
+ "server {0} iburst".format(srv) for srv in servers
+ ]
+ elif client == "systemd-timesyncd":
expected_servers = " ".join(servers)
return expected_servers
def test_ntp_handler_real_distro_ntp_templates(self):
"""Test ntp handler renders the shipped distro ntp client templates."""
- pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
- servers = ['192.168.23.3', '192.168.23.4']
- for client in ['ntp', 'systemd-timesyncd', 'chrony']:
+ pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"]
+ servers = ["192.168.23.3", "192.168.23.4"]
+ for client in ["ntp", "systemd-timesyncd", "chrony"]:
for distro in cc_ntp.distros:
distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
ntpclient = distro_cfg[client]
- confpath = (
- os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
- template = ntpclient.get('template_name')
+ confpath = os.path.join(
+ self.new_root, ntpclient.get("confpath")[1:]
+ )
+ template = ntpclient.get("template_name")
# find sourcetree template file
root_dir = (
- dirname(dirname(os.path.realpath(util.__file__))) +
- '/templates')
- source_fn = self._get_template_path(template, distro,
- basepath=root_dir)
+ dirname(dirname(os.path.realpath(util.__file__)))
+ + "/templates"
+ )
+ source_fn = self._get_template_path(
+ template, distro, basepath=root_dir
+ )
template_fn = self._get_template_path(template, distro)
# don't fail if cloud-init doesn't have a template for
# a distro,client pair
@@ -292,64 +299,77 @@ class TestNtp(FilesystemMockingTestCase):
continue
# Create a copy in our tmp_dir
shutil.copy(source_fn, template_fn)
- cc_ntp.write_ntp_config_template(distro, servers=servers,
- pools=pools, path=confpath,
- template_fn=template_fn)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ )
content = util.load_file(confpath)
- if client in ['ntp', 'chrony']:
+ if client in ["ntp", "chrony"]:
content_lines = content.splitlines()
- expected_servers = self._get_expected_servers(servers,
- distro,
- client)
- print('distro=%s client=%s' % (distro, client))
+ expected_servers = self._get_expected_servers(
+ servers, distro, client
+ )
+ print("distro=%s client=%s" % (distro, client))
for sline in expected_servers:
- self.assertIn(sline, content_lines,
- ('failed to render {0} conf'
- ' for distro:{1}'.format(client,
- distro)))
- expected_pools = self._get_expected_pools(pools, distro,
- client)
+ self.assertIn(
+ sline,
+ content_lines,
+ "failed to render {0} conf for distro:{1}".format(
+ client, distro
+ ),
+ )
+ expected_pools = self._get_expected_pools(
+ pools, distro, client
+ )
if expected_pools != []:
for pline in expected_pools:
- self.assertIn(pline, content_lines,
- ('failed to render {0} conf'
- ' for distro:{1}'.format(client,
- distro)))
- elif client == 'systemd-timesyncd':
- expected_servers = self._get_expected_servers(servers,
- distro,
- client)
- expected_pools = self._get_expected_pools(pools,
- distro,
- client)
+ self.assertIn(
+ pline,
+ content_lines,
+ "failed to render {0} conf"
+ " for distro:{1}".format(client, distro),
+ )
+ elif client == "systemd-timesyncd":
+ expected_servers = self._get_expected_servers(
+ servers, distro, client
+ )
+ expected_pools = self._get_expected_pools(
+ pools, distro, client
+ )
expected_content = (
- "# cloud-init generated file\n" +
- "# See timesyncd.conf(5) for details.\n\n" +
- "[Time]\nNTP=%s %s \n" % (expected_servers,
- expected_pools))
+ "# cloud-init generated file\n"
+ + "# See timesyncd.conf(5) for details.\n\n"
+ + "[Time]\nNTP=%s %s \n"
+ % (expected_servers, expected_pools)
+ )
self.assertEqual(expected_content, content)
def test_no_ntpcfg_does_nothing(self):
"""When no ntp section is defined handler logs a warning and noops."""
- cc_ntp.handle('cc_ntp', {}, None, None, [])
+ cc_ntp.handle("cc_ntp", {}, None, None, [])
self.assertEqual(
- 'DEBUG: Skipping module named cc_ntp, '
- 'not present or disabled by cfg\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_allows_empty_ntp_config(self,
- m_select):
+ "DEBUG: Skipping module named cc_ntp, "
+ "not present or disabled by cfg\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_allows_empty_ntp_config(
+ self, m_select
+ ):
"""Ntp schema validation allows for an empty ntp: configuration."""
- valid_empty_configs = [{'ntp': {}}, {'ntp': None}]
+ valid_empty_configs = [{"ntp": {}}, {"ntp": None}]
for valid_empty_config in valid_empty_configs:
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
- if distro == 'alpine':
+ cc_ntp.handle("cc_ntp", valid_empty_config, mycloud, None, [])
+ if distro == "alpine":
# _mock_ntp_client_config call above did not specify a
# client value and so it defaults to "ntp" which on
# Alpine Linux only supports servers and not pools.
@@ -357,213 +377,243 @@ class TestNtp(FilesystemMockingTestCase):
servers = cc_ntp.generate_server_names(mycloud.distro.name)
self.assertEqual(
"servers {0}\npools []\n".format(servers),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
else:
pools = cc_ntp.generate_server_names(mycloud.distro.name)
self.assertEqual(
"servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
- self.assertNotIn('Invalid config:', self.logs.getvalue())
+ util.load_file(confpath),
+ )
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
- m_sel):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_non_string_item_type(
+ self, m_sel
+ ):
"""Ntp schema validation warns of non-strings in pools or servers.
Schema validation is not strict, so ntp config is still rendered.
"""
- invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
+ invalid_config = {"ntp": {"pools": [123], "servers": ["valid", None]}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_sel.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
- "ntp.servers.1: None is not of type 'string'",
- self.logs.getvalue())
- self.assertEqual("servers ['valid', None]\npools [123]\n",
- util.load_file(confpath))
+ "Invalid cloud-config provided:\nntp.pools.0: 123 is not of"
+ " type 'string'\nntp.servers.1: None is not of type 'string'",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ "servers ['valid', None]\npools [123]\n",
+ util.load_file(confpath),
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_of_non_array_type(self,
- m_select):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_of_non_array_type(
+ self, m_select
+ ):
"""Ntp schema validation warns of non-array pools or servers types.
Schema validation is not strict, so ntp config is still rendered.
"""
- invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
+ invalid_config = {"ntp": {"pools": 123, "servers": "non-array"}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
- "ntp.servers: 'non-array' is not of type 'array'",
- self.logs.getvalue())
- self.assertEqual("servers non-array\npools 123\n",
- util.load_file(confpath))
+ "Invalid cloud-config provided:\nntp.pools: 123 is not of type"
+ " 'array'\nntp.servers: 'non-array' is not of type 'array'",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ "servers non-array\npools 123\n", util.load_file(confpath)
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_invalid_key_present(self,
- m_select):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_invalid_key_present(
+ self, m_select
+ ):
"""Ntp schema validation warns of invalid keys present in ntp config.
Schema validation is not strict, so ntp config is still rendered.
"""
invalid_config = {
- 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
+ "ntp": {"invalidkey": 1, "pools": ["0.mycompany.pool.ntp.org"]}
+ }
for distro in cc_ntp.distros:
- if distro != 'alpine':
+ if distro != "alpine":
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp: Additional properties are not "
- "allowed ('invalidkey' was unexpected)",
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nntp: Additional"
+ " properties are not allowed ('invalidkey' was"
+ " unexpected)",
+ self.logs.getvalue(),
+ )
self.assertEqual(
"servers []\npools ['0.mycompany.pool.ntp.org']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
"""Ntp schema validation warns of duplicates in servers or pools.
Schema validation is not strict, so ntp config is still rendered.
"""
invalid_config = {
- 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
- 'servers': ['10.0.0.1', '10.0.0.1']}}
+ "ntp": {
+ "pools": ["0.mypool.org", "0.mypool.org"],
+ "servers": ["10.0.0.1", "10.0.0.1"],
+ }
+ }
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']"
- " has non-unique elements\nntp.servers: "
+ "Invalid cloud-config provided:\nntp.pools: ['0.mypool.org',"
+ " '0.mypool.org'] has non-unique elements\nntp.servers: "
"['10.0.0.1', '10.0.0.1'] has non-unique elements",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertEqual(
"servers ['10.0.0.1', '10.0.0.1']\n"
"pools ['0.mypool.org', '0.mypool.org']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_timesyncd(self, m_select):
"""Test ntp handler configures timesyncd"""
- servers = ['192.168.2.1', '192.168.2.2']
- pools = ['0.mypool.org']
- cfg = {'ntp': {'servers': servers, 'pools': pools}}
- client = 'systemd-timesyncd'
+ servers = ["192.168.2.1", "192.168.2.2"]
+ pools = ["0.mypool.org"]
+ cfg = {"ntp": {"servers": servers, "pools": pools}}
+ client = "systemd-timesyncd"
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro,
- client=client)
- confpath = ntpconfig['confpath']
+ ntpconfig = self._mock_ntp_client_config(
+ distro=distro, client=client
+ )
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', cfg, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", cfg, mycloud, None, [])
self.assertEqual(
"[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_enabled_false(self, m_select):
- """Test ntp handler does not run if enabled: false """
- cfg = {'ntp': {'enabled': False}}
+ """Test ntp handler does not run if enabled: false"""
+ cfg = {"ntp": {"enabled": False}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
+ @mock.patch("cloudinit.distros.subp")
@mock.patch("cloudinit.config.cc_ntp.subp")
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp):
- """Test enabled config renders template, and restarts service """
- cfg = {'ntp': {'enabled': True}}
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
+ """Test enabled config renders template, and restarts service"""
+ cfg = {"ntp": {"enabled": True}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
- service_name = ntpconfig['service_name']
+ confpath = ntpconfig["confpath"]
+ service_name = ntpconfig["service_name"]
m_select.return_value = ntpconfig
hosts = cc_ntp.generate_server_names(mycloud.distro.name)
uses_systemd = True
- expected_service_call = ['systemctl', 'reload-or-restart',
- service_name]
+ expected_service_call = [
+ "systemctl",
+ "reload-or-restart",
+ service_name,
+ ]
expected_content = "servers []\npools {0}\n".format(hosts)
- if distro == 'alpine':
+ if distro == "alpine":
uses_systemd = False
- expected_service_call = ['service', service_name, 'restart']
+ expected_service_call = ["rc-service", service_name, "restart"]
# _mock_ntp_client_config call above did not specify a client
# value and so it defaults to "ntp" which on Alpine Linux only
# supports servers and not pools.
expected_content = "servers {0}\npools []\n".format(hosts)
m_sysd.return_value = uses_systemd
- with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
+ with mock.patch("cloudinit.config.cc_ntp.util") as m_util:
# allow use of util.mergemanydict
m_util.mergemanydict.side_effect = util.mergemanydict
# default client is present
m_subp.which.return_value = True
# use the config 'enabled' value
m_util.is_false.return_value = util.is_false(
- cfg['ntp']['enabled'])
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
- m_subp.subp.assert_called_with(
- expected_service_call, capture=True)
+ cfg["ntp"]["enabled"]
+ )
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ m_dsubp.subp.assert_called_with(
+ expected_service_call, capture=True
+ )
self.assertEqual(expected_content, util.load_file(confpath))
- def test_opensuse_picks_chrony(self):
+ @mock.patch("cloudinit.util.system_info")
+ def test_opensuse_picks_chrony(self, m_sysinfo):
"""Test opensuse picks chrony or ntp on certain distro versions"""
# < 15.0 => ntp
- self.m_sysinfo.return_value = {'dist':
- ('openSUSE', '13.2', 'Harlequin')}
- mycloud = self._get_cloud('opensuse')
+ m_sysinfo.return_value = {"dist": ("openSUSE", "13.2", "Harlequin")}
+ mycloud = self._get_cloud("opensuse")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('ntp', expected_client)
+ self.assertEqual("ntp", expected_client)
# >= 15.0 and not openSUSE => chrony
- self.m_sysinfo.return_value = {'dist':
- ('SLES', '15.0',
- 'SUSE Linux Enterprise Server 15')}
- mycloud = self._get_cloud('sles')
+ m_sysinfo.return_value = {
+ "dist": ("SLES", "15.0", "SUSE Linux Enterprise Server 15")
+ }
+ mycloud = self._get_cloud("sles")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('chrony', expected_client)
+ self.assertEqual("chrony", expected_client)
# >= 15.0 and openSUSE and ver != 42 => chrony
- self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed',
- '20180326',
- 'timbleweed')}
- mycloud = self._get_cloud('opensuse')
+ m_sysinfo.return_value = {
+ "dist": ("openSUSE Tumbleweed", "20180326", "timbleweed")
+ }
+ mycloud = self._get_cloud("opensuse")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('chrony', expected_client)
+ self.assertEqual("chrony", expected_client)
- def test_ubuntu_xenial_picks_ntp(self):
+ @mock.patch("cloudinit.util.system_info")
+ def test_ubuntu_xenial_picks_ntp(self, m_sysinfo):
"""Test Ubuntu picks ntp on xenial release"""
- self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
- mycloud = self._get_cloud('ubuntu')
+ m_sysinfo.return_value = {"dist": ("Ubuntu", "16.04", "xenial")}
+ mycloud = self._get_cloud("ubuntu")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('ntp', expected_client)
+ self.assertEqual("ntp", expected_client)
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_snappy_system_picks_timesyncd(self, m_which):
"""Test snappy systems prefer installed clients"""
@@ -571,26 +621,27 @@ class TestNtp(FilesystemMockingTestCase):
self.m_snappy.return_value = True
# ubuntu core systems will have timesyncd installed
- m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
- None, None, None])
- distro = 'ubuntu'
+ m_which.side_effect = iter(
+ [None, "/lib/systemd/systemd-timesyncd", None, None, None]
+ )
+ distro = "ubuntu"
mycloud = self._get_cloud(distro)
distro_configs = cc_ntp.distro_ntp_client_configs(distro)
- expected_client = 'systemd-timesyncd'
+ expected_client = "systemd-timesyncd"
expected_cfg = distro_configs[expected_client]
expected_calls = []
# we only get to timesyncd
for client in mycloud.distro.preferred_ntp_clients[0:2]:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
+ expected_calls.append(mock.call(cfg["check_exe"]))
result = cc_ntp.select_ntp_client(None, mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
self.assertEqual(sorted(expected_cfg), sorted(result))
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_distro_searches_all_preferred_clients(self, m_which):
- """Test select_ntp_client search all distro perferred clients """
+ """Test select_ntp_client search all distro perferred clients"""
# nothing is installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -601,12 +652,12 @@ class TestNtp(FilesystemMockingTestCase):
expected_calls = []
for client in mycloud.distro.preferred_ntp_clients:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
+ expected_calls.append(mock.call(cfg["check_exe"]))
cc_ntp.select_ntp_client({}, mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
"""Test user_cfg.ntp_client='auto' defaults to distro search"""
# nothing is installed
@@ -619,34 +670,36 @@ class TestNtp(FilesystemMockingTestCase):
expected_calls = []
for client in mycloud.distro.preferred_ntp_clients:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
- cc_ntp.select_ntp_client('auto', mycloud.distro)
+ expected_calls.append(mock.call(cfg["check_exe"]))
+ cc_ntp.select_ntp_client("auto", mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
- @mock.patch('cloudinit.cloud.Cloud.get_template_filename')
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
- def test_ntp_custom_client_overrides_installed_clients(self, m_which,
- m_tmpfn, m_write):
- """Test user client is installed despite other clients present """
- client = 'ntpdate'
- cfg = {'ntp': {'ntp_client': client}}
+ @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template")
+ @mock.patch("cloudinit.cloud.Cloud.get_template_filename")
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
+ def test_ntp_custom_client_overrides_installed_clients(
+ self, m_which, m_tmpfn, m_write
+ ):
+ """Test user client is installed despite other clients present"""
+ client = "ntpdate"
+ cfg = {"ntp": {"ntp_client": client}}
for distro in cc_ntp.distros:
# client is not installed
m_which.side_effect = iter([None])
mycloud = self._get_cloud(distro)
- with mock.patch.object(mycloud.distro,
- 'install_packages') as m_install:
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ with mock.patch.object(
+ mycloud.distro, "install_packages"
+ ) as m_install:
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
m_install.assert_called_with([client])
m_which.assert_called_with(client)
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
"""Test distro system_config overrides builtin preferred ntp clients"""
- system_client = 'chrony'
- sys_cfg = {'ntp_client': system_client}
+ system_client = "chrony"
+ sys_cfg = {"ntp_client": system_client}
# no clients installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -657,12 +710,12 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_user_config_overrides_system_cfg(self, m_which):
"""Test user-data overrides system_config ntp_client"""
- system_client = 'chrony'
- sys_cfg = {'ntp_client': system_client}
- user_client = 'systemd-timesyncd'
+ system_client = "chrony"
+ sys_cfg = {"ntp_client": system_client}
+ user_client = "systemd-timesyncd"
# no clients installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -673,114 +726,145 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
- @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
- def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
- custom = r'\n#MyCustomTemplate'
+ @mock.patch("cloudinit.config.cc_ntp.install_ntp_client")
+ def test_ntp_user_provided_config_with_template(self, m_install):
+ custom = r"\n#MyCustomTemplate"
user_template = NTP_TEMPLATE + custom
- confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
+ confpath = os.path.join(self.new_root, "etc/myntp/myntp.conf")
cfg = {
- 'ntp': {
- 'pools': ['mypool.org'],
- 'ntp_client': 'myntpd',
- 'config': {
- 'check_exe': 'myntpd',
- 'confpath': confpath,
- 'packages': ['myntp'],
- 'service_name': 'myntp',
- 'template': user_template,
- }
+ "ntp": {
+ "pools": ["mypool.org"],
+ "ntp_client": "myntpd",
+ "config": {
+ "check_exe": "myntpd",
+ "confpath": confpath,
+ "packages": ["myntp"],
+ "service_name": "myntp",
+ "template": user_template,
+ },
}
}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
- util.load_file(confpath))
-
- @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
- @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_user_provided_config_template_only(self, m_select, m_install,
- m_reload, m_schema):
+ util.load_file(confpath),
+ )
+
+ @mock.patch("cloudinit.config.cc_ntp.supplemental_schema_validation")
+ @mock.patch("cloudinit.config.cc_ntp.install_ntp_client")
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_user_provided_config_template_only(
+ self, m_select, m_install, m_schema
+ ):
"""Test custom template for default client"""
- custom = r'\n#MyCustomTemplate'
+ custom = r"\n#MyCustomTemplate"
user_template = NTP_TEMPLATE + custom
- client = 'chrony'
+ client = "chrony"
cfg = {
- 'pools': ['mypool.org'],
- 'ntp_client': client,
- 'config': {
- 'template': user_template,
- }
+ "pools": ["mypool.org"],
+ "ntp_client": client,
+ "config": {
+ "template": user_template,
+ },
}
expected_merged_cfg = {
- 'check_exe': 'chronyd',
- 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
- 'template_name': 'client.conf', 'template': user_template,
- 'service_name': 'chrony', 'packages': ['chrony']}
+ "check_exe": "chronyd",
+ "confpath": "{tmpdir}/client.conf".format(tmpdir=self.new_root),
+ "template_name": "client.conf",
+ "template": user_template,
+ "service_name": "chrony",
+ "packages": ["chrony"],
+ }
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(client=client,
- distro=distro)
- confpath = ntpconfig['confpath']
+ ntpconfig = self._mock_ntp_client_config(
+ client=client, distro=distro
+ )
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle('notimportant',
- {'ntp': cfg}, mycloud, None, None)
+ cc_ntp.handle(
+ "notimportant", {"ntp": cfg}, mycloud, None, None
+ )
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
m_schema.assert_called_with(expected_merged_cfg)
class TestSupplementalSchemaValidation(CiTestCase):
-
def test_error_on_missing_keys(self):
"""ValueError raised reporting any missing required ntp:config keys"""
cfg = {}
- match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
- ' keys: check_exe, confpath, packages, service_name')
+ match = (
+ r"Invalid ntp configuration:\\nMissing required ntp:config"
+ " keys: check_exe, confpath, packages, service_name"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_requiring_either_template_or_template_name(self):
"""ValueError raised if both template not template_name are None."""
- cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
- 'template': None, 'template_name': None, 'packages': []}
- match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
- ' or ntp:config:template_name values are required')
+ cfg = {
+ "confpath": "someconf",
+ "check_exe": "",
+ "service_name": "",
+ "template": None,
+ "template_name": None,
+ "packages": [],
+ }
+ match = (
+ r"Invalid ntp configuration:\\nEither ntp:config:template"
+ " or ntp:config:template_name values are required"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_on_non_list_values(self):
"""ValueError raised when packages is not of type list."""
- cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
- 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
- match = (r'Invalid ntp configuration:\\nExpected a list of required'
- ' package names for ntp:config:packages. Found \\(NOPE\\)')
+ cfg = {
+ "confpath": "someconf",
+ "check_exe": "",
+ "service_name": "",
+ "template": "asdf",
+ "template_name": None,
+ "packages": "NOPE",
+ }
+ match = (
+ r"Invalid ntp configuration:\\nExpected a list of required"
+ " package names for ntp:config:packages. Found \\(NOPE\\)"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_on_non_string_values(self):
"""ValueError raised for any values expected as string type."""
- cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
- 'template': 4, 'template_name': 5, 'packages': []}
+ cfg = {
+ "confpath": 1,
+ "check_exe": 2,
+ "service_name": 3,
+ "template": 4,
+ "template_name": 5,
+ "packages": [],
+ }
errors = [
- 'Expected a config file path ntp:config:confpath. Found (1)',
- 'Expected a string type for ntp:config:check_exe. Found (2)',
- 'Expected a string type for ntp:config:service_name. Found (3)',
- 'Expected a string type for ntp:config:template. Found (4)',
- 'Expected a string type for ntp:config:template_name. Found (5)']
+ "Expected a config file path ntp:config:confpath. Found (1)",
+ "Expected a string type for ntp:config:check_exe. Found (2)",
+ "Expected a string type for ntp:config:service_name. Found (3)",
+ "Expected a string type for ntp:config:template. Found (4)",
+ "Expected a string type for ntp:config:template_name. Found (5)",
+ ]
with self.assertRaises(ValueError) as context_mgr:
cc_ntp.supplemental_schema_validation(cfg)
error_msg = str(context_mgr.exception)
for error in errors:
self.assertIn(error, error_msg)
+
# vi: ts=4 expandtab
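The select_ntp_client tests above all rely on the same behaviour: probe each of the distro's preferred clients via its check_exe and pick the first one that is installed, falling back to the first preference when nothing is found. A rough standalone sketch of that selection loop (the client table below is illustrative, not the cc_ntp.distro_ntp_client_configs data):

import shutil

# Illustrative per-client config keyed by client name.
CLIENT_CONFIGS = {
    "chrony": {"check_exe": "chronyd", "service_name": "chrony"},
    "systemd-timesyncd": {
        "check_exe": "systemd-timesyncd",
        "service_name": "systemd-timesyncd",
    },
    "ntp": {"check_exe": "ntpd", "service_name": "ntp"},
}


def select_client_sketch(preferred, configs=CLIENT_CONFIGS):
    """Return the config of the first preferred client whose binary exists."""
    for name in preferred:
        cfg = configs.get(name)
        if cfg and shutil.which(cfg["check_exe"]):
            return cfg
    # Nothing installed: return the first preference so the caller can
    # install it (the "no clients installed" tests exercise this branch).
    return configs[preferred[0]]


# Example: on an image where only timesyncd is present, probing
# ["chrony", "systemd-timesyncd", "ntp"] selects systemd-timesyncd.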
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/config/test_cc_power_state_change.py
index 4ac49424..47eb0d58 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -2,21 +2,18 @@
import sys
+from cloudinit import distros, helpers
from cloudinit.config import cc_power_state_change as psc
-
-from cloudinit import distros
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as t_help
-from cloudinit.tests.helpers import mock
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
class TestLoadPowerState(t_help.TestCase):
def setUp(self):
super(TestLoadPowerState, self).setUp()
- cls = distros.fetch('ubuntu')
+ cls = distros.fetch("ubuntu")
paths = helpers.Paths({})
- self.dist = cls('ubuntu', {}, paths)
+ self.dist = cls("ubuntu", {}, paths)
def test_no_config(self):
# completely empty config should mean do nothing
@@ -25,85 +22,86 @@ class TestLoadPowerState(t_help.TestCase):
def test_irrelevant_config(self):
# no power_state field in config should return None for cmd
- (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
- self.dist)
+ (cmd, _timeout, _condition) = psc.load_power_state(
+ {"foo": "bar"}, self.dist
+ )
self.assertIsNone(cmd)
def test_invalid_mode(self):
- cfg = {'power_state': {'mode': 'gibberish'}}
+ cfg = {"power_state": {"mode": "gibberish"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
- cfg = {'power_state': {'mode': ''}}
+ cfg = {"power_state": {"mode": ""}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_empty_mode(self):
- cfg = {'power_state': {'message': 'goodbye'}}
+ cfg = {"power_state": {"message": "goodbye"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_modes(self):
- cfg = {'power_state': {}}
- for mode in ('halt', 'poweroff', 'reboot'):
- cfg['power_state']['mode'] = mode
+ cfg = {"power_state": {}}
+ for mode in ("halt", "poweroff", "reboot"):
+ cfg["power_state"]["mode"] = mode
check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode)
def test_invalid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
+ cfg = {"power_state": {"mode": "poweroff", "delay": "goodbye"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
+ cfg = {"power_state": {"mode": "poweroff", "delay": ""}}
for delay in ("now", "+1", "+30"):
- cfg['power_state']['delay'] = delay
+ cfg["power_state"]["delay"] = delay
check_lps_ret(psc.load_power_state(cfg, self.dist))
def test_message_present(self):
- cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
+ cfg = {"power_state": {"mode": "poweroff", "message": "GOODBYE"}}
ret = psc.load_power_state(cfg, self.dist)
check_lps_ret(psc.load_power_state(cfg, self.dist))
- self.assertIn(cfg['power_state']['message'], ret[0])
+ self.assertIn(cfg["power_state"]["message"], ret[0])
def test_no_message(self):
# if message is not present, then no argument should be passed for it
- cfg = {'power_state': {'mode': 'poweroff'}}
+ cfg = {"power_state": {"mode": "poweroff"}}
(cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist)
self.assertNotIn("", cmd)
check_lps_ret(psc.load_power_state(cfg, self.dist))
self.assertTrue(len(cmd) == 3)
def test_condition_null_raises(self):
- cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
+ cfg = {"power_state": {"mode": "poweroff", "condition": None}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_condition_default_is_true(self):
- cfg = {'power_state': {'mode': 'poweroff'}}
+ cfg = {"power_state": {"mode": "poweroff"}}
_cmd, _timeout, cond = psc.load_power_state(cfg, self.dist)
self.assertEqual(cond, True)
def test_freebsd_poweroff_uses_lowercase_p(self):
- cls = distros.fetch('freebsd')
+ cls = distros.fetch("freebsd")
paths = helpers.Paths({})
- freebsd = cls('freebsd', {}, paths)
- cfg = {'power_state': {'mode': 'poweroff'}}
+ freebsd = cls("freebsd", {}, paths)
+ cfg = {"power_state": {"mode": "poweroff"}}
ret = psc.load_power_state(cfg, freebsd)
- self.assertIn('-p', ret[0])
+ self.assertIn("-p", ret[0])
def test_alpine_delay(self):
# alpine takes delay in seconds.
- cls = distros.fetch('alpine')
+ cls = distros.fetch("alpine")
paths = helpers.Paths({})
- alpine = cls('alpine', {}, paths)
- cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
- for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)):
- cfg['power_state']['delay'] = delay
+ alpine = cls("alpine", {}, paths)
+ cfg = {"power_state": {"mode": "poweroff", "delay": ""}}
+ for delay, value in (("now", 0), ("+1", 60), ("+30", 1800)):
+ cfg["power_state"]["delay"] = delay
ret = psc.load_power_state(cfg, alpine)
- self.assertEqual('-d', ret[0][1])
+ self.assertEqual("-d", ret[0][1])
self.assertEqual(str(value), ret[0][2])
class TestCheckCondition(t_help.TestCase):
def cmd_with_exit(self, rc):
- return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
+ return [sys.executable, "-c", "import sys; sys.exit(%s)" % rc]
def test_true_is_true(self):
self.assertEqual(psc.check_condition(True), True)
@@ -120,7 +118,8 @@ class TestCheckCondition(t_help.TestCase):
def test_cmd_exit_nonzero_warns(self):
mocklog = mock.Mock()
self.assertEqual(
- psc.check_condition(self.cmd_with_exit(2), mocklog), False)
+ psc.check_condition(self.cmd_with_exit(2), mocklog), False
+ )
self.assertEqual(mocklog.warning.call_count, 1)
@@ -133,14 +132,14 @@ def check_lps_ret(psc_return, mode=None):
timeout = psc_return[1]
condition = psc_return[2]
- if 'shutdown' not in psc_return[0][0]:
+ if "shutdown" not in psc_return[0][0]:
errs.append("string 'shutdown' not in cmd")
if condition is None:
errs.append("condition was not returned")
if mode is not None:
- opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
+ opt = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}[mode]
if opt not in psc_return[0]:
errs.append("opt '%s' not in cmd: %s" % (opt, cmd))
@@ -154,6 +153,7 @@ def check_lps_ret(psc_return, mode=None):
if len(errs):
lines = ["Errors in result: %s" % str(psc_return)] + errs
- raise Exception('\n'.join(lines))
+ raise Exception("\n".join(lines))
+
# vi: ts=4 expandtab
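The TestLoadPowerState cases above reduce to building a shutdown command from a mode, a delay, and an optional message, which check_lps_ret() then inspects. A simplified standalone sketch of that construction (not the cc_power_state_change implementation; only the shutdown(8) flag mapping and delay validation that the tests assert on):

def build_shutdown_command(mode, delay="now", message=None):
    """Build an argv list of the shape check_lps_ret() expects."""
    flags = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}
    if mode not in flags:
        raise TypeError("power_state mode must be one of %s" % list(flags))
    if delay != "now" and not str(delay).startswith("+"):
        raise TypeError("power_state delay must be 'now' or '+N' (minutes)")
    cmd = ["shutdown", flags[mode], str(delay)]
    if message:
        cmd.append(message)
    return cmd


# Example mirroring test_message_present: the message ends up in the argv.
assert "GOODBYE" in build_shutdown_command("poweroff", "now", "GOODBYE")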
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
new file mode 100644
index 00000000..2c4481da
--- /dev/null
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -0,0 +1,450 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import textwrap
+
+from cloudinit import util
+from cloudinit.config import cc_puppet
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+@mock.patch("cloudinit.config.cc_puppet.subp.subp")
+@mock.patch("cloudinit.config.cc_puppet.os")
+class TestAutostartPuppet(CiTestCase):
+ def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
+ """Update /etc/default/puppet to autostart if it exists."""
+
+ def _fake_exists(path):
+ return path == "/etc/default/puppet"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ self.assertEqual(
+ [
+ mock.call(
+ [
+ "sed",
+ "-i",
+ "-e",
+ "s/^START=.*/START=yes/",
+ "/etc/default/puppet",
+ ],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp):
+ """If systemctl is present, enable puppet via systemctl."""
+
+ def _fake_exists(path):
+ return path == "/bin/systemctl"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ expected_calls = [
+ mock.call(
+ ["/bin/systemctl", "enable", "puppet.service"], capture=False
+ )
+ ]
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+
+ def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp):
+ """If chkconfig is present, enable puppet via checkcfg."""
+
+ def _fake_exists(path):
+ return path == "/sbin/chkconfig"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ expected_calls = [
+ mock.call(["/sbin/chkconfig", "puppet", "on"], capture=False)
+ ]
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+
+
+@mock.patch("cloudinit.config.cc_puppet._autostart_puppet")
+class TestPuppetHandle(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestPuppetHandle, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.conf = self.tmp_path("puppet.conf")
+ self.csr_attributes_path = self.tmp_path("csr_attributes.yaml")
+ self.cloud = get_cloud()
+
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ """Cloud-config containing no 'puppet' key is skipped."""
+
+ cfg = {}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertIn("no 'puppet' configuration found", self.logs.getvalue())
+ self.assertEqual(0, m_auto.call_count)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ """Cloud-config 'puppet' configuration starts puppet."""
+
+ cfg = {"puppet": {"install": False}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ """Cloud-config empty 'puppet' configuration installs latest puppet."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", None))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
+ """Cloud-config with 'puppet' key installs when 'install' is True."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"install": True}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", None))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio'."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"install": True, "install_type": "aio"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, True)
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_version(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'version' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "version": "6.24.0",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_collection(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'collection' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "collection": "puppet6",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL, None, "puppet6", True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_custom_url(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs puppet
+ when 'install_type' is 'aio' and 'aio_install_url' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "aio_install_url": "http://test.url/path/to/script.sh",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ "http://test.url/path/to/script.sh", None, None, True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_without_cleanup(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs puppet
+ when 'install_type' is 'aio' and no cleanup."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "cleanup": False,
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, False)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_version(self, m_subp, _):
+ """Cloud-config 'puppet' configuration can specify a version."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"version": "3.8"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", "3.8"))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.get_config_value")
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_updates_puppet_conf(
+ self, m_subp, m_default, m_auto
+ ):
+ """When 'conf' is provided update values in PUPPET_CONF_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.conf
+
+ m_default.side_effect = _fake_get_config_value
+
+ cfg = {
+ "puppet": {
+ "conf": {"agent": {"server": "puppetserver.example.org"}}
+ }
+ }
+ util.write_file(self.conf, "[agent]\nserver = origpuppet\nother = 3")
+ self.cloud.distro = mock.MagicMock()
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ content = util.load_file(self.conf)
+ expected = "[agent]\nserver = puppetserver.example.org\nother = 3\n\n"
+ self.assertEqual(expected, content)
+
+ @mock.patch("cloudinit.config.cc_puppet.get_config_value")
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp")
+ def test_puppet_writes_csr_attributes_file(
+ self, m_subp, m_default, m_auto
+ ):
+ """When csr_attributes is provided, a file is
+ created at PUPPET_CSR_ATTRIBUTES_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.csr_attributes_path
+
+ m_default.side_effect = _fake_get_config_value
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "csr_attributes": {
+ "custom_attributes": {
+ "1.2.840.113549.1.9.7": (
+ "342thbjkt82094y0uthhor289jnqthpc2290"
+ )
+ },
+ "extension_requests": {
+ "pp_uuid": "ED803750-E3C7-44F5-BB08-41A04433FE2E",
+ "pp_image_name": "my_ami_image",
+ "pp_preshared_key": (
+ "342thbjkt82094y0uthhor289jnqthpc2290"
+ ),
+ },
+ }
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ content = util.load_file(self.csr_attributes_path)
+ expected = textwrap.dedent(
+ """\
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ """
+ )
+ self.assertEqual(expected, content)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+
+ cfg = {"puppet": {"exec": True}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["puppet", "agent", "--test"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_starts_puppetd(self, m_subp, m_auto):
+ """Start the puppet service by default when 'puppet' config exists."""
+
+ cfg = {"puppet": {}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_skips_puppetd(self, m_subp, m_auto):
+ """Do not start the puppet service when 'start_service' is False."""
+
+ cfg = {"puppet": {"start_service": False}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(0, m_auto.call_count)
+ self.assertNotIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_list_if_requested(
+ self, m_subp, m_auto
+ ):
+ """Run puppet with 'exec_args' list if 'exec' is set to True."""
+
+ cfg = {
+ "puppet": {
+ "exec": True,
+ "exec_args": ["--onetime", "--detailed-exitcodes"],
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [
+ mock.call(
+ ["puppet", "agent", "--onetime", "--detailed-exitcodes"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_string_if_requested(
+ self, m_subp, m_auto
+ ):
+ """Run puppet with 'exec_args' string if 'exec' is set to True."""
+
+ cfg = {
+ "puppet": {
+ "exec": True,
+ "exec_args": "--onetime --detailed-exitcodes",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [
+ mock.call(
+ ["puppet", "agent", "--onetime", "--detailed-exitcodes"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+
+URL_MOCK = mock.Mock()
+URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+
+
+@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=(None, None))
+@mock.patch(
+ "cloudinit.config.cc_puppet.url_helper.readurl",
+ return_value=URL_MOCK,
+ autospec=True,
+)
+class TestInstallPuppetAio(HttprettyTestCase):
+ def test_install_with_default_arguments(self, m_readurl, m_subp):
+ """Install AIO with no arguments"""
+ cc_puppet.install_puppet_aio()
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_custom_url(self, m_readurl, m_subp):
+ """Install AIO from custom URL"""
+ cc_puppet.install_puppet_aio("http://custom.url/path/to/script.sh")
+ m_readurl.assert_called_with(
+ url="http://custom.url/path/to/script.sh", retries=5
+ )
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_version(self, m_readurl, m_subp):
+ """Install AIO with specific version"""
+ cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, "7.6.0")
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "-v", "7.6.0", "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_collection(self, m_readurl, m_subp):
+ """Install AIO with specific collection"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, "puppet6-nightly"
+ )
+
+ self.assertEqual(
+ [
+ mock.call(
+ [mock.ANY, "-c", "puppet6-nightly", "--cleanup"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_no_cleanup(self, m_readurl, m_subp):
+ """Install AIO with no cleanup"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, None, False
+ )
+
+ self.assertEqual(
+ [mock.call([mock.ANY], capture=False)], m_subp.call_args_list
+ )
diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
new file mode 100644
index 00000000..e038f814
--- /dev/null
+++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
@@ -0,0 +1,157 @@
+import logging
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
+
+LOG = logging.getLogger(__name__)
+MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
+NET_INFO = {
+ "lo": {
+ "ipv4": [
+ {
+ "ip": "127.0.0.1",
+ "bcast": "",
+ "mask": "255.0.0.0",
+ "scope": "host",
+ }
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": "",
+ "up": "True",
+ },
+ "env2": {
+ "ipv4": [
+ {
+ "ip": "8.0.0.19",
+ "bcast": "8.0.0.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8220/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:20",
+ "up": "True",
+ },
+ "env3": {
+ "ipv4": [
+ {
+ "ip": "90.0.0.14",
+ "bcast": "90.0.0.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8221/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:21",
+ "up": "True",
+ },
+ "env4": {
+ "ipv4": [
+ {
+ "ip": "9.114.23.7",
+ "bcast": "9.114.23.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8222/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:22",
+ "up": "True",
+ },
+ "env5": {
+ "ipv4": [],
+ "ipv6": [{"ip": "fe80::9c26:c3ff:fea4:62c8/64", "scope6": "link"}],
+ "hwaddr": "42:20:86:df:fa:4c",
+ "up": "True",
+ },
+}
+
+
+class TestRsctNodeFile(t_help.CiTestCase):
+ def test_disable_ipv6_interface(self):
+ """test parsing of iface files."""
+ fname = self.tmp_path("iface-eth5")
+ util.write_file(
+ fname,
+ dedent(
+ """\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ IPV6INIT=yes
+ IPADDR6=fe80::9c26:c3ff:fea4:62c8/64
+ IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64
+ NM_CONTROLLED=yes
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ """
+ ),
+ )
+
+ ccrmci.disable_ipv6(fname)
+ self.assertEqual(
+ dedent(
+ """\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ NM_CONTROLLED=no
+ """
+ ),
+ util.load_file(fname),
+ )
+
+ @mock.patch(MPATH + ".refresh_rmc")
+ @mock.patch(MPATH + ".restart_network_manager")
+ @mock.patch(MPATH + ".disable_ipv6")
+ @mock.patch(MPATH + ".refresh_ipv6")
+ @mock.patch(MPATH + ".netinfo.netdev_info")
+ @mock.patch(MPATH + ".subp.which")
+ def test_handle(
+ self,
+ m_which,
+ m_netdev_info,
+ m_refresh_ipv6,
+ m_disable_ipv6,
+ m_restart_nm,
+ m_refresh_rmc,
+ ):
+ """Basic test of handle."""
+ m_netdev_info.return_value = NET_INFO
+ m_which.return_value = "/opt/rsct/bin/rmcctrl"
+ ccrmci.handle("refresh_rmc_and_interface", None, None, None, None)
+ self.assertEqual(1, m_netdev_info.call_count)
+ m_refresh_ipv6.assert_called_with("env5")
+ m_disable_ipv6.assert_called_with(
+ "/etc/sysconfig/network-scripts/ifcfg-env5"
+ )
+ self.assertEqual(1, m_restart_nm.call_count)
+ self.assertEqual(1, m_refresh_rmc.call_count)
+
+ @mock.patch(MPATH + ".netinfo.netdev_info")
+ def test_find_ipv6(self, m_netdev_info):
+ """find_ipv6_ifaces parses netdev_info returning those with ipv6"""
+ m_netdev_info.return_value = NET_INFO
+ found = ccrmci.find_ipv6_ifaces()
+ self.assertEqual(["env5"], found)
+
+ @mock.patch(MPATH + ".subp.subp")
+ def test_refresh_ipv6(self, m_subp):
+ """refresh_ipv6 should ip down and up the interface."""
+ iface = "myeth0"
+ ccrmci.refresh_ipv6(iface)
+ m_subp.assert_has_calls(
+ [
+ mock.call(["ip", "link", "set", iface, "down"]),
+ mock.call(["ip", "link", "set", iface, "up"]),
+ ]
+ )
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
new file mode 100644
index 00000000..9981dcea
--- /dev/null
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -0,0 +1,490 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from collections import namedtuple
+
+from cloudinit.config.cc_resizefs import (
+ _resize_btrfs,
+ _resize_ext,
+ _resize_ufs,
+ _resize_xfs,
+ _resize_zfs,
+ can_skip_resize,
+ handle,
+ maybe_get_writable_device_path,
+)
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import (
+ CiTestCase,
+ mock,
+ skipUnlessJsonSchema,
+ util,
+ wrap_and_call,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResizefs(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestResizefs, self).setUp()
+ self.name = "resizefs"
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = (
+ "growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n"
+ )
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertTrue(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ m_subp.return_value = (
+ "stdout: super-block backups (for fsck_ffs -b #) at:\n\n",
+ "growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n",
+ )
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertFalse(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
+
+ def test_can_skip_resize_ext(self):
+ self.assertFalse(can_skip_resize("ext", "/", "/dev/sda1"))
+
+ def test_handle_noops_on_disabled(self):
+ """The handle function logs when the configuration disables resize."""
+ cfg = {"resize_rootfs": False}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
+ self.logs.getvalue(),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
+ """The handle reports json schema violations as a warning.
+
+ Invalid values for resize_rootfs result in disabling the module.
+ """
+ cfg = {"resize_rootfs": "junk"}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nresize_rootfs: 'junk' is"
+ " not one of [True, False, 'noblock']",
+ logs,
+ )
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
+ logs,
+ )
+
+ @mock.patch("cloudinit.config.cc_resizefs.util.get_mount_info")
+ def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
+ """handle warns when get_mount_info sees unknown filesystem for /."""
+ m_get_mount_info.return_value = None
+ cfg = {"resize_rootfs": True}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertNotIn(
+ "WARNING: Invalid cloud-config provided:\nresize_rootfs:", logs
+ )
+ self.assertIn(
+ "WARNING: Could not determine filesystem type of /\n", logs
+ )
+ self.assertEqual(
+ [mock.call("/", LOG)], m_get_mount_info.call_args_list
+ )
+
+ def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
+ """handle noops when the root path is not found on the commandline."""
+ cfg = {"resize_rootfs": True}
+ exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ handle,
+ "cc_resizefs",
+ cfg,
+ _cloud=None,
+ log=LOG,
+ args=[],
+ )
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_resize_zfs_cmd_return(self):
+ zpool = "zroot"
+ devpth = "gpt/system"
+ self.assertEqual(
+ ("zpool", "online", "-e", zpool, devpth),
+ _resize_zfs(zpool, devpth),
+ )
+
+ def test_resize_xfs_cmd_return(self):
+ mount_point = "/mnt/test"
+ devpth = "/dev/sda1"
+ self.assertEqual(
+ ("xfs_growfs", mount_point), _resize_xfs(mount_point, devpth)
+ )
+
+ def test_resize_ext_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sdb1"
+ self.assertEqual(
+ ("resize2fs", devpth), _resize_ext(mount_point, devpth)
+ )
+
+ def test_resize_ufs_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sda2"
+ self.assertEqual(
+ ("growfs", "-y", mount_point), _resize_ufs(mount_point, devpth)
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.parse_mount")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_handle_zfs_root(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "vmzroot/ROOT/freebsd"
+ disk = "gpt/system"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resize_rootfs": True}
+
+ with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ ret = dresize.call_args[0][0]
+
+ self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.parse_mount")
+ def test_handle_modern_zfsroot(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "zroot/ROOT/default"
+ disk = "da0p3"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resize_rootfs": True}
+
+ def fake_stat(devpath):
+ if devpath == disk:
+ raise OSError("not here")
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal stat
+ return FakeStat(25008, 0, 1) # fake block device (mode 0o60660)
+
+ with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
+ with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat:
+ m_stat.side_effect = fake_stat
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+
+ self.assertEqual(
+ ("zpool", "online", "-e", "zroot", "/dev/" + disk),
+ dresize.call_args[0][0],
+ )
+
+
+class TestRootDevFromCmdline(CiTestCase):
+ def test_rootdev_from_cmdline_with_no_root(self):
+ """Return None from rootdev_from_cmdline when root is not present."""
+ invalid_cases = [
+ "BOOT_IMAGE=/adsf asdfa werasef root adf",
+ "BOOT_IMAGE=/adsf",
+ "",
+ ]
+ for case in invalid_cases:
+ self.assertIsNone(util.rootdev_from_cmdline(case))
+
+ def test_rootdev_from_cmdline_with_root_startswith_dev(self):
+ """Return the cmdline root when the path starts with /dev."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=/dev/this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
+ """Add /dev prefix to cmdline root when the path lacks the prefix."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_label(self):
+ """When cmdline root contains a LABEL, our root is disk/by-label."""
+ self.assertEqual(
+ "/dev/disk/by-label/unique",
+ util.rootdev_from_cmdline("asdf root=LABEL=unique"),
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_uuid(self):
+ """When cmdline root contains a UUID, our root is disk/by-uuid."""
+ self.assertEqual(
+ "/dev/disk/by-uuid/adsfdsaf-adsf",
+ util.rootdev_from_cmdline("asdf root=UUID=adsfdsaf-adsf"),
+ )
+
+
+class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
+
+ with_logs = True
+
+ def test_maybe_get_writable_device_path_none_on_overlayroot(self):
+ """When devpath is 'overlayroot' (on MAAS), return None."""
+ info = "does not matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "overlayroot",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "Not attempting to resize devpath 'overlayroot'",
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
+ """When root doesn't exist and isn't in the cmdline, log a warning."""
+ info = "does not matter"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_maybe_get_writable_device_path_does_not_exist(self):
+ """When devpath does not exist, a warning is logged."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: Device '/dev/I/dont/exist' did not exist."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
+ """When devpath does not exist in a container, log a debug message."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_raises_oserror(self):
+ """When an unexpected OSError is raised by os.stat, it is reraised."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ with self.assertRaises(OSError) as context_manager:
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs",
+ {
+ "util.is_container": {"return_value": True},
+ "os.stat": {
+ "side_effect": OSError("Something unexpected")
+ },
+ },
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertEqual(
+ "Something unexpected", str(context_manager.exception)
+ )
+
+ def test_maybe_get_writable_device_path_non_block(self):
+ """When device is not a block device, warn and return None."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: device '{0}' not a block device. cannot resize".format(
+ fake_devpath
+ ),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_non_block_on_container(self):
+ """When device is non-block device in container, emit debug log."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: device '{0}' not a block device in container."
+ " cannot resize".format(fake_devpath),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_returns_cmdline_root(self):
+ """When root device is UUID in kernel commandline, update devpath."""
+ # XXX Long-term we want to use FilesystemMocking test to avoid
+ # touching os.stat.
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal def.
+ info = "dev=/dev/root mnt_point=/ path=/does/not/matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs",
+ {
+ "util.get_cmdline": {"return_value": "asdf root=UUID=my-uuid"},
+ "util.is_container": False,
+ "os.path.exists": False, # /dev/root doesn't exist
+ "os.stat": {
+ "return_value": FakeStat(25008, 0, 1)
+ }, # block device (mode 0o60660)
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath)
+ self.assertIn(
+ "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
+ " per kernel cmdline",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+ def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = False
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "//.snapshots"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+ def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+ """Resize / directly if it is mounted read-write. (LP: #1734787)."""
+ m_is_rw.return_value = True
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.util.is_FreeBSD")
+ def test_maybe_get_writable_device_path_zfs_freebsd(
+ self, freebsd, m_is_container
+ ):
+ freebsd.return_value = True
+ info = "dev=gpt/system mnt_point=/ path=/"
+ devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ self.assertEqual("gpt/system", devpth)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resizefs_vyos.py b/tests/unittests/config/test_cc_resizefs_vyos.py
new file mode 100644
index 00000000..c995e6aa
--- /dev/null
+++ b/tests/unittests/config/test_cc_resizefs_vyos.py
@@ -0,0 +1,490 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from collections import namedtuple
+
+from cloudinit.config.cc_resizefs_vyos import (
+ _resize_btrfs,
+ _resize_ext,
+ _resize_ufs,
+ _resize_xfs,
+ _resize_zfs,
+ can_skip_resize,
+ handle,
+ maybe_get_writable_device_path,
+)
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import (
+ CiTestCase,
+ mock,
+ skipUnlessJsonSchema,
+ util,
+ wrap_and_call,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResizefs(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestResizefs, self).setUp()
+ self.name = "resizefs"
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = (
+ "growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n"
+ )
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertTrue(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ m_subp.return_value = (
+ "stdout: super-block backups (for fsck_ffs -b #) at:\n\n",
+ "growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n",
+ )
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertFalse(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
+
+ def test_can_skip_resize_ext(self):
+ self.assertFalse(can_skip_resize("ext", "/", "/dev/sda1"))
+
+ def test_handle_noops_on_disabled(self):
+ """The handle function logs when the configuration disables resize."""
+ cfg = {"resizefs_enabled": False}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n",
+ self.logs.getvalue(),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handle_schema_validation_logs_invalid_resizefs_enabled_value(self):
+ """The handle reports json schema violations as a warning.
+
+ Invalid values for resizefs_enabled result in disabling the module.
+ """
+ cfg = {"resizefs_enabled": "junk"}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nresizefs_enabled: 'junk' is"
+ " not one of [True, False, 'noblock']",
+ logs,
+ )
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n",
+ logs,
+ )
+
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.util.get_mount_info")
+ def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
+ """handle warns when get_mount_info sees unknown filesystem for /."""
+ m_get_mount_info.return_value = None
+ cfg = {"resizefs_enabled": True}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertNotIn(
+ "WARNING: Invalid cloud-config provided:\nresizefs_enabled:", logs
+ )
+ self.assertIn(
+ "WARNING: Could not determine filesystem type of /\n", logs
+ )
+ self.assertEqual(
+ [mock.call("/", LOG)], m_get_mount_info.call_args_list
+ )
+
+ def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
+ """handle noops when the root path is not found on the commandline."""
+ cfg = {"resizefs_enabled": True}
+ exists_mock_path = "cloudinit.config.cc_resizefs_vyos.os.path.exists"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ handle,
+ "cc_resizefs_vyos",
+ cfg,
+ _cloud=None,
+ log=LOG,
+ args=[],
+ )
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_resize_zfs_cmd_return(self):
+ zpool = "zroot"
+ devpth = "gpt/system"
+ self.assertEqual(
+ ("zpool", "online", "-e", zpool, devpth),
+ _resize_zfs(zpool, devpth),
+ )
+
+ def test_resize_xfs_cmd_return(self):
+ mount_point = "/mnt/test"
+ devpth = "/dev/sda1"
+ self.assertEqual(
+ ("xfs_growfs", mount_point), _resize_xfs(mount_point, devpth)
+ )
+
+ def test_resize_ext_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sdb1"
+ self.assertEqual(
+ ("resize2fs", devpth), _resize_ext(mount_point, devpth)
+ )
+
+ def test_resize_ufs_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sda2"
+ self.assertEqual(
+ ("growfs", "-y", mount_point), _resize_ufs(mount_point, devpth)
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.parse_mount")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_handle_zfs_root(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "vmzroot/ROOT/freebsd"
+ disk = "gpt/system"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resizefs_enabled": True}
+
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.do_resize") as dresize:
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ ret = dresize.call_args[0][0]
+
+ self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.parse_mount")
+ def test_handle_modern_zfsroot(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "zroot/ROOT/default"
+ disk = "da0p3"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resizefs_enabled": True}
+
+ def fake_stat(devpath):
+ if devpath == disk:
+ raise OSError("not here")
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal stat
+ return FakeStat(25008, 0, 1) # fake block device (mode 0o60660)
+
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.do_resize") as dresize:
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.os.stat") as m_stat:
+ m_stat.side_effect = fake_stat
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+
+ self.assertEqual(
+ ("zpool", "online", "-e", "zroot", "/dev/" + disk),
+ dresize.call_args[0][0],
+ )
+
+
+class TestRootDevFromCmdline(CiTestCase):
+ def test_rootdev_from_cmdline_with_no_root(self):
+ """Return None from rootdev_from_cmdline when root is not present."""
+ invalid_cases = [
+ "BOOT_IMAGE=/adsf asdfa werasef root adf",
+ "BOOT_IMAGE=/adsf",
+ "",
+ ]
+ for case in invalid_cases:
+ self.assertIsNone(util.rootdev_from_cmdline(case))
+
+ def test_rootdev_from_cmdline_with_root_startswith_dev(self):
+ """Return the cmdline root when the path starts with /dev."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=/dev/this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
+ """Add /dev prefix to cmdline root when the path lacks the prefix."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_label(self):
+ """When cmdline root contains a LABEL, our root is disk/by-label."""
+ self.assertEqual(
+ "/dev/disk/by-label/unique",
+ util.rootdev_from_cmdline("asdf root=LABEL=unique"),
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_uuid(self):
+ """When cmdline root contains a UUID, our root is disk/by-uuid."""
+ self.assertEqual(
+ "/dev/disk/by-uuid/adsfdsaf-adsf",
+ util.rootdev_from_cmdline("asdf root=UUID=adsfdsaf-adsf"),
+ )
+
+
+class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
+
+ with_logs = True
+
+ def test_maybe_get_writable_device_path_none_on_overlayroot(self):
+ """When devpath is 'overlayroot' (on MAAS), return None."""
+ info = "does not matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "overlayroot",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "Not attempting to resize devpath 'overlayroot'",
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
+ """When root doesn't exist and isn't in the cmdline, log a warning."""
+ info = "does not matter"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ exists_mock_path = "cloudinit.config.cc_resizefs_vyos.os.path.exists"
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_maybe_get_writable_device_path_does_not_exist(self):
+ """When devpath does not exist, a warning is logged."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: Device '/dev/I/dont/exist' did not exist."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
+ """When devpath does not exist in a container, log a debug message."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_raises_oserror(self):
+ """When an unexpected OSError is raised by os.stat, it is reraised."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ with self.assertRaises(OSError) as context_manager:
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos",
+ {
+ "util.is_container": {"return_value": True},
+ "os.stat": {
+ "side_effect": OSError("Something unexpected")
+ },
+ },
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertEqual(
+ "Something unexpected", str(context_manager.exception)
+ )
+
+ def test_maybe_get_writable_device_path_non_block(self):
+ """When device is not a block device, warn and return None."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: device '{0}' not a block device. cannot resize".format(
+ fake_devpath
+ ),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_non_block_on_container(self):
+ """When device is non-block device in container, emit debug log."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: device '{0}' not a block device in container."
+ " cannot resize".format(fake_devpath),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_returns_cmdline_root(self):
+ """When root device is UUID in kernel commandline, update devpath."""
+ # XXX Long-term we want to use FilesystemMocking test to avoid
+ # touching os.stat.
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal def.
+ info = "dev=/dev/root mnt_point=/ path=/does/not/matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos",
+ {
+ "util.get_cmdline": {"return_value": "asdf root=UUID=my-uuid"},
+ "util.is_container": False,
+ "os.path.exists": False, # /dev/root doesn't exist
+ "os.stat": {
+ "return_value": FakeStat(25008, 0, 1)
+ }, # block device (mode 0o60660)
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath)
+ self.assertIn(
+ "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
+ " per kernel cmdline",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.os.path.isdir")
+ def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = False
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "//.snapshots"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.os.path.isdir")
+ def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+ """Resize / directly if it is mounted read-write. (LP: #1734787)."""
+ m_is_rw.return_value = True
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.util.is_FreeBSD")
+ def test_maybe_get_writable_device_path_zfs_freebsd(
+ self, freebsd, m_is_container
+ ):
+ freebsd.return_value = True
+ info = "dev=gpt/system mnt_point=/ path=/"
+ devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ self.assertEqual("gpt/system", devpth)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
new file mode 100644
index 00000000..8896a4e8
--- /dev/null
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -0,0 +1,197 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+import tempfile
+from copy import deepcopy
+from unittest import mock
+
+import pytest
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_resolv_conf
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+from tests.unittests import helpers as t_help
+from tests.unittests.util import MockDistro
+
+LOG = logging.getLogger(__name__)
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestResolvConf(t_help.FilesystemMockingTestCase):
+ with_logs = True
+ cfg = {"manage_resolv_conf": True, "resolv_conf": {}}
+
+ def setUp(self):
+ super(TestResolvConf, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, "data"))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def call_resolv_conf_handler(self, distro_name, conf, cc=None):
+ if not cc:
+ ds = None
+ distro = self._fetch_distro(distro_name, conf)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc_resolv_conf.handle("cc_resolv_conf", conf, cc, LOG, [])
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_systemd_resolved(self, m_render_to_file):
+ self.call_resolv_conf_handler("photon", self.cfg)
+
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_no_param(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp.pop("resolv_conf")
+ self.call_resolv_conf_handler("photon", tmp)
+
+ self.assertIn(
+ "manage_resolv_conf True but no parameters provided",
+ self.logs.getvalue(),
+ )
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp["manage_resolv_conf"] = False
+ self.call_resolv_conf_handler("photon", tmp)
+ self.assertIn(
+ "'manage_resolv_conf' present but set to False",
+ self.logs.getvalue(),
+ )
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_etc_resolv_conf(self, m_render_to_file):
+ self.call_resolv_conf_handler("rhel", self.cfg)
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file):
+ ds = None
+ distro = self._fetch_distro("rhel", self.cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc.distro.resolve_conf_fn = "bla"
+
+ self.logs.truncate(0)
+ self.call_resolv_conf_handler("rhel", self.cfg, cc)
+
+ self.assertIn(
+ "No template found, not rendering resolve configs",
+ self.logs.getvalue(),
+ )
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+
+class TestGenerateResolvConf:
+
+ dist = MockDistro()
+ tmpl_fn = t_help.cloud_init_project_dir("templates/resolv.conf.tmpl")
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_dist_resolv_conf_fn(self, m_render_to_file):
+ self.dist.resolve_conf_fn = "/tmp/resolv-test.conf"
+ generate_resolv_conf(
+ self.tmpl_fn, mock.MagicMock(), self.dist.resolve_conf_fn
+ )
+
+ assert [
+ mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ path = "/use/this/path"
+ generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path)
+
+ assert [
+ mock.call(mock.ANY, path, mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ target_fn = "/etc/resolv.conf"
+ generate_resolv_conf(self.tmpl_fn, params, target_fn)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py
new file mode 100644
index 00000000..fcc7db34
--- /dev/null
+++ b/tests/unittests/config/test_cc_rh_subscription.py
@@ -0,0 +1,320 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for registering RHEL subscription via rh_subscription."""
+
+import copy
+import logging
+
+from cloudinit import subp
+from cloudinit.config import cc_rh_subscription
+from tests.unittests.helpers import CiTestCase, mock
+
+SUBMGR = cc_rh_subscription.SubscriptionManager
+SUB_MAN_CLI = "cloudinit.config.cc_rh_subscription._sub_man_cli"
+
+
+@mock.patch(SUB_MAN_CLI)
+class GoodTests(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(GoodTests, self).setUp()
+ self.name = "cc_rh_subscription"
+ self.cloud_init = None
+ self.log = logging.getLogger("good_tests")
+ self.args = []
+ self.handle = cc_rh_subscription.handle
+
+ self.config = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ }
+ }
+ self.config_full = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "auto-attach": True,
+ "service-level": "self-support",
+ "add-pool": ["pool1", "pool2", "pool3"],
+ "enable-repo": ["repo1", "repo2", "repo3"],
+ "disable-repo": ["repo4", "repo5"],
+ }
+ }
+
+ def test_already_registered(self, m_sman_cli):
+ """
+ Emulate a system that is already registered. is_registered() succeeds
+ because _sub_man_cli raises no ProcessExecutionError.
+ """
+ self.handle(
+ self.name, self.config, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assertIn("System is already registered", self.logs.getvalue())
+
+ def test_simple_registration(self, m_sman_cli):
+ """
+ Simple registration with username and password
+ """
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
+ m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, "bar")]
+ self.handle(
+ self.name, self.config, self.cloud_init, self.log, self.args
+ )
+ self.assertIn(mock.call(["identity"]), m_sman_cli.call_args_list)
+ self.assertIn(
+ mock.call(
+ [
+ "register",
+ "--username=scooby@do.com",
+ "--password=scooby-snacks",
+ ],
+ logstring_val=True,
+ ),
+ m_sman_cli.call_args_list,
+ )
+ self.assertIn(
+ "rh_subscription plugin completed successfully",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+
+ @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
+ def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
+ cfg = copy.deepcopy(self.config)
+ m_get_repos.return_value = ([], ["repo1"])
+ cfg["rh_subscription"].update(
+ {"enable-repo": ["repo1"], "disable-repo": None}
+ )
+ mysm = cc_rh_subscription.SubscriptionManager(cfg)
+ self.assertEqual(True, mysm.update_repos())
+ m_get_repos.assert_called_with()
+ self.assertEqual(
+ m_sman_cli.call_args_list, [mock.call(["repos", "--enable=repo1"])]
+ )
+
+ def test_full_registration(self, m_sman_cli):
+ """
+ Registration with auto-attach, service-level, adding pools,
+ and enabling and disabling yum repos
+ """
+ call_lists = []
+ call_lists.append(["attach", "--pool=pool1", "--pool=pool3"])
+ call_lists.append(
+ ["repos", "--disable=repo5", "--enable=repo2", "--enable=repo3"]
+ )
+ call_lists.append(["attach", "--auto", "--servicelevel=self-support"])
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (reg, "bar"),
+ ("Service level set to: self-support", ""),
+ ("pool1\npool3\n", ""),
+ ("pool2\n", ""),
+ ("", ""),
+ ("Repo ID: repo1\nRepo ID: repo5\n", ""),
+ ("Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4", ""),
+ ("", ""),
+ ]
+ self.handle(
+ self.name, self.config_full, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 9)
+ for call in call_lists:
+ self.assertIn(mock.call(call), m_sman_cli.call_args_list)
+ self.assertIn(
+ "rh_subscription plugin completed successfully",
+ self.logs.getvalue(),
+ )
+
+
+@mock.patch(SUB_MAN_CLI)
+class TestBadInput(CiTestCase):
+ with_logs = True
+ name = "cc_rh_subscription"
+ cloud_init = None
+ log = logging.getLogger("bad_tests")
+ args = []
+ SM = cc_rh_subscription.SubscriptionManager
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
+
+ config_no_password = {"rh_subscription": {"username": "scooby@do.com"}}
+
+ config_no_key = {
+ "rh_subscription": {
+ "activation-key": "1234abcde",
+ }
+ }
+
+ config_service = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "service-level": "self-support",
+ }
+ }
+
+ config_badpool = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "add-pool": "not_a_list",
+ }
+ }
+ config_badrepo = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "enable-repo": "not_a_list",
+ }
+ }
+ config_badkey = {
+ "rh_subscription": {
+ "activation-key": "abcdef1234",
+ "fookey": "bar",
+ "org": "123",
+ }
+ }
+
+ def setUp(self):
+ super(TestBadInput, self).setUp()
+ self.handle = cc_rh_subscription.handle
+
+ def assert_logged_warnings(self, warnings):
+ logs = self.logs.getvalue()
+ missing = [w for w in warnings if "WARNING: " + w not in logs]
+ self.assertEqual([], missing, "Missing expected warnings.")
+
+ def test_no_password(self, m_sman_cli):
+ """Attempt to register without the password key/value."""
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_no_password,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 0)
+
+ def test_no_org(self, m_sman_cli):
+ """Attempt to register without the org key/value."""
+ m_sman_cli.side_effect = [subp.ProcessExecutionError]
+ self.handle(
+ self.name, self.config_no_key, self.cloud_init, self.log, self.args
+ )
+ m_sman_cli.assert_called_with(["identity"])
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "Unable to register system due to incomplete information.",
+ "Use either activationkey and org *or* userid and password",
+ "Registration failed or did not run completely",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_service_level_without_auto(self, m_sman_cli):
+ """Attempt to register using service-level without auto-attach key."""
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_service,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "The service-level key must be used in conjunction with ",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_pool_not_a_list(self, m_sman_cli):
+ """
+ Register with pools that are not in the format of a list
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_badpool,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings(
+ (
+ "Pools must in the format of a list",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_repo_not_a_list(self, m_sman_cli):
+ """
+ Register with repos that are not in the format of a list
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_badrepo,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings(
+ (
+ "Repo IDs must in the format of a list.",
+ "Unable to add or remove repos",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_bad_key_value(self, m_sman_cli):
+ """
+ Attempt to register with a key that we don't know
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name, self.config_badkey, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "fookey is not a valid key for rh_subscription. Valid keys"
+ " are:",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py
index 8c8e2838..e5d06ca2 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/config/test_cc_rsyslog.py
@@ -4,55 +4,63 @@ import os
import shutil
import tempfile
-from cloudinit.config.cc_rsyslog import (
- apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
- parse_remotes_line, remotes_to_rsyslog_cfg)
from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
+from cloudinit.config.cc_rsyslog import (
+ DEF_DIR,
+ DEF_FILENAME,
+ DEF_RELOAD,
+ apply_rsyslog_changes,
+ load_config,
+ parse_remotes_line,
+ remotes_to_rsyslog_cfg,
+)
+from tests.unittests import helpers as t_help
class TestLoadConfig(t_help.TestCase):
def setUp(self):
super(TestLoadConfig, self).setUp()
self.basecfg = {
- 'config_filename': DEF_FILENAME,
- 'config_dir': DEF_DIR,
- 'service_reload_command': DEF_RELOAD,
- 'configs': [],
- 'remotes': {},
+ "config_filename": DEF_FILENAME,
+ "config_dir": DEF_DIR,
+ "service_reload_command": DEF_RELOAD,
+ "configs": [],
+ "remotes": {},
}
def test_legacy_full(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1'],
- 'rsyslog_dir': "mydir",
- 'rsyslog_filename': "myfilename"})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1'],
- 'config_dir': "mydir",
- 'config_filename': 'myfilename',
- 'service_reload_command': 'auto'}
+ found = load_config(
+ {
+ "rsyslog": ["*.* @192.168.1.1"],
+ "rsyslog_dir": "mydir",
+ "rsyslog_filename": "myfilename",
+ }
+ )
+ self.basecfg.update(
+ {
+ "configs": ["*.* @192.168.1.1"],
+ "config_dir": "mydir",
+ "config_filename": "myfilename",
+ "service_reload_command": "auto",
+ }
)
self.assertEqual(found, self.basecfg)
def test_legacy_defaults(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1']})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1']})
+ found = load_config({"rsyslog": ["*.* @192.168.1.1"]})
+ self.basecfg.update({"configs": ["*.* @192.168.1.1"]})
self.assertEqual(found, self.basecfg)
def test_new_defaults(self):
self.assertEqual(load_config({}), self.basecfg)
def test_new_configs(self):
- cfgs = ['*.* myhost', '*.* my2host']
- self.basecfg.update({'configs': cfgs})
+ cfgs = ["*.* myhost", "*.* my2host"]
+ self.basecfg.update({"configs": cfgs})
self.assertEqual(
- load_config({'rsyslog': {'configs': cfgs}}),
- self.basecfg)
+ load_config({"rsyslog": {"configs": cfgs}}), self.basecfg
+ )
class TestApplyChanges(t_help.TestCase):
@@ -63,27 +71,29 @@ class TestApplyChanges(t_help.TestCase):
def test_simple(self):
cfgline = "*.* foohost"
changed = apply_rsyslog_changes(
- configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp)
+ configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "foo.cfg")
self.assertEqual([fname], changed)
- self.assertEqual(
- util.load_file(fname), cfgline + "\n")
+ self.assertEqual(util.load_file(fname), cfgline + "\n")
def test_multiple_files(self):
configs = [
- '*.* foohost',
- {'content': 'abc', 'filename': 'my.cfg'},
- {'content': 'filefoo-content',
- 'filename': os.path.join(self.tmp, 'mydir/mycfg')},
+ "*.* foohost",
+ {"content": "abc", "filename": "my.cfg"},
+ {
+ "content": "filefoo-content",
+ "filename": os.path.join(self.tmp, "mydir/mycfg"),
+ },
]
changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
expected = [
- (os.path.join(self.tmp, "default.cfg"),
- "*.* foohost\n"),
+ (os.path.join(self.tmp, "default.cfg"), "*.* foohost\n"),
(os.path.join(self.tmp, "my.cfg"), "abc\n"),
(os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
]
@@ -91,30 +101,37 @@ class TestApplyChanges(t_help.TestCase):
actual = []
for fname, _content in expected:
util.load_file(fname)
- actual.append((fname, util.load_file(fname),))
+ actual.append(
+ (
+ fname,
+ util.load_file(fname),
+ )
+ )
self.assertEqual(expected, actual)
def test_repeat_def(self):
- configs = ['*.* foohost', "*.warn otherhost"]
+ configs = ["*.* foohost", "*.warn otherhost"]
changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "default.cfg")
self.assertEqual([fname], changed)
- expected_content = '\n'.join([c for c in configs]) + '\n'
+ expected_content = "\n".join([c for c in configs]) + "\n"
found_content = util.load_file(fname)
self.assertEqual(expected_content, found_content)
def test_multiline_content(self):
- configs = ['line1', 'line2\nline3\n']
+ configs = ["line1", "line2\nline3\n"]
apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "default.cfg")
- expected_content = '\n'.join([c for c in configs])
+ expected_content = "\n".join([c for c in configs])
found_content = util.load_file(fname)
self.assertEqual(expected_content, found_content)
@@ -152,7 +169,7 @@ class TestRemotesToSyslog(t_help.TestCase):
# str rendered line must appear in remotes_to_rsyslog_cfg return
mycfg = "*.* myhost"
myline = str(parse_remotes_line(mycfg, name="myname"))
- r = remotes_to_rsyslog_cfg({'myname': mycfg})
+ r = remotes_to_rsyslog_cfg({"myname": mycfg})
lines = r.splitlines()
self.assertEqual(1, len(lines))
self.assertTrue(myline in r.splitlines())
@@ -161,7 +178,8 @@ class TestRemotesToSyslog(t_help.TestCase):
header = "#foo head"
footer = "#foo foot"
r = remotes_to_rsyslog_cfg(
- {'myname': "*.* myhost"}, header=header, footer=footer)
+ {"myname": "*.* myhost"}, header=header, footer=footer
+ )
lines = r.splitlines()
self.assertTrue(header, lines[0])
self.assertTrue(footer, lines[-1])
@@ -170,9 +188,11 @@ class TestRemotesToSyslog(t_help.TestCase):
mycfg = "*.* myhost"
myline = str(parse_remotes_line(mycfg, name="myname"))
r = remotes_to_rsyslog_cfg(
- {'myname': mycfg, 'removed': None, 'removed2': ""})
+ {"myname": mycfg, "removed": None, "removed2": ""}
+ )
lines = r.splitlines()
self.assertEqual(1, len(lines))
self.assertTrue(myline in r.splitlines())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py
new file mode 100644
index 00000000..59490d67
--- /dev/null
+++ b/tests/unittests/config/test_cc_runcmd.py
@@ -0,0 +1,137 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import stat
+from unittest.mock import patch
+
+from cloudinit import helpers, subp, util
+from cloudinit.config.cc_runcmd import handle, schema
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ SchemaTestCaseMixin,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestRuncmd(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestRuncmd, self).setUp()
+ self.subp = subp.subp
+ self.new_root = self.tmp_dir()
+ self.patchUtils(self.new_root)
+ self.paths = helpers.Paths({"scripts": self.new_root})
+
+ def test_handler_skip_if_no_runcmd(self):
+ """When the provided config doesn't contain runcmd, skip it."""
+ cfg = {}
+ mycloud = get_cloud(paths=self.paths)
+ handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "Skipping module named notimportant, no 'runcmd' key",
+ self.logs.getvalue(),
+ )
+
+ @patch("cloudinit.util.shellify")
+ def test_runcmd_shellify_fails(self, cls):
+ """When shellify fails throw exception"""
+ cls.side_effect = TypeError("patched shellify")
+ valid_config = {"runcmd": ["echo 42"]}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ with self.allow_subp(["/bin/sh"]):
+ handle("cc_runcmd", valid_config, cc, LOG, None)
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ def test_handler_invalid_command_set(self):
+ """Commands which can't be converted to shell will raise errors."""
+ invalid_config = {"runcmd": 1}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Failed to shellify 1 into file"
+ " /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
+ str(cm.exception),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_type(self):
+ """Schema validation warns of non-array type for runcmd key.
+
+ Schema validation is not strict, so runcmd attempts to shellify the
+ invalid content.
+ """
+ invalid_config = {"runcmd": 1}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Invalid cloud-config provided:\nruncmd: 1 is not of type 'array'",
+ self.logs.getvalue(),
+ )
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_item_type(self):
+ """Schema validation warns of non-array or string runcmd items.
+
+ Schema validation is not strict, so runcmd attempts to shellify the
+ invalid content.
+ """
+ invalid_config = {
+ "runcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}]
+ }
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ expected_warnings = [
+ "runcmd.1: 20 is not valid under any of the given schemas",
+ "runcmd.3: {'a': 'n'} is not valid under any of the given schema",
+ ]
+ logs = self.logs.getvalue()
+ for warning in expected_warnings:
+ self.assertIn(warning, logs)
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ def test_handler_write_valid_runcmd_schema_to_file(self):
+ """Valid runcmd schema is written to a runcmd shell script."""
+ valid_config = {"runcmd": [["ls", "/"]]}
+ cc = get_cloud(paths=self.paths)
+ handle("cc_runcmd", valid_config, cc, LOG, [])
+ runcmd_file = os.path.join(
+ self.new_root,
+ "var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
+ )
+ self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file))
+ file_stat = os.stat(runcmd_file)
+ self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode))
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+ """Directly test schema rather than through handle."""
+
+ schema = schema
+
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+ [["echo", "bye"], ["echo", "bye"]],
+ "command entries can be duplicate.",
+ )
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ ["echo bye", "echo bye"], "command entries can be duplicate."
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py
new file mode 100644
index 00000000..8b2fdcdd
--- /dev/null
+++ b/tests/unittests/config/test_cc_seed_random.py
@@ -0,0 +1,221 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Based on test_handler_set_hostname.py
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import gzip
+import logging
+import tempfile
+from io import BytesIO
+
+from cloudinit import subp, util
+from cloudinit.config import cc_seed_random
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestRandomSeed(t_help.TestCase):
+ def setUp(self):
+ super(TestRandomSeed, self).setUp()
+ self._seed_file = tempfile.mktemp()
+ self.unapply = []
+
+ # by default 'which' has nothing in its path
+ self.apply_patches([(subp, "which", self._which)])
+ self.apply_patches([(subp, "subp", self._subp)])
+ self.subp_called = []
+ self.whichdata = {}
+
+ def tearDown(self):
+ apply_patches([i for i in reversed(self.unapply)])
+ util.del_file(self._seed_file)
+
+ def apply_patches(self, patches):
+ ret = apply_patches(patches)
+ self.unapply += ret
+
+ def _which(self, program):
+ return self.whichdata.get(program)
+
+ def _subp(self, *args, **kwargs):
+ # supports subp calling with cmd as args or kwargs
+ if "args" not in kwargs:
+ kwargs["args"] = args[0]
+ self.subp_called.append(kwargs)
+ return
+
+ def _compress(self, text):
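+ # Gzip-compress the given bytes in memory; lets tests exercise the 'gzip'/'gz' seed encodings.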
+ contents = BytesIO()
+ gz_fh = gzip.GzipFile(mode="wb", fileobj=contents)
+ gz_fh.write(text)
+ gz_fh.close()
+ return contents.getvalue()
+
+ def test_append_random(self):
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": "tiny-tim-was-here",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-tim-was-here", contents)
+
+ def test_append_random_unknown_encoding(self):
+ data = self._compress(b"tiny-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "special_encoding",
+ }
+ }
+ self.assertRaises(
+ IOError,
+ cc_seed_random.handle,
+ "test",
+ cfg,
+ get_cloud("ubuntu"),
+ LOG,
+ [],
+ )
+
+ def test_append_random_gzip(self):
+ data = self._compress(b"tiny-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "gzip",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-toe", contents)
+
+ def test_append_random_gz(self):
+ data = self._compress(b"big-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "gz",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("big-toe", contents)
+
+ def test_append_random_base64(self):
+ data = util.b64e("bubbles")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "base64",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("bubbles", contents)
+
+ def test_append_random_b64(self):
+ data = util.b64e("kit-kat")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "b64",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("kit-kat", contents)
+
+ def test_append_random_metadata(self):
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": "tiny-tim-was-here",
+ }
+ }
+ c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"})
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-tim-was-here-so-was-josh", contents)
+
+ def test_seed_command_provided_and_available(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"pollinate": "/usr/bin/pollinate"}
+ cfg = {"random_seed": {"command": ["pollinate", "-q"]}}
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ subp_args = [f["args"] for f in self.subp_called]
+ self.assertIn(["pollinate", "-q"], subp_args)
+
+ def test_seed_command_not_provided(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {}
+ cc_seed_random.handle("test", {}, c, LOG, [])
+
+ # subp should not have been called, as 'which' reports the command unavailable
+ self.assertFalse(self.subp_called)
+
+ def test_unavailable_seed_command_and_required_raises_error(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {}
+ cfg = {
+ "random_seed": {
+ "command": ["THIS_NO_COMMAND"],
+ "command_required": True,
+ }
+ }
+ self.assertRaises(
+ ValueError, cc_seed_random.handle, "test", cfg, c, LOG, []
+ )
+
+ def test_seed_command_and_required(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"foo": "foo"}
+ cfg = {"random_seed": {"command_required": True, "command": ["foo"]}}
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ self.assertIn(["foo"], [f["args"] for f in self.subp_called])
+
+ def test_file_in_environment_for_command(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"foo": "foo"}
+ cfg = {
+ "random_seed": {
+ "command_required": True,
+ "command": ["foo"],
+ "file": self._seed_file,
+ }
+ }
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ # this just insists that the first time subp was called,
+ # RANDOM_SEED_FILE was in the environment set up correctly
+ subp_env = [f["env"] for f in self.subp_called]
+ self.assertEqual(subp_env[0].get("RANDOM_SEED_FILE"), self._seed_file)
+
+
+def apply_patches(patches):
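+ # Replace each (ref, name, replace) attribute and return the originals
+ # so tearDown can restore them by calling this helper again.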
+ ret = []
+ for (ref, name, replace) in patches:
+ if replace is None:
+ continue
+ orig = getattr(ref, name)
+ setattr(ref, name, replace)
+ ret.append((ref, name, orig))
+ return ret
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
new file mode 100644
index 00000000..fd994c4e
--- /dev/null
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -0,0 +1,208 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+import tempfile
+from io import BytesIO
+from unittest import mock
+
+from configobj import ConfigObj
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_set_hostname
+from tests.unittests import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostname(t_help.FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestHostname, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, "data"))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def test_debian_write_hostname_prefer_fqdn(self):
+ cfg = {
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": True,
+ "fqdn": "blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian", cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah.yahoo.com", contents.strip())
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd):
+ cfg = {
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": False,
+ "fqdn": "blah.yahoo.com",
+ }
+ distro = self._fetch_distro("rhel", cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({"HOSTNAME": "blah"}, dict(n_cfg))
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_write_hostname_rhel(self, m_uses_systemd):
+ cfg = {"hostname": "blah", "fqdn": "blah.blah.blah.yahoo.com"}
+ distro = self._fetch_distro("rhel")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({"HOSTNAME": "blah.blah.blah.yahoo.com"}, dict(n_cfg))
+
+ def test_write_hostname_debian(self):
+ cfg = {
+ "hostname": "blah",
+ "fqdn": "blah.blah.blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah", contents.strip())
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_write_hostname_sles(self, m_uses_systemd):
+ cfg = {
+ "hostname": "blah.blah.blah.suse.com",
+ }
+ distro = self._fetch_distro("sles")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file(distro.hostname_conf_fn)
+ self.assertEqual("blah", contents.strip())
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_photon_hostname(self, m_subp):
+ cfg1 = {
+ "hostname": "photon",
+ "prefer_fqdn_over_hostname": True,
+ "fqdn": "test1.vmware.com",
+ }
+ cfg2 = {
+ "hostname": "photon",
+ "prefer_fqdn_over_hostname": False,
+ "fqdn": "test2.vmware.com",
+ }
+
+ ds = None
+ m_subp.return_value = (None, None)
+ distro = self._fetch_distro("photon", cfg1)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ for c in [cfg1, cfg2]:
+ cc_set_hostname.handle("cc_set_hostname", c, cc, LOG, [])
+ print("\n", m_subp.call_args_list)
+ if c["prefer_fqdn_over_hostname"]:
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["fqdn"]],
+ capture=True,
+ )
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["hostname"]],
+ capture=True,
+ )
+ ] not in m_subp.call_args_list
+ else:
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["hostname"]],
+ capture=True,
+ )
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["fqdn"]],
+ capture=True,
+ )
+ ] not in m_subp.call_args_list
+
+ def test_multiple_calls_skips_unchanged_hostname(self):
+ """Only new hostname or fqdn values will generate a hostname call."""
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("hostname1", contents.strip())
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ self.assertIn(
+ "DEBUG: No hostname changes. Skipping set-hostname\n",
+ self.logs.getvalue(),
+ )
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, LOG, []
+ )
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("hostname2", contents.strip())
+ self.assertIn(
+ "Non-persistently setting the system hostname to hostname2",
+ self.logs.getvalue(),
+ )
+
+ def test_error_on_distro_set_hostname_errors(self):
+ """Raise SetHostnameError on exceptions from distro.set_hostname."""
+ distro = self._fetch_distro("debian")
+
+ def set_hostname_error(hostname, fqdn):
+ raise Exception("OOPS on: %s" % fqdn)
+
+ distro.set_hostname = set_hostname_error
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr:
+ cc_set_hostname.handle(
+ "somename", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ self.assertEqual(
+ "Failed to set the hostname to hostname1.me.com (hostname1):"
+ " OOPS on: hostname1.me.com",
+ str(ctx_mgr.exception),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
new file mode 100644
index 00000000..bc81214b
--- /dev/null
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -0,0 +1,177 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import util
+from cloudinit.config import cc_set_passwords as setpass
+from tests.unittests.helpers import CiTestCase
+
+MODPATH = "cloudinit.config.cc_set_passwords."
+
+
+class TestHandleSshPwauth(CiTestCase):
+ """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+ with_logs = True
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_unknown_value_logs_warning(self, m_subp):
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth("floo", cloud.distro)
+ self.assertIn(
+ "Unrecognized value: ssh_pwauth=floo", self.logs.getvalue()
+ )
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+ """If systemctl in service cmd: systemctl restart name."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ cloud.distro.init_cmd = ["systemctl"]
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ m_subp.assert_called_with(
+ ["systemctl", "restart", "ssh"], capture=True
+ )
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+ """If config is not updated, then no system restart should be done."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ m_subp.assert_not_called()
+ self.assertIn("No need to restart SSH", self.logs.getvalue())
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+ """If 'unchanged', then no updates to config and no restart."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth("unchanged", cloud.distro)
+ m_update_ssh_config.assert_not_called()
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_valid_change_values(self, m_subp):
+ """If value is a valid changen value, then update should be called."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ upname = MODPATH + "update_ssh_config"
+ optname = "PasswordAuthentication"
+ for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+ optval = "yes" if value in util.TRUE_STRINGS else "no"
+ with mock.patch(upname, return_value=False) as m_update:
+ setpass.handle_ssh_pwauth(value, cloud.distro)
+ m_update.assert_called_with({optname: optval})
+ m_subp.assert_not_called()
+
+
+class TestSetPasswordsHandle(CiTestCase):
+ """Test cc_set_passwords.handle"""
+
+ with_logs = True
+
+ def test_handle_on_empty_config(self, *args):
+ """handle logs that no password has changed when config is empty."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle(
+ "IGNORED", cfg={}, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertEqual(
+ "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
+ "ssh_pwauth=None\n",
+ self.logs.getvalue(),
+ )
+
+ def test_handle_on_chpasswd_list_parses_common_hashes(self):
+ """handle parses command password hashes."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ valid_hashed_pwds = [
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/"
+ "Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q"
+ "SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1",
+ ]
+ cfg = {"chpasswd": {"list": valid_hashed_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertIn(
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
+ self.assertIn(
+ "DEBUG: Setting hashed password for ['root', 'ubuntu']",
+ self.logs.getvalue(),
+ )
+ valid = "\n".join(valid_hashed_pwds) + "\n"
+ called = chpasswd.call_args[0][1]
+ self.assertEqual(valid, called)
+
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd
+ ):
+ """BSD don't use chpasswd"""
+ m_is_bsd.return_value = True
+ cloud = self.tmp_cloud(distro="freebsd")
+ valid_pwds = ["ubuntu:passw0rd"]
+ cfg = {"chpasswd": {"list": valid_pwds}}
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertEqual(
+ [
+ mock.call(
+ ["pw", "usermod", "ubuntu", "-h", "0"],
+ data="passw0rd",
+ logstring="chpasswd for ubuntu",
+ ),
+ mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "util.multi_log")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_handle_on_chpasswd_list_creates_random_passwords(
+ self, m_subp, m_multi_log
+ ):
+ """handle parses command set random passwords."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ valid_random_pwds = ["root:R", "ubuntu:RANDOM"]
+ cfg = {"chpasswd": {"expire": "false", "list": valid_random_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertIn(
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
+ self.assertEqual(1, chpasswd.call_count)
+ passwords, _ = chpasswd.call_args
+ user_pass = {
+ user: password
+ for user, password in (
+ line.split(":") for line in passwords[1].splitlines()
+ )
+ }
+
+ self.assertEqual(1, m_multi_log.call_count)
+ self.assertEqual(
+ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
+ m_multi_log.call_args,
+ )
+
+ self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
+ written_lines = m_multi_log.call_args[0][0].splitlines()
+ for password in user_pass.values():
+ for line in written_lines:
+ if password in line:
+ break
+ else:
+ self.fail("Password not emitted to console")
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/tests/unittests/config/test_cc_snap.py
index 6d4c014a..1632676d 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/tests/unittests/config/test_cc_snap.py
@@ -3,14 +3,23 @@
import re
from io import StringIO
+from cloudinit import util
from cloudinit.config.cc_snap import (
- ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
- run_commands, schema)
+ ASSERTIONS_FILE,
+ add_assertions,
+ handle,
+ maybe_install_squashfuse,
+ run_commands,
+ schema,
+)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
-from cloudinit.tests.helpers import (
- CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
-
+from tests.unittests.helpers import (
+ CiTestCase,
+ SchemaTestCaseMixin,
+ mock,
+ skipUnlessJsonSchema,
+ wrap_and_call,
+)
SYSTEM_USER_ASSERTION = """\
type: system-user
@@ -92,11 +101,11 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
m_subp.assert_not_called()
def test_add_assertions_on_non_list_or_dict(self):
@@ -105,58 +114,72 @@ class TestAddAssertions(CiTestCase):
add_assertions(assertions="I'm Not Valid")
self.assertEqual(
"assertion parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
+ str(context_manager.exception),
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
- self.assertIn(
- 'sertions', self.logs.getvalue())
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
+ self.assertIn("sertions", self.logs.getvalue())
self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- util.write_file(compare_file, '\n'.join(assertions).encode('utf-8'))
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ util.write_file(compare_file, "\n".join(assertions).encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION}
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assertions = {"00": SYSTEM_USER_ASSERTION, "01": ACCOUNT_ASSERTION}
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
self.assertIn(
"DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertIn(
"DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- combined = '\n'.join(assertions.values())
- util.write_file(compare_file, combined.encode('utf-8'))
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ combined = "\n".join(assertions.values())
+ util.write_file(compare_file, combined.encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
class TestRunCommands(CiTestCase):
@@ -168,11 +191,11 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
m_subp.assert_not_called()
def test_run_commands_on_non_list_or_dict(self):
@@ -181,68 +204,74 @@ class TestRunCommands(CiTestCase):
run_commands(commands="I'm Not Valid")
self.assertEqual(
"commands parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
+ str(context_manager.exception),
+ )
def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
"""All exit codes are logged to stderr."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'bogus command'
+ cmd2 = "bogus command"
cmd3 = 'echo "MOM" >> %s' % outfile
commands = [cmd1, cmd2, cmd3]
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
with self.assertRaises(RuntimeError) as context_manager:
run_commands(commands=commands)
self.assertIsNotNone(
- re.search(r'bogus: (command )?not found',
- str(context_manager.exception)),
- msg='Expected bogus command not found')
- expected_stderr_log = '\n'.join([
- 'Begin run command: {cmd}'.format(cmd=cmd1),
- 'End run command: exit(0)',
- 'Begin run command: {cmd}'.format(cmd=cmd2),
- 'ERROR: End run command: exit(127)',
- 'Begin run command: {cmd}'.format(cmd=cmd3),
- 'End run command: exit(0)\n'])
+ re.search(
+ r"bogus: (command )?not found", str(context_manager.exception)
+ ),
+ msg="Expected bogus command not found",
+ )
+ expected_stderr_log = "\n".join(
+ [
+ "Begin run command: {cmd}".format(cmd=cmd1),
+ "End run command: exit(0)",
+ "Begin run command: {cmd}".format(cmd=cmd2),
+ "ERROR: End run command: exit(127)",
+ "Begin run command: {cmd}".format(cmd=cmd3),
+ "End run command: exit(0)\n",
+ ]
+ )
self.assertEqual(expected_stderr_log, m_stderr.getvalue())
def test_run_command_as_lists(self):
"""When commands are specified as a list, run them in order."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
cmd2 = 'echo "MOM" >> %s' % outfile
commands = [cmd1, cmd2]
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO):
run_commands(commands=commands)
self.assertIn(
- 'DEBUG: Running user-provided snap commands',
- self.logs.getvalue())
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ "DEBUG: Running user-provided snap commands", self.logs.getvalue()
+ )
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
self.assertIn(
- 'WARNING: Non-snap commands in snap config:', self.logs.getvalue())
+ "WARNING: Non-snap commands in snap config:", self.logs.getvalue()
+ )
def test_run_command_dict_sorted_as_command_script(self):
"""When commands are a dict, sort them and run."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
cmd2 = 'echo "MOM" >> %s' % outfile
- commands = {'02': cmd1, '01': cmd2}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ commands = {"02": cmd1, "01": cmd2}
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO):
run_commands(commands=commands)
- expected_messages = [
- 'DEBUG: Running user-provided snap commands']
+ expected_messages = ["DEBUG: Running user-provided snap commands"]
for message in expected_messages:
self.assertIn(message, self.logs.getvalue())
- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
+ self.assertEqual("MOM\nHI\n", util.load_file(outfile))
@skipUnlessJsonSchema()
@@ -253,164 +282,177 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_schema_warns_on_snap_not_as_dict(self):
"""If the snap configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'snap': 'wrong type'}, schema)
+ validate_cloudconfig_schema({"snap": "wrong type"}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nsnap: 'wrong type' is not of type"
- " 'object'\n",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap: 'wrong type'"
+ " is not of type 'object'\n",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_disallows_unknown_keys(self, _):
"""Unknown keys in the snap configuration emit warnings."""
validate_cloudconfig_schema(
- {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema)
+ {"snap": {"commands": ["ls"], "invalid-key": ""}}, schema
+ )
self.assertIn(
- 'WARNING: Invalid config:\nsnap: Additional properties are not'
- " allowed ('invalid-key' was unexpected)",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap: Additional"
+ " properties are not allowed ('invalid-key' was unexpected)",
+ self.logs.getvalue(),
+ )
def test_warn_schema_requires_either_commands_or_assertions(self):
"""Warn when snap configuration lacks both commands and assertions."""
- validate_cloudconfig_schema(
- {'snap': {}}, schema)
+ validate_cloudconfig_schema({"snap": {}}, schema)
self.assertIn(
- 'WARNING: Invalid config:\nsnap: {} does not have enough'
- ' properties',
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap: {} does not"
+ " have enough properties",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_warn_schema_commands_is_not_list_or_dict(self, _):
"""Warn when snap:commands config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': 'broken'}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": "broken"}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type"
- " 'object', 'array'\n",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap.commands: 'broken'"
+ " is not of type 'object', 'array'\n",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_warn_schema_when_commands_is_empty(self, _):
"""Emit warnings when snap:commands is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {}}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": {}}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nsnap.commands: [] is too short\n"
- "WARNING: Invalid config:\nsnap.commands: {} does not have enough"
- " properties\n",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap.commands: [] is"
+ " too short\nWARNING: Invalid cloud-config provided:\n"
+ "snap.commands: {} does not have enough properties\n",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_are_list_or_dict(self, _):
"""No warnings when snap:commands are either a list or dict."""
+ validate_cloudconfig_schema({"snap": {"commands": ["valid"]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'commands': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
+ {"snap": {"commands": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_values_are_invalid_type(self, _):
"""Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"commands": [123]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'commands': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 123}}}, schema)
+ {"snap": {"commands": {"01": 123}}}, schema
+ )
self.assertEqual(
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.commands.0: 123 is not valid under any of the given"
" schemas\n"
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.commands.01: 123 is not valid under any of the given"
" schemas\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_list_values_are_invalid_type(self, _):
"""Warnings when snap:commands list values are wrong type (e.g. int)"""
validate_cloudconfig_schema(
- {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ {"snap": {"commands": [["snap", "install", 123]]}}, schema
+ )
validate_cloudconfig_schema(
- {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+ {"snap": {"commands": {"01": ["snap", "install", 123]}}}, schema
+ )
self.assertEqual(
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.commands.0: ['snap', 'install', 123] is not valid under any"
" of the given schemas\n",
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.commands.0: ['snap', 'install', 123] is not valid under any"
" of the given schemas\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_assertions_values_are_invalid_type(self, _):
"""Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"assertions": [123]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'assertions': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 123}}}, schema)
+ {"snap": {"assertions": {"01": 123}}}, schema
+ )
self.assertEqual(
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.assertions.0: 123 is not of type 'string'\n"
- "WARNING: Invalid config:\n"
+ "WARNING: Invalid cloud-config provided:\n"
"snap.assertions.01: 123 is not of type 'string'\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': 'broken'}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": "broken"}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of"
- " type 'object', 'array'\n",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions:"
+ " 'broken' is not of type 'object', 'array'\n",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_warn_schema_when_assertions_is_empty(self, _):
"""Emit warnings when snap:assertions is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {}}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": {}}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nsnap.assertions: [] is too short\n"
- "WARNING: Invalid config:\nsnap.assertions: {} does not have"
- " enough properties\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions: []"
+ " is too short\n"
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions: {}"
+ " does not have enough properties\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_schema_when_assertions_are_list_or_dict(self, _):
"""No warnings when snap:assertions are a list or dict."""
validate_cloudconfig_schema(
- {'snap': {'assertions': ['valid']}}, schema)
+ {"snap": {"assertions": ["valid"]}}, schema
+ )
validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
+ {"snap": {"assertions": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo", "bye"]]},
- "command entries can be duplicate.")
+ {"commands": [["echo", "bye"], ["echo", "bye"]]},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_array_string(self):
"""Duplicated commands array/string entries are allowed."""
self.assertSchemaValid(
- {'commands': ["echo bye", "echo bye"]},
- "command entries can be duplicate.")
+ {"commands": ["echo bye", "echo bye"]},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_dict_array(self):
"""Duplicated commands dict/array entries are allowed."""
self.assertSchemaValid(
- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
- "command entries can be duplicate.")
+ {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_dict_string(self):
"""Duplicated commands dict/string entries are allowed."""
self.assertSchemaValid(
- {'commands': {'00': "echo bye", '01': "echo bye"}},
- "command entries can be duplicate.")
+ {"commands": {"00": "echo bye", "01": "echo bye"}},
+ "command entries can be duplicate.",
+ )
class TestHandle(CiTestCase):
@@ -421,92 +463,122 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.validate_cloudconfig_schema")
def test_handle_no_config(self, m_schema, m_add, m_run):
"""When no snap-related configuration is provided, nothing happens."""
cfg = {}
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
self.assertIn(
"DEBUG: Skipping module named snap, no 'snap' key in config",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
m_schema.assert_not_called()
m_add.assert_not_called()
m_run.assert_not_called()
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
- def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add,
- m_run):
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
+ def test_handle_skips_squashfuse_when_unconfigured(
+ self, m_squash, m_add, m_run
+ ):
"""When squashfuse_in_container is unset, don't attempt to install."""
handle(
- 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None)
+ "snap", cfg={"snap": {}}, cloud=None, log=self.logger, args=None
+ )
handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': None}},
- cloud=None, log=self.logger, args=None)
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": None}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': False}},
- cloud=None, log=self.logger, args=None)
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": False}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
self.assertEqual([], m_squash.call_args_list) # No calls
# snap configuration missing assertions and commands will default to []
self.assertIn(mock.call([]), m_add.call_args_list)
self.assertIn(mock.call([]), m_run.call_args_list)
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
def test_handle_tries_to_install_squashfuse(self, m_squash):
"""If squashfuse_in_container is True, try installing squashfuse."""
- cfg = {'snap': {'squashfuse_in_container': True}}
+ cfg = {"snap": {"squashfuse_in_container": True}}
mycloud = FakeCloud(None)
- handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- self.assertEqual(
- [mock.call(mycloud)], m_squash.call_args_list)
+ handle("snap", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+ self.assertEqual([mock.call(mycloud)], m_squash.call_args_list)
def test_handle_runs_commands_provided(self):
"""If commands are specified as a list, run them."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cfg = {
- 'snap': {'commands': ['echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile]}}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ "snap": {
+ "commands": [
+ 'echo "HI" >> %s' % outfile,
+ 'echo "MOM" >> %s' % outfile,
+ ]
+ }
+ }
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
with mock.patch(mock_path, new_callable=StringIO):
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
cfg = {
- 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}}
+ "snap": {"assertions": [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}
+ }
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
- content = '\n'.join(cfg['snap']['assertions'])
- util.write_file(compare_file, content.encode('utf-8'))
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ content = "\n".join(cfg["snap"]["assertions"])
+ util.write_file(compare_file, content.encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
"""Any provided configuration is runs validate_cloudconfig_schema."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- cfg = {'snap': {'invalid': ''}} # Generates schema warning
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ cfg = {"snap": {"invalid": ""}} # Generates schema warning
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
self.assertEqual(
- "WARNING: Invalid config:\nsnap: Additional properties are not"
- " allowed ('invalid' was unexpected)\n",
- self.logs.getvalue())
+ "WARNING: Invalid cloud-config provided:\nsnap: Additional"
+ " properties are not allowed ('invalid' was unexpected)\n",
+ self.logs.getvalue(),
+ )
class TestMaybeInstallSquashFuse(CiTestCase):
@@ -517,48 +589,52 @@ class TestMaybeInstallSquashFuse(CiTestCase):
super(TestMaybeInstallSquashFuse, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
"""maybe_install_squashfuse does nothing when not on a container."""
m_container.return_value = False
maybe_install_squashfuse(cloud=FakeCloud(None))
self.assertEqual([mock.call()], m_container.call_args_list)
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
"""maybe_install_squashfuse logs and raises package install errors."""
m_container.return_value = True
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
+ "Some apt error"
+ )
with self.assertRaises(RuntimeError) as context_manager:
maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
"""maybe_install_squashfuse logs and raises package update errors."""
m_container.return_value = True
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
+ "Some apt error"
+ )
with self.assertRaises(RuntimeError) as context_manager:
maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_happy_path(self, m_container):
"""maybe_install_squashfuse logs and raises package install errors."""
m_container.return_value = True
distro = mock.MagicMock() # No errors raised
maybe_install_squashfuse(cloud=FakeCloud(distro))
self.assertEqual(
- [mock.call()], distro.update_package_sources.call_args_list)
+ [mock.call()], distro.update_package_sources.call_args_list
+ )
self.assertEqual(
- [mock.call(['squashfuse'])],
- distro.install_packages.call_args_list)
+ [mock.call(["squashfuse"])], distro.install_packages.call_args_list
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/config/test_cc_spacewalk.py
index 26f7648f..e1f42968 100644
--- a/tests/unittests/test_handler/test_handler_spacewalk.py
+++ b/tests/unittests/config/test_cc_spacewalk.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_spacewalk
-from cloudinit import subp
-
-from cloudinit.tests import helpers
-
import logging
from unittest import mock
+from cloudinit import subp
+from cloudinit.config import cc_spacewalk
+from tests.unittests import helpers
+
LOG = logging.getLogger(__name__)
class TestSpacewalk(helpers.TestCase):
space_cfg = {
- 'spacewalk': {
- 'server': 'localhost',
- 'profile_name': 'test',
+ "spacewalk": {
+ "server": "localhost",
+ "profile_name": "test",
}
}
@@ -31,12 +30,19 @@ class TestSpacewalk(helpers.TestCase):
@mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
def test_do_register(self, mock_subp):
- cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
- mock_subp.assert_called_with([
- 'rhnreg_ks',
- '--serverUrl', 'https://localhost/XMLRPC',
- '--profilename', 'test',
- '--sslCACert', cc_spacewalk.def_ca_cert_path,
- ], capture=False)
+ cc_spacewalk.do_register(**self.space_cfg["spacewalk"])
+ mock_subp.assert_called_with(
+ [
+ "rhnreg_ks",
+ "--serverUrl",
+ "https://localhost/XMLRPC",
+ "--profilename",
+ "test",
+ "--sslCACert",
+ cc_spacewalk.def_ca_cert_path,
+ ],
+ capture=False,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ssh.py b/tests/unittests/config/test_cc_ssh.py
index 87ccdb60..d66cc4cb 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -1,17 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
import os.path
-from cloudinit.config import cc_ssh
from cloudinit import ssh_util
-from cloudinit.tests.helpers import CiTestCase, mock
-import logging
+from cloudinit.config import cc_ssh
+from tests.unittests.helpers import CiTestCase, mock
LOG = logging.getLogger(__name__)
MODPATH = "cloudinit.config.cc_ssh."
-KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES
- if name not in 'dsa']
+KEY_NAMES_NO_DSA = [
+ name for name in cc_ssh.GENERATE_KEY_NAMES if name not in "dsa"
+]
@mock.patch(MODPATH + "ssh_util.setup_user_keys")
@@ -20,39 +21,45 @@ class TestHandleSsh(CiTestCase):
def _publish_hostkey_test_setup(self):
self.test_hostkeys = {
- 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
- 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
- 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
- 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
+ "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"),
+ "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"),
+ "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"),
+ "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"),
}
self.test_hostkey_files = []
hostkey_tmpdir = self.tmp_dir()
for key_type in cc_ssh.GENERATE_KEY_NAMES:
key_data = self.test_hostkeys[key_type]
- filename = 'ssh_host_%s_key.pub' % key_type
+ filename = "ssh_host_%s_key.pub" % key_type
filepath = os.path.join(hostkey_tmpdir, filename)
self.test_hostkey_files.append(filepath)
- with open(filepath, 'w') as f:
- f.write(' '.join(key_data))
+ with open(filepath, "w") as f:
+ f.write(" ".join(key_data))
- cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key")
def test_apply_credentials_with_user(self, m_setup_keys):
"""Apply keys for the given user and root."""
keys = ["key1"]
user = "clouduser"
cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_no_user(self, m_setup_keys):
"""Apply keys for root only."""
keys = ["key1"]
user = None
cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
"""Apply keys for the given user and disable root ssh."""
@@ -62,9 +69,13 @@ class TestHandleSsh(CiTestCase):
cc_ssh.apply_credentials(keys, user, True, options)
options = options.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
"""Apply keys no user and disable root ssh."""
@@ -74,14 +85,15 @@ class TestHandleSsh(CiTestCase):
cc_ssh.apply_credentials(keys, user, True, options)
options = options.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_no_cfg(self, m_path_exists, m_nug, m_glob, m_setup_keys):
"""Test handle with no config ignores generating existing keyfiles."""
cfg = {}
keys = ["key1"]
@@ -90,28 +102,33 @@ class TestHandleSsh(CiTestCase):
m_path_exists.return_value = True
m_nug.return_value = ([], {})
cc_ssh.PUBLISH_HOST_KEYS = False
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
- m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+ m_glob.assert_called_once_with("/etc/ssh/ssh_host_*key*")
self.assertIn(
- [mock.call('/etc/ssh/ssh_host_rsa_key'),
- mock.call('/etc/ssh/ssh_host_dsa_key'),
- mock.call('/etc/ssh/ssh_host_ecdsa_key'),
- mock.call('/etc/ssh/ssh_host_ed25519_key')],
- m_path_exists.call_args_list)
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ [
+ mock.call("/etc/ssh/ssh_host_rsa_key"),
+ mock.call("/etc/ssh/ssh_host_dsa_key"),
+ mock.call("/etc/ssh/ssh_host_ecdsa_key"),
+ mock.call("/etc/ssh/ssh_host_ed25519_key"),
+ ],
+ m_path_exists.call_args_list,
+ )
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_dont_allow_public_ssh_keys(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test allow_public_ssh_keys=False ignores ssh public keys from
- platform.
+ platform.
"""
cfg = {"allow_public_ssh_keys": False}
keys = ["key1"]
@@ -120,21 +137,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(), user),
- mock.call(set(), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(), user),
+ mock.call(set(), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_no_cfg_and_default_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with no config and a default distro user."""
cfg = {}
keys = ["key1"]
@@ -143,21 +164,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_cfg_with_explicit_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with explicit disable_root and a default distro user."""
# This test is identical to test_handle_no_cfg_and_default_root,
# except this uses an explicit cfg value
@@ -168,21 +193,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_cfg_without_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with disable_root == False."""
# When disable_root == False, the ssh redirect for root is skipped
cfg = {"disable_root": False}
@@ -192,96 +221,111 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_default(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
cfg = {}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_enable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = False
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
+ cfg = {"ssh_publish_hostkeys": {"enabled": True}}
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_disable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': False}}
+ cfg = {"ssh_publish_hostkeys": {"enabled": False}}
cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
cloud.datasource.publish_host_keys.assert_not_called()
@@ -290,61 +334,75 @@ class TestHandleSsh(CiTestCase):
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': ['dsa', 'rsa']}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in ['ecdsa', 'ed25519']]
+ cfg = {
+ "ssh_publish_hostkeys": {
+ "enabled": True,
+ "blacklist": ["dsa", "rsa"],
+ }
+ }
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in ["ecdsa", "ed25519"]
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_empty_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': []}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in cc_ssh.GENERATE_KEY_NAMES]
+ cfg = {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}}
+ expected_call = [
+ self.test_hostkeys[key_type]
+ for key_type in cc_ssh.GENERATE_KEY_NAMES
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "util.write_file")
@@ -369,36 +427,40 @@ class TestHandleSsh(CiTestCase):
cfg["ssh_keys"][public_name] = public_value
cfg["ssh_keys"][cert_name] = cert_value
- expected_calls.extend([
- mock.call(
- '/etc/ssh/ssh_host_{}_key'.format(key_type),
- private_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key.pub'.format(key_type),
- public_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type),
- cert_value,
- 384
- ),
- mock.call(
- '/etc/ssh/sshd_config',
- ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub'
- '\n'.format(key_type)),
- preserve_mode=True
- )
- ])
+ expected_calls.extend(
+ [
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key".format(key_type),
+ private_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key.pub".format(key_type),
+ public_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type),
+ cert_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub"
+ "\n".format(key_type),
+ preserve_mode=True,
+ ),
+ ]
+ )
# Run the handler.
m_nug.return_value = ([], {})
- with mock.patch(MODPATH + 'ssh_util.parse_ssh_config',
- return_value=[]):
- cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'),
- LOG, None)
+ with mock.patch(
+ MODPATH + "ssh_util.parse_ssh_config", return_value=[]
+ ):
+ cc_ssh.handle(
+ "name", cfg, self.tmp_cloud(distro="ubuntu"), LOG, None
+ )
# Check that all expected output has been done.
for call_ in expected_calls:
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/config/test_cc_timezone.py
index 50c45363..f76397b7 100644
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ b/tests/unittests/config/test_cc_timezone.py
@@ -4,23 +4,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_timezone
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
import logging
import shutil
import tempfile
from io import BytesIO
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_timezone
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -29,38 +24,30 @@ class TestTimezone(t_help.FilesystemMockingTestCase):
super(TestTimezone, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
self.patchOS(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
def test_set_timezone_sles(self):
cfg = {
- 'timezone': 'Tatooine/Bestine',
+ "timezone": "Tatooine/Bestine",
}
- cc = self._get_cloud('sles')
+ cc = get_cloud("sles")
# Create a dummy timezone file
- dummy_contents = '0123456789abcdefgh'
- util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'],
- dummy_contents)
+ dummy_contents = "0123456789abcdefgh"
+ util.write_file(
+ "/usr/share/zoneinfo/%s" % cfg["timezone"], dummy_contents
+ )
- cc_timezone.handle('cc_timezone', cfg, cc, LOG, [])
+ cc_timezone.handle("cc_timezone", cfg, cc, LOG, [])
- contents = util.load_file('/etc/sysconfig/clock', decode=False)
+ contents = util.load_file("/etc/sysconfig/clock", decode=False)
n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg))
+ self.assertEqual({"TIMEZONE": cfg["timezone"]}, dict(n_cfg))
- contents = util.load_file('/etc/localtime')
+ contents = util.load_file("/etc/localtime")
self.assertEqual(dummy_contents, contents.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
new file mode 100644
index 00000000..2037c5ed
--- /dev/null
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -0,0 +1,391 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.config.cc_ubuntu_advantage import (
+ configure_ua,
+ handle,
+ maybe_install_ua_tools,
+ schema,
+)
+from cloudinit.config.schema import validate_cloudconfig_schema
+from tests.unittests.helpers import (
+ CiTestCase,
+ SchemaTestCaseMixin,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+# Module path used in mocks
+MPATH = "cloudinit.config.cc_ubuntu_advantage"
+
+
+class FakeCloud(object):
+ def __init__(self, distro):
+ self.distro = distro
+
+
+class TestConfigureUA(CiTestCase):
+
+ with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
+
+ def setUp(self):
+ super(TestConfigureUA, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_error(self, m_subp):
+ """Errors from ua attach command are raised."""
+ m_subp.side_effect = subp.ProcessExecutionError(
+ "Invalid token SomeToken"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token="SomeToken")
+ self.assertEqual(
+ "Failure attaching Ubuntu Advantage:\nUnexpected error while"
+ " running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid token SomeToken\nStderr: -",
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_token(self, m_subp):
+ """When token is provided, attach the machine to ua using the token."""
+ configure_ua(token="SomeToken")
+ m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_on_service_error(self, m_subp):
+ """all services should be enabled and then any failures raised"""
+
+ def fake_subp(cmd, capture=None):
+ fail_cmds = [
+ ["ua", "enable", "--assume-yes", svc] for svc in ["esm", "cc"]
+ ]
+ if cmd in fail_cmds and capture:
+ svc = cmd[-1]
+ raise subp.ProcessExecutionError(
+ "Invalid {} credentials".format(svc.upper())
+ )
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token="SomeToken", enable=["esm", "cc", "fips"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "esm"], capture=True
+ ),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "cc"], capture=True
+ ),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertIn(
+ 'WARNING: Failure enabling "esm":\nUnexpected error'
+ " while running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid ESM credentials\nStderr: -\n",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ 'WARNING: Failure enabling "cc":\nUnexpected error'
+ " while running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid CC credentials\nStderr: -\n",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_empty_services(self, m_subp):
+ """When services is an empty list, do not auto-enable attach."""
+ configure_ua(token="SomeToken", enable=[])
+ m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_specific_services(self, m_subp):
+ """When services a list, only enable specific services."""
+ configure_ua(token="SomeToken", enable=["fips"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_string_services(self, m_subp):
+ """When services a string, treat as singleton list and warn"""
+ configure_ua(token="SomeToken", enable="fips")
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertEqual(
+ "WARNING: ubuntu_advantage: enable should be a list, not a"
+ " string; treating as a single enable\n"
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_weird_services(self, m_subp):
+ """When services not string or list, warn but still attach"""
+ configure_ua(token="SomeToken", enable={"deffo": "wont work"})
+ self.assertEqual(
+ m_subp.call_args_list, [mock.call(["ua", "attach", "SomeToken"])]
+ )
+ self.assertEqual(
+ "WARNING: ubuntu_advantage: enable should be a list, not a"
+ " dict; skipping enabling services\n"
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+
+ with_logs = True
+ schema = schema
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
+ """If ubuntu_advantage configuration is not a dict, emit a warning."""
+ validate_cloudconfig_schema({"ubuntu_advantage": "wrong type"}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'wrong type' is not of type 'object'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_schema_disallows_unknown_keys(self, _cfg, _):
+ """Unknown keys in ubuntu_advantage configuration emit warnings."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"token": "winner", "invalid-key": ""}},
+ schema,
+ )
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " Additional properties are not allowed ('invalid-key' was"
+ " unexpected)",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_warn_schema_requires_token(self, _cfg, _):
+ """Warn if ubuntu_advantage configuration lacks token."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"enable": ["esm"]}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'token' is a required property\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
+ """Warn when ubuntu_advantage:enable config is not a list."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"enable": "needslist"}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'token' is a required property\nubuntu_advantage.enable:"
+ " 'needslist' is not of type 'array'\n",
+ self.logs.getvalue(),
+ )
+
+
+class TestHandle(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestHandle, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.validate_cloudconfig_schema" % MPATH)
+ def test_handle_no_config(self, m_schema):
+ """When no ua-related configuration is provided, nothing happens."""
+ cfg = {}
+ handle("ua-test", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertIn(
+ "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
+ " configuration found",
+ self.logs.getvalue(),
+ )
+ m_schema.assert_not_called()
+
+ @mock.patch("%s.configure_ua" % MPATH)
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ def test_handle_tries_to_install_ubuntu_advantage_tools(
+ self, m_install, m_cfg
+ ):
+ """If ubuntu_advantage is provided, try installing ua-tools package."""
+ cfg = {"ubuntu_advantage": {"token": "valid"}}
+ mycloud = FakeCloud(None)
+ handle("nomatter", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+ m_install.assert_called_once_with(mycloud)
+
+ @mock.patch("%s.configure_ua" % MPATH)
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ def test_handle_passes_credentials_and_services_to_configure_ua(
+ self, m_install, m_configure_ua
+ ):
+ """All ubuntu_advantage config keys are passed to configure_ua."""
+ cfg = {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}}
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
+ self, m_configure_ua
+ ):
+ """Warning when ubuntu-advantage key is present with new config"""
+ cfg = {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}}
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ " will attempt to continue.",
+ self.logs.getvalue().splitlines()[0],
+ )
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+ def test_handle_error_on_deprecated_commands_key_dashed(self):
+ """Error when commands is present in ubuntu-advantage key."""
+ cfg = {"ubuntu-advantage": {"commands": "nogo"}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception),
+ )
+
+ def test_handle_error_on_deprecated_commands_key_underscored(self):
+ """Error when commands is present in ubuntu_advantage key."""
+ cfg = {"ubuntu_advantage": {"commands": "nogo"}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_handle_prefers_new_style_config(self, m_configure_ua):
+ """ubuntu_advantage should be preferred over ubuntu-advantage"""
+ cfg = {
+ "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]},
+ "ubuntu_advantage": {"token": "token", "enable": ["esm"]},
+ }
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ " will attempt to continue.",
+ self.logs.getvalue().splitlines()[0],
+ )
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+
+class TestMaybeInstallUATools(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestMaybeInstallUATools, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
+ """Do nothing if ubuntu-advantage-tools already exists."""
+ m_which.return_value = "/usr/bin/ua" # already installed
+ distro = mock.MagicMock()
+ distro.update_package_sources.side_effect = RuntimeError(
+ "Some apt error"
+ )
+ maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
+ """maybe_install_ua_tools logs and raises apt update errors."""
+ m_which.return_value = None
+ distro = mock.MagicMock()
+ distro.update_package_sources.side_effect = RuntimeError(
+ "Some apt error"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_raises_install_errors(self, m_which):
+ """maybe_install_ua_tools logs and raises package install errors."""
+ m_which.return_value = None
+ distro = mock.MagicMock()
+ distro.update_package_sources.return_value = None
+ distro.install_packages.side_effect = RuntimeError(
+ "Some install error"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ self.assertEqual("Some install error", str(context_manager.exception))
+ self.assertIn(
+ "Failed to install ubuntu-advantage-tools\n", self.logs.getvalue()
+ )
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_happy_path(self, m_which):
+ """maybe_install_ua_tools installs ubuntu-advantage-tools."""
+ m_which.return_value = None
+ distro = mock.MagicMock() # No errors raised
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ distro.update_package_sources.assert_called_once_with()
+ distro.install_packages.assert_called_once_with(
+ ["ubuntu-advantage-tools"]
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
index 504ba356..4987492d 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -3,17 +3,20 @@
import copy
import os
-from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
-from cloudinit.config.schema import (
- SchemaValidationError, validate_cloudconfig_schema)
from cloudinit.config import cc_ubuntu_drivers as drivers
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ validate_cloudconfig_schema,
+)
from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
"ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
- "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+ "(choose from 'list', 'autoinstall', 'devices', 'debug')\n"
+)
# The tests in this module call helper methods which are decorated with
@@ -23,8 +26,8 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
# disable it for the entire module:
# pylint: disable=no-value-for-parameter
-class AnyTempScriptAndDebconfFile(object):
+class AnyTempScriptAndDebconfFile(object):
def __init__(self, tmp_dir, debconf_file):
self.tmp_dir = tmp_dir
self.debconf_file = debconf_file
@@ -33,60 +36,68 @@ class AnyTempScriptAndDebconfFile(object):
if not len(cmd) == 2:
return False
script, debconf_file = cmd
- if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')):
+ if bool(script.startswith(self.tmp_dir) and script.endswith(".sh")):
return debconf_file == self.debconf_file
return False
class TestUbuntuDrivers(CiTestCase):
- cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
- install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
+ cfg_accepted = {"drivers": {"nvidia": {"license-accepted": True}}}
+ install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia"]
with_logs = True
@skipUnlessJsonSchema()
def test_schema_requires_boolean_for_license_accepted(self):
with self.assertRaisesRegex(
- SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
+ SchemaValidationError, ".*license-accepted.*TRUE.*boolean"
+ ):
validate_cloudconfig_schema(
- {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
- schema=drivers.schema, strict=True)
+ {"drivers": {"nvidia": {"license-accepted": "TRUE"}}},
+ schema=drivers.schema,
+ strict=True,
+ )
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
- def _assert_happy_path_taken(
- self, config, m_which, m_subp, m_tmp):
+ def _assert_happy_path_taken(self, config, m_which, m_subp, m_tmp):
"""Positive path test through handle. Package should be installed."""
tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
- drivers.handle('ubuntu_drivers', config, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
+ drivers.handle("ubuntu_drivers", config, myCloud, None, None)
+ self.assertEqual(
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
def test_handle_does_package_install(self):
self._assert_happy_path_taken(self.cfg_accepted)
def test_trueish_strings_are_considered_approval(self):
- for true_value in ['yes', 'true', 'on', '1']:
+ for true_value in ["yes", "true", "on", "1"]:
new_config = copy.deepcopy(self.cfg_accepted)
- new_config['drivers']['nvidia']['license-accepted'] = true_value
+ new_config["drivers"]["nvidia"]["license-accepted"] = true_value
self._assert_happy_path_taken(new_config)
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp")
@mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
- self, m_which, m_subp, m_tmp):
+ self, m_which, m_subp, m_tmp
+ ):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
@@ -94,84 +105,103 @@ class TestUbuntuDrivers(CiTestCase):
if cmd[0].startswith(tdir):
return
raise ProcessExecutionError(
- stdout='No drivers found for installation.\n', exit_code=1)
+ stdout="No drivers found for installation.\n", exit_code=1
+ )
+
m_subp.side_effect = fake_subp
with self.assertRaises(Exception):
drivers.handle(
- 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
+ "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
+ )
self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
- self.assertIn('ubuntu-drivers found no drivers for installation',
- self.logs.getvalue())
-
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+ self.assertIn(
+ "ubuntu-drivers found no drivers for installation",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
def _assert_inert_with_config(self, config, m_which, m_subp):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
- drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+ drivers.handle("ubuntu_drivers", config, myCloud, None, None)
self.assertEqual(0, myCloud.distro.install_packages.call_count)
self.assertEqual(0, m_subp.call_count)
def test_handle_inert_if_license_not_accepted(self):
"""Ensure we don't do anything if the license is rejected."""
self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': False}}})
+ {"drivers": {"nvidia": {"license-accepted": False}}}
+ )
def test_handle_inert_if_garbage_in_license_field(self):
"""Ensure we don't do anything if unknown text is in license field."""
self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
+ {"drivers": {"nvidia": {"license-accepted": "garbage"}}}
+ )
def test_handle_inert_if_no_license_key(self):
"""Ensure we don't do anything if no license key."""
- self._assert_inert_with_config({'drivers': {'nvidia': {}}})
+ self._assert_inert_with_config({"drivers": {"nvidia": {}}})
def test_handle_inert_if_no_nvidia_key(self):
"""Ensure we don't do anything if other license accepted."""
self._assert_inert_with_config(
- {'drivers': {'acme': {'license-accepted': True}}})
+ {"drivers": {"acme": {"license-accepted": True}}}
+ )
def test_handle_inert_if_string_given(self):
"""Ensure we don't do anything if string refusal given."""
- for false_value in ['no', 'false', 'off', '0']:
+ for false_value in ["no", "false", "off", "0"]:
self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': false_value}}})
+ {"drivers": {"nvidia": {"license-accepted": false_value}}}
+ )
@mock.patch(MPATH + "install_drivers")
def test_handle_no_drivers_does_nothing(self, m_install_drivers):
"""If no 'drivers' key in the config, nothing should be done."""
myCloud = mock.MagicMock()
myLog = mock.MagicMock()
- drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
- self.assertIn('Skipping module named',
- myLog.debug.call_args_list[0][0][0])
+ drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, myLog, None)
+ self.assertIn(
+ "Skipping module named", myLog.debug.call_args_list[0][0][0]
+ )
self.assertEqual(0, m_install_drivers.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
- self, m_which, m_subp, m_tmp):
+ self, m_which, m_subp, m_tmp
+ ):
"""If 'ubuntu-drivers' is present, no package install should occur."""
tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
pkg_install = mock.MagicMock()
- drivers.install_drivers(self.cfg_accepted['drivers'],
- pkg_install_func=pkg_install)
+ drivers.install_drivers(
+ self.cfg_accepted["drivers"], pkg_install_func=pkg_install
+ )
self.assertEqual(0, pkg_install.call_count)
- self.assertEqual([mock.call('ubuntu-drivers')],
- m_which.call_args_list)
+ self.assertEqual([mock.call("ubuntu-drivers")], m_which.call_args_list)
self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
def test_install_drivers_rejects_invalid_config(self):
"""install_drivers should raise TypeError if not given a config dict"""
@@ -184,10 +214,11 @@ class TestUbuntuDrivers(CiTestCase):
@mock.patch(MPATH + "subp.subp")
@mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
- self, m_which, m_subp, m_tmp):
+ self, m_which, m_subp, m_tmp
+ ):
"""Older ubuntu-drivers versions should emit message and raise error"""
tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
@@ -195,50 +226,68 @@ class TestUbuntuDrivers(CiTestCase):
if cmd[0].startswith(tdir):
return
raise ProcessExecutionError(
- stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)
+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2
+ )
+
m_subp.side_effect = fake_subp
with self.assertRaises(Exception):
drivers.handle(
- 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
+ "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
+ )
self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
- self.assertIn('WARNING: the available version of ubuntu-drivers is'
- ' too old to perform requested driver installation',
- self.logs.getvalue())
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+ self.assertIn(
+ "WARNING: the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation",
+ self.logs.getvalue(),
+ )
# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
cfg_accepted = {
- 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
- install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
+ "drivers": {"nvidia": {"license-accepted": True, "version": "123"}}
+ }
+ install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia:123"]
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
version_none_cfg = {
- 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
- drivers.handle(
- 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
+ "drivers": {"nvidia": {"license-accepted": True, "version": None}}
+ }
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None, None)
self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
- m_subp.call_args_list)
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(["ubuntu-drivers", "install", "--gpgpu", "nvidia"]),
+ ],
+ m_subp.call_args_list,
+ )
def test_specifying_a_version_doesnt_override_license_acceptance(self):
- self._assert_inert_with_config({
- 'drivers': {'nvidia': {'license-accepted': False,
- 'version': '123'}}
- })
+ self._assert_inert_with_config(
+ {
+ "drivers": {
+ "nvidia": {"license-accepted": False, "version": "123"}
+ }
+ }
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py
new file mode 100644
index 00000000..2bbc16f4
--- /dev/null
+++ b/tests/unittests/config/test_cc_update_etc_hosts.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_update_etc_hosts
+from tests.unittests import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostsFile(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestHostsFile, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def test_write_etc_hosts_suse_localhost(self):
+ cfg = {
+ "manage_etc_hosts": "localhost",
+ "hostname": "cloud-init.test.us",
+ }
+ os.makedirs("%s/etc/" % self.tmp)
+ hosts_content = "192.168.1.1 blah.blah.us blah\n"
+ fout = open("%s/etc/hosts" % self.tmp, "w")
+ fout.write(hosts_content)
+ fout.close()
+ distro = self._fetch_distro("sles")
+ distro.hosts_fn = "%s/etc/hosts" % self.tmp
+ paths = helpers.Paths({})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ contents = util.load_file("%s/etc/hosts" % self.tmp)
+ if "127.0.1.1\tcloud-init.test.us\tcloud-init" not in contents:
+ self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
+ if "192.168.1.1\tblah.blah.us\tblah" not in contents:
+ self.assertIsNone("Default etc/hosts content modified")
+
+ @t_help.skipUnlessJinja()
+ def test_write_etc_hosts_suse_template(self):
+ cfg = {
+ "manage_etc_hosts": "template",
+ "hostname": "cloud-init.test.us",
+ }
+ shutil.copytree(
+ t_help.cloud_init_project_dir("templates"),
+ "%s/etc/cloud/templates" % self.tmp,
+ )
+ distro = self._fetch_distro("sles")
+ paths = helpers.Paths({})
+ paths.template_tpl = "%s" % self.tmp + "/etc/cloud/templates/%s.tmpl"
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ contents = util.load_file("%s/etc/hosts" % self.tmp)
+ if "127.0.1.1 cloud-init.test.us cloud-init" not in contents:
+ self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
+ if "::1 cloud-init.test.us cloud-init" not in contents:
+ self.assertIsNone("No entry for 127.0.0.1 in etc/hosts")
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
new file mode 100644
index 00000000..0bd3c980
--- /dev/null
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -0,0 +1,268 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_users_groups
+from tests.unittests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_users_groups"
+
+
+@mock.patch("cloudinit.distros.ubuntu.Distro.create_group")
+@mock.patch("cloudinit.distros.ubuntu.Distro.create_user")
+class TestHandleUsersGroups(CiTestCase):
+ """Test cc_users_groups handling of config."""
+
+ with_logs = True
+
+ def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
+ """Test handle with no config will not create users or groups."""
+ cfg = {} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_user.assert_not_called()
+ m_group.assert_not_called()
+
+ def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
+ """When users in config, create users with distro.create_user."""
+ cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_group.assert_not_called()
+
+ @mock.patch("cloudinit.distros.freebsd.Distro.create_group")
+ @mock.patch("cloudinit.distros.freebsd.Distro.create_user")
+ def test_handle_users_in_cfg_calls_create_users_on_bsd(
+ self,
+ m_fbsd_user,
+ m_fbsd_group,
+ m_linux_user,
+ m_linux_group,
+ ):
+ """When users in config, create users with freebsd.create_user."""
+ cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "freebsd",
+ "lock_passwd": True,
+ "groups": ["wheel"],
+ "shell": "/bin/tcsh",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="freebsd", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_fbsd_user.call_args_list,
+ [
+ mock.call(
+ "freebsd",
+ groups="wheel",
+ lock_passwd=True,
+ shell="/bin/tcsh",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_fbsd_group.assert_not_called()
+ m_linux_group.assert_not_called()
+ m_linux_user.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
+ """When ssh_redirect_user is True pass default user and cloud keys."""
+ cfg = {
+ "users": ["default", {"name": "me2", "ssh_redirect_user": True}]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call(
+ "me2",
+ cloud_public_ssh_keys=["key1"],
+ default=False,
+ ssh_redirect_user="ubuntu",
+ ),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
+ """When ssh_redirect_user is 'default' pass default username."""
+ cfg = {
+ "users": [
+ "default",
+ {"name": "me2", "ssh_redirect_user": "default"},
+ ]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call(
+ "me2",
+ cloud_public_ssh_keys=["key1"],
+ default=False,
+ ssh_redirect_user="ubuntu",
+ ),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is not 'default'."""
+ cfg = {
+ "users": [
+ "default",
+ {"name": "me2", "ssh_redirect_user": "snowflake"},
+ ]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ with self.assertRaises(ValueError) as context_manager:
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_group.assert_not_called()
+ self.assertEqual(
+ "Not creating user me2. Invalid value of ssh_redirect_user:"
+ " snowflake. Expected values: true, default or false.",
+ str(context_manager.exception),
+ )
+
+ def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
+ """When unspecified ssh_redirect_user is false and not set up."""
+ cfg = {"users": ["default", {"name": "me2"}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is True and no default user present."""
+ cfg = {
+ "users": ["default", {"name": "me2", "ssh_redirect_user": True}]
+ }
+ # System config defines *no* default user for the distro.
+ sys_cfg = {}
+ metadata = {} # no public-keys defined
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_user.assert_called_once_with("me2", default=False)
+ m_group.assert_not_called()
+ self.assertEqual(
+ "WARNING: Ignoring ssh_redirect_user: True for me2. No"
+ " default_user defined. Perhaps missing"
+ " cloud configuration users: [default, ..].\n",
+ self.logs.getvalue(),
+ )
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/config/test_cc_write_files.py
index 727681d3..faea5885 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/config/test_cc_write_files.py
@@ -7,13 +7,15 @@ import io
import shutil
import tempfile
-from cloudinit.config.cc_write_files import (
- handle, decode_perms, write_files)
from cloudinit import log as logging
from cloudinit import util
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+from cloudinit.config.cc_write_files import decode_perms, handle, write_files
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
LOG = logging.getLogger(__name__)
@@ -35,73 +37,91 @@ write_files:
"""
YAML_CONTENT_EXPECTED = {
- '/usr/bin/hello': "#!/bin/sh\necho hello world\n",
- '/wark': "foobar\n",
- '/tmp/message': "hi mom line 1\nhi mom line 2\n",
+ "/usr/bin/hello": "#!/bin/sh\necho hello world\n",
+ "/wark": "foobar\n",
+ "/tmp/message": "hi mom line 1\nhi mom line 2\n",
}
VALID_SCHEMA = {
- 'write_files': [
- {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
- 'path': '/some', 'permissions': '0777'}
+ "write_files": [
+ {
+ "append": False,
+ "content": "a",
+ "encoding": "gzip",
+ "owner": "jeff",
+ "path": "/some",
+ "permissions": "0777",
+ }
]
}
INVALID_SCHEMA = { # Dropped required path key
- 'write_files': [
- {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
- 'permissions': '0777'}
+ "write_files": [
+ {
+ "append": False,
+ "content": "a",
+ "encoding": "gzip",
+ "owner": "jeff",
+ "permissions": "0777",
+ }
]
}
@skipUnlessJsonSchema()
-@mock.patch('cloudinit.config.cc_write_files.write_files')
+@mock.patch("cloudinit.config.cc_write_files.write_files")
class TestWriteFilesSchema(CiTestCase):
with_logs = True
def test_schema_validation_warns_missing_path(self, m_write_files):
"""The only required file item property is 'path'."""
- cc = self.tmp_cloud('ubuntu')
- valid_config = {'write_files': [{'path': '/some/path'}]}
- handle('cc_write_file', valid_config, cc, LOG, [])
- self.assertNotIn('Invalid config:', self.logs.getvalue())
- handle('cc_write_file', INVALID_SCHEMA, cc, LOG, [])
- self.assertIn('Invalid config:', self.logs.getvalue())
+ cc = self.tmp_cloud("ubuntu")
+ valid_config = {"write_files": [{"path": "/some/path"}]}
+ handle("cc_write_file", valid_config, cc, LOG, [])
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
+ handle("cc_write_file", INVALID_SCHEMA, cc, LOG, [])
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
self.assertIn("'path' is a required property", self.logs.getvalue())
def test_schema_validation_warns_non_string_type_for_files(
- self, m_write_files):
+ self, m_write_files
+ ):
"""Schema validation warns of non-string values for each file item."""
- cc = self.tmp_cloud('ubuntu')
- for key in VALID_SCHEMA['write_files'][0].keys():
- if key == 'append':
- key_type = 'boolean'
+ cc = self.tmp_cloud("ubuntu")
+ for key in VALID_SCHEMA["write_files"][0].keys():
+ if key == "append":
+ key_type = "boolean"
else:
- key_type = 'string'
+ key_type = "string"
invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config['write_files'][0][key] = 1
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ invalid_config["write_files"][0][key] = 1
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- mock.call('cc_write_file', invalid_config['write_files']),
- m_write_files.call_args_list)
+ mock.call("cc_write_file", invalid_config["write_files"]),
+ m_write_files.call_args_list,
+ )
self.assertIn(
- 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type),
- self.logs.getvalue())
- self.assertIn('Invalid config:', self.logs.getvalue())
+ "write_files.0.%s: 1 is not of type '%s'" % (key, key_type),
+ self.logs.getvalue(),
+ )
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
    def test_schema_validation_warns_on_additional_undefined_properties(
- self, m_write_files):
+ self, m_write_files
+ ):
"""Schema validation warns on additional undefined file properties."""
- cc = self.tmp_cloud('ubuntu')
+ cc = self.tmp_cloud("ubuntu")
invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config['write_files'][0]['bogus'] = 'value'
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ invalid_config["write_files"][0]["bogus"] = "value"
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- "Invalid config:\nwrite_files.0: Additional properties"
- " are not allowed ('bogus' was unexpected)",
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nwrite_files.0: Additional"
+ " properties are not allowed ('bogus' was unexpected)",
+ self.logs.getvalue(),
+ )
class TestWriteFiles(FilesystemMockingTestCase):
@@ -116,20 +136,21 @@ class TestWriteFiles(FilesystemMockingTestCase):
@skipUnlessJsonSchema()
def test_handler_schema_validation_warns_non_array_type(self):
"""Schema validation warns of non-array value."""
- invalid_config = {'write_files': 1}
- cc = self.tmp_cloud('ubuntu')
+ invalid_config = {"write_files": 1}
+ cc = self.tmp_cloud("ubuntu")
with self.assertRaises(TypeError):
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- 'Invalid config:\nwrite_files: 1 is not of type \'array\'',
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nwrite_files: 1 is not of type"
+ " 'array'",
+ self.logs.getvalue(),
+ )
def test_simple(self):
self.patchUtils(self.tmp)
expected = "hello world\n"
filename = "/tmp/my.file"
- write_files(
- "test_simple", [{"content": expected, "path": filename}])
+ write_files("test_simple", [{"content": expected, "path": filename}])
self.assertEqual(util.load_file(filename), expected)
def test_append(self):
@@ -141,13 +162,14 @@ class TestWriteFiles(FilesystemMockingTestCase):
util.write_file(filename, existing)
write_files(
"test_append",
- [{"content": added, "path": filename, "append": "true"}])
+ [{"content": added, "path": filename, "append": "true"}],
+ )
self.assertEqual(util.load_file(filename), expected)
def test_yaml_binary(self):
self.patchUtils(self.tmp)
data = util.load_yaml(YAML_TEXT)
- write_files("testname", data['write_files'])
+ write_files("testname", data["write_files"])
for path, content in YAML_CONTENT_EXPECTED.items():
self.assertEqual(util.load_file(path), content)
@@ -158,13 +180,13 @@ class TestWriteFiles(FilesystemMockingTestCase):
# for 'gz', 'gzip', 'gz+base64' ...
data = b"foobzr"
utf8_valid = b"foobzr"
- utf8_invalid = b'ab\xaadef'
+ utf8_invalid = b"ab\xaadef"
files = []
expected = []
- gz_aliases = ('gz', 'gzip')
- gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64')
- b64_aliases = ('base64', 'b64')
+ gz_aliases = ("gz", "gzip")
+ gz_b64_aliases = ("gz+base64", "gzip+base64", "gz+b64", "gzip+b64")
+ b64_aliases = ("base64", "b64")
datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid))
for name, data in datum:
@@ -173,11 +195,13 @@ class TestWriteFiles(FilesystemMockingTestCase):
b64 = (base64.b64encode(data), b64_aliases)
for content, aliases in (gz, gz_b64, b64):
for enc in aliases:
- cur = {'content': content,
- 'path': '/tmp/file-%s-%s' % (name, enc),
- 'encoding': enc}
+ cur = {
+ "content": content,
+ "path": "/tmp/file-%s-%s" % (name, enc),
+ "encoding": enc,
+ }
files.append(cur)
- expected.append((cur['path'], data))
+ expected.append((cur["path"], data))
write_files("test_decoding", files)
@@ -185,10 +209,20 @@ class TestWriteFiles(FilesystemMockingTestCase):
self.assertEqual(util.load_file(path, decode=False), content)
# make sure we actually wrote *some* files.
- flen_expected = (
- len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum))
+ flen_expected = len(gz_aliases + gz_b64_aliases + b64_aliases) * len(
+ datum
+ )
self.assertEqual(len(expected), flen_expected)
+ def test_deferred(self):
+ self.patchUtils(self.tmp)
+ file_path = "/tmp/deferred.file"
+ config = {"write_files": [{"path": file_path, "defer": True}]}
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_file", config, cc, LOG, [])
+ with self.assertRaises(FileNotFoundError):
+ util.load_file(file_path)
+
class TestDecodePerms(CiTestCase):
diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py
new file mode 100644
index 00000000..17203233
--- /dev/null
+++ b/tests/unittests/config/test_cc_write_files_deferred.py
@@ -0,0 +1,85 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import shutil
+import tempfile
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.config.cc_write_files_deferred import handle
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+from .test_cc_write_files import VALID_SCHEMA
+
+LOG = logging.getLogger(__name__)
+
+
+@skipUnlessJsonSchema()
+@mock.patch("cloudinit.config.cc_write_files_deferred.write_files")
+class TestWriteFilesDeferredSchema(CiTestCase):
+
+ with_logs = True
+
+ def test_schema_validation_warns_invalid_value(
+ self, m_write_files_deferred
+ ):
+ """If 'defer' is defined, it must be of type 'bool'."""
+
+ valid_config = {
+ "write_files": [
+ {**VALID_SCHEMA.get("write_files")[0], "defer": True}
+ ]
+ }
+
+ invalid_config = {
+ "write_files": [
+ {**VALID_SCHEMA.get("write_files")[0], "defer": str("no")}
+ ]
+ }
+
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_files_deferred", valid_config, cc, LOG, [])
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
+ handle("cc_write_files_deferred", invalid_config, cc, LOG, [])
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
+ self.assertIn(
+ "defer: 'no' is not of type 'boolean'", self.logs.getvalue()
+ )
+
+
+class TestWriteFilesDeferred(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestWriteFilesDeferred, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_filtering_deferred_files(self):
+ self.patchUtils(self.tmp)
+ expected = "hello world\n"
+ config = {
+ "write_files": [
+ {
+ "path": "/tmp/deferred.file",
+ "defer": True,
+ "content": expected,
+ },
+ {"path": "/tmp/not_deferred.file"},
+ ]
+ }
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_files_deferred", config, cc, LOG, [])
+ self.assertEqual(util.load_file("/tmp/deferred.file"), expected)
+ with self.assertRaises(FileNotFoundError):
+ util.load_file("/tmp/not_deferred.file")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
new file mode 100644
index 00000000..550b0af2
--- /dev/null
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -0,0 +1,120 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configparser
+import logging
+import shutil
+import tempfile
+
+from cloudinit import util
+from cloudinit.config import cc_yum_add_repo
+from tests.unittests import helpers
+
+LOG = logging.getLogger(__name__)
+
+
+class TestConfig(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_bad_config(self):
+ cfg = {
+ "yum_repos": {
+ "epel-testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ # Missing this should cause the repo not to be written
+ # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
+ "enabled": False,
+ "gpgcheck": True,
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "failovermethod": "priority",
+ },
+ },
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ self.assertRaises(
+ IOError, util.load_file, "/etc/yum.repos.d/epel_testing.repo"
+ )
+
+ def test_write_config(self):
+ cfg = {
+ "yum_repos": {
+ "epel-testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ "baseurl": "http://blah.org/pub/epel/testing/5/$basearch",
+ "enabled": False,
+ "gpgcheck": True,
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "failovermethod": "priority",
+ },
+ },
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
+ expected = {
+ "epel_testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ "failovermethod": "priority",
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "enabled": "0",
+ "baseurl": "http://blah.org/pub/epel/testing/5/$basearch",
+ "gpgcheck": "1",
+ }
+ }
+ for section in expected:
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+ def test_write_config_array(self):
+ cfg = {
+ "yum_repos": {
+ "puppetlabs-products": {
+ "name": "Puppet Labs Products El 6 - $basearch",
+ "baseurl": (
+ "http://yum.puppetlabs.com/el/6/products/$basearch"
+ ),
+ "gpgkey": [
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs",
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet",
+ ],
+ "enabled": True,
+ "gpgcheck": True,
+ }
+ }
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
+ expected = {
+ "puppetlabs_products": {
+ "name": "Puppet Labs Products El 6 - $basearch",
+ "baseurl": "http://yum.puppetlabs.com/el/6/products/$basearch",
+ "gpgkey": (
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n"
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet"
+ ),
+ "enabled": "1",
+ "gpgcheck": "1",
+ }
+ }
+ for section in expected:
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py
index 0fb1de1a..4304fee1 100644
--- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py
+++ b/tests/unittests/config/test_cc_zypper_add_repo.py
@@ -7,8 +7,8 @@ import os
from cloudinit import util
from cloudinit.config import cc_zypper_add_repo
-from cloudinit.tests import helpers
-from cloudinit.tests.helpers import mock
+from tests.unittests import helpers
+from tests.unittests.helpers import mock
LOG = logging.getLogger(__name__)
@@ -17,31 +17,28 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.tmp = self.tmp_dir()
- self.zypp_conf = 'etc/zypp/zypp.conf'
+ self.zypp_conf = "etc/zypp/zypp.conf"
def test_bad_repo_config(self):
"""Config has no baseurl, no file should be written"""
cfg = {
- 'repos': [
- {
- 'id': 'foo',
- 'name': 'suse-test',
- 'enabled': '1'
- },
+ "repos": [
+ {"id": "foo", "name": "suse-test", "enabled": "1"},
]
}
self.patchUtils(self.tmp)
- cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d')
- self.assertRaises(IOError, util.load_file,
- "/etc/zypp/repos.d/foo.repo")
+ cc_zypper_add_repo._write_repos(cfg["repos"], "/etc/zypp/repos.d")
+ self.assertRaises(
+ IOError, util.load_file, "/etc/zypp/repos.d/foo.repo"
+ )
def test_write_repos(self):
"""Verify valid repos get written"""
cfg = self._get_base_config_repos()
root_d = self.tmp_dir()
- cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d)
- repos = glob.glob('%s/*.repo' % root_d)
- expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ cc_zypper_add_repo._write_repos(cfg["zypper"]["repos"], root_d)
+ repos = glob.glob("%s/*.repo" % root_d)
+ expected_repos = ["testing-foo.repo", "testing-bar.repo"]
if len(repos) != 2:
assert 'Number of repos written is "%d" expected 2' % len(repos)
for repo in repos:
@@ -53,80 +50,77 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_write_repo(self):
"""Verify the content of a repo file"""
cfg = {
- 'repos': [
+ "repos": [
{
- 'baseurl': 'http://foo',
- 'name': 'test-foo',
- 'id': 'testing-foo'
+ "baseurl": "http://foo",
+ "name": "test-foo",
+ "id": "testing-foo",
},
]
}
root_d = self.tmp_dir()
- cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
+ cc_zypper_add_repo._write_repos(cfg["repos"], root_d)
contents = util.load_file("%s/testing-foo.repo" % root_d)
parser = configparser.ConfigParser()
parser.read_string(contents)
expected = {
- 'testing-foo': {
- 'name': 'test-foo',
- 'baseurl': 'http://foo',
- 'enabled': '1',
- 'autorefresh': '1'
+ "testing-foo": {
+ "name": "test-foo",
+ "baseurl": "http://foo",
+ "enabled": "1",
+ "autorefresh": "1",
}
}
for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
for k, v in expected[section].items():
self.assertEqual(parser.get(section, k), v)
def test_config_write(self):
"""Write valid configuration data"""
- cfg = {
- 'config': {
- 'download.deltarpm': 'False',
- 'reposdir': 'foo'
- }
- }
+ cfg = {"config": {"download.deltarpm": "False", "reposdir": "foo"}}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cc_zypper_add_repo._write_zypp_config(cfg["config"])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
- 'reposdir=foo'
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
+ "reposdir=foo",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
- @mock.patch('cloudinit.log.logging')
+ @mock.patch("cloudinit.log.logging")
def test_config_write_skip_configdir(self, mock_logging):
"""Write configuration but skip writing 'configdir' setting"""
cfg = {
- 'config': {
- 'download.deltarpm': 'False',
- 'reposdir': 'foo',
- 'configdir': 'bar'
+ "config": {
+ "download.deltarpm": "False",
+ "reposdir": "foo",
+ "configdir": "bar",
}
}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cc_zypper_add_repo._write_zypp_config(cfg["config"])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
- 'reposdir=foo'
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
+ "reposdir=foo",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
    # Not finding the right path for mocking :(
@@ -134,55 +128,53 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_empty_config_section_no_new_data(self):
"""When the config section is empty no new data should be written to
- zypp.conf"""
+ zypp.conf"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = None
+ cfg["zypper"]["config"] = None
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_empty_config_value_no_new_data(self):
"""When the config section is not empty but there are no values
- no new data should be written to zypp.conf"""
+ no new data should be written to zypp.conf"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = {
- 'download.deltarpm': None
- }
+ cfg["zypper"]["config"] = {"download.deltarpm": None}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_handler_full_setup(self):
"""Test that the handler ends up calling the renderers"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = {
- 'download.deltarpm': 'False',
+ cfg["zypper"]["config"] = {
+ "download.deltarpm": "False",
}
root_d = self.tmp_dir()
- os.makedirs('%s/etc/zypp/repos.d' % root_d)
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ os.makedirs("%s/etc/zypp/repos.d" % root_d)
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, [])
+ cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, LOG, [])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
- repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d)
- expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ repos = glob.glob("%s/etc/zypp/repos.d/*.repo" % root_d)
+ expected_repos = ["testing-foo.repo", "testing-bar.repo"]
if len(repos) != 2:
assert 'Number of repos written is "%d" expected 2' % len(repos)
for repo in repos:
@@ -192,39 +184,39 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_no_config_section_no_new_data(self):
"""When there is no config section no new data should be written to
- zypp.conf"""
+ zypp.conf"""
cfg = self._get_base_config_repos()
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_no_repo_data(self):
"""When there is no repo data nothing should happen"""
root_d = self.tmp_dir()
self.reRoot(root_d)
cc_zypper_add_repo._write_repos(None, root_d)
- content = glob.glob('%s/*' % root_d)
+ content = glob.glob("%s/*" % root_d)
self.assertEqual(len(content), 0)
def _get_base_config_repos(self):
"""Basic valid repo configuration"""
cfg = {
- 'zypper': {
- 'repos': [
+ "zypper": {
+ "repos": [
{
- 'baseurl': 'http://foo',
- 'name': 'test-foo',
- 'id': 'testing-foo'
+ "baseurl": "http://foo",
+ "name": "test-foo",
+ "id": "testing-foo",
},
{
- 'baseurl': 'http://bar',
- 'name': 'test-bar',
- 'id': 'testing-bar'
- }
+ "baseurl": "http://bar",
+ "name": "test-bar",
+ "id": "testing-bar",
+ },
]
}
}
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
new file mode 100644
index 00000000..3a39f343
--- /dev/null
+++ b/tests/unittests/config/test_schema.py
@@ -0,0 +1,917 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+import importlib
+import inspect
+import itertools
+import logging
+import sys
+from copy import copy
+from pathlib import Path
+from textwrap import dedent
+
+import pytest
+import yaml
+from yaml import safe_load
+
+from cloudinit.config.schema import (
+ CLOUD_CONFIG_HEADER,
+ MetaSchema,
+ SchemaValidationError,
+ _schemapath_for_cloudconfig,
+ annotated_cloudconfig_file,
+ get_jsonschema_validator,
+ get_meta_doc,
+ get_schema,
+ load_doc,
+ main,
+ validate_cloudconfig_file,
+ validate_cloudconfig_metaschema,
+ validate_cloudconfig_schema,
+)
+from cloudinit.util import write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ cloud_init_project_dir,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+
+def get_schemas() -> dict:
+ """Return all legacy module schemas
+
+ Assumes that module schemas have the variable name "schema"
+ """
+ return get_module_variable("schema")
+
+
+def get_metas() -> dict:
+ """Return all module metas
+
+    Assumes that module metas have the variable name "meta"
+ """
+ return get_module_variable("meta")
+
+
+def get_module_variable(var_name) -> dict:
+ """Inspect modules and get variable from module matching var_name"""
+ schemas = {}
+
+ files = list(
+ Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
+ )
+
+ modules = [mod.stem for mod in files]
+
+ for module in modules:
+ importlib.import_module("cloudinit.config.{}".format(module))
+
+ for k, v in sys.modules.items():
+ path = Path(k)
+
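+        # e.g. Path("cloudinit.config.cc_ntp") has stem "cloudinit.config"
+        # and suffix ".cc_ntp", so suffix[1:4] == "cc_" picks config modules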
+ if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_":
+ module_name = path.suffix[1:]
+ members = inspect.getmembers(v)
+ schemas[module_name] = None
+ for name, value in members:
+ if name == var_name:
+ schemas[module_name] = value
+ break
+ return schemas
+
+
+class TestGetSchema:
+ def test_get_schema_coalesces_known_schema(self):
+ """Every cloudconfig module with schema is listed in allOf keyword."""
+ schema = get_schema()
+ assert sorted(
+ [
+ "cc_apk_configure",
+ "cc_apt_configure",
+ "cc_apt_pipelining",
+ "cc_bootcmd",
+ "cc_byobu",
+ "cc_ca_certs",
+ "cc_chef",
+ "cc_debug",
+ "cc_disable_ec2_metadata",
+ "cc_disk_setup",
+ "cc_install_hotplug",
+ "cc_keyboard",
+ "cc_locale",
+ "cc_ntp",
+ "cc_resizefs",
+ "cc_resizefs_vyos",
+ "cc_runcmd",
+ "cc_snap",
+ "cc_ubuntu_advantage",
+ "cc_ubuntu_drivers",
+ "cc_write_files",
+ "cc_zypper_add_repo",
+ ]
+ ) == sorted(
+ [meta["id"] for meta in get_metas().values() if meta is not None]
+ )
+ assert "http://json-schema.org/draft-04/schema#" == schema["$schema"]
+ assert ["$defs", "$schema", "allOf"] == sorted(list(schema.keys()))
+ # New style schema should be defined in static schema file in $defs
+ expected_subschema_defs = [
+ {"$ref": "#/$defs/cc_apk_configure"},
+ {"$ref": "#/$defs/cc_apt_configure"},
+ {"$ref": "#/$defs/cc_apt_pipelining"},
+ {"$ref": "#/$defs/cc_bootcmd"},
+ {"$ref": "#/$defs/cc_byobu"},
+ {"$ref": "#/$defs/cc_ca_certs"},
+ {"$ref": "#/$defs/cc_chef"},
+ {"$ref": "#/$defs/cc_debug"},
+ {"$ref": "#/$defs/cc_disable_ec2_metadata"},
+ {"$ref": "#/$defs/cc_disk_setup"},
+ ]
+ found_subschema_defs = []
+ legacy_schema_keys = []
+ for subschema in schema["allOf"]:
+ if "$ref" in subschema:
+ found_subschema_defs.append(subschema)
+ else: # Legacy subschema sourced from cc_* module 'schema' attr
+ legacy_schema_keys.extend(subschema["properties"].keys())
+
+ assert expected_subschema_defs == found_subschema_defs
+ # This list will dwindle as we move legacy schema to new $defs
+ assert [
+ "drivers",
+ "keyboard",
+ "locale",
+ "locale_configfile",
+ "ntp",
+ "resize_rootfs",
+ "resizefs_enabled",
+ "resizefs_list",
+ "runcmd",
+ "snap",
+ "ubuntu_advantage",
+ "updates",
+ "write_files",
+ "write_files",
+ "zypper",
+ ] == sorted(legacy_schema_keys)
+
+
+class TestLoadDoc:
+
+ docs = get_module_variable("__doc__")
+
+ # TODO( Drop legacy test when all sub-schemas in cloud-init-schema.json )
+ @pytest.mark.parametrize(
+ "module_name",
+ (
+ "cc_apt_pipelining", # new style composite schema file
+ "cc_zypper_add_repo", # legacy sub-schema defined in module
+ ),
+ )
+ def test_report_docs_for_legacy_and_consolidated_schema(self, module_name):
+ doc = load_doc([module_name])
+ assert doc, "Unexpected empty docs for {}".format(module_name)
+ assert self.docs[module_name] == doc
+
+
+class Test_SchemapathForCloudconfig:
+ """Coverage tests for supported YAML formats."""
+
+ @pytest.mark.parametrize(
+ "source_content, expected",
+ (
+ (b"{}", {}), # assert empty config handled
+ # Multiple keys account for comments and whitespace lines
+ (b"#\na: va\n \nb: vb\n#\nc: vc", {"a": 2, "b": 4, "c": 6}),
+ # List items represented on correct line number
+ (b"a:\n - a1\n\n - a2\n", {"a": 1, "a.0": 2, "a.1": 4}),
+ # Nested dicts represented on correct line number
+ (b"a:\n a1:\n\n aa1: aa1v\n", {"a": 1, "a.a1": 2, "a.a1.aa1": 4}),
+ ),
+ )
+    def test_schemapaths_representative_of_source_yaml(
+ self, source_content, expected
+ ):
+ """Validate schemapaths dict accurately represents source YAML line."""
+ cfg = yaml.safe_load(source_content)
+ assert expected == _schemapath_for_cloudconfig(
+ config=cfg, original_content=source_content
+ )
+
+
+class SchemaValidationErrorTest(CiTestCase):
+ """Test validate_cloudconfig_schema"""
+
+ def test_schema_validation_error_expects_schema_errors(self):
+ """SchemaValidationError is initialized from schema_errors."""
+ errors = (
+ ("key.path", 'unexpected key "junk"'),
+ ("key2.path", '"-123" is not a valid "hostname" format'),
+ )
+ exception = SchemaValidationError(schema_errors=errors)
+ self.assertIsInstance(exception, Exception)
+ self.assertEqual(exception.schema_errors, errors)
+ self.assertEqual(
+ 'Cloud config schema errors: key.path: unexpected key "junk", '
+ 'key2.path: "-123" is not a valid "hostname" format',
+ str(exception),
+ )
+ self.assertTrue(isinstance(exception, ValueError))
+
+
+class TestValidateCloudConfigSchema:
+ """Tests for validate_cloudconfig_schema."""
+
+ with_logs = True
+
+ @pytest.mark.parametrize(
+ "schema, call_count",
+ ((None, 1), ({"properties": {"p1": {"type": "string"}}}, 0)),
+ )
+ @skipUnlessJsonSchema()
+ @mock.patch("cloudinit.config.schema.get_schema")
+ def test_validateconfig_schema_use_full_schema_when_no_schema_param(
+ self, get_schema, schema, call_count
+ ):
+ """Use full schema when schema param is absent."""
+ get_schema.return_value = {"properties": {"p1": {"type": "string"}}}
+ kwargs = {"config": {"p1": "valid"}}
+ if schema:
+ kwargs["schema"] = schema
+ validate_cloudconfig_schema(**kwargs)
+ assert call_count == get_schema.call_count
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_non_strict_emits_warnings(self, caplog):
+ """When strict is False validate_cloudconfig_schema emits warnings."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=False)
+ [(module, log_level, log_msg)] = caplog.record_tuples
+ assert "cloudinit.config.schema" == module
+ assert logging.WARNING == log_level
+ assert (
+ "Invalid cloud-config provided:\np1: -1 is not of type 'string'"
+ == log_msg
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_emits_warning_on_missing_jsonschema(
+ self, caplog
+ ):
+ """Warning from validate_cloudconfig_schema when missing jsonschema."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ with mock.patch.dict("sys.modules", **{"jsonschema": ImportError()}):
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=True)
+ assert "Ignoring schema validation. jsonschema is not present" in (
+ caplog.text
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_strict_raises_errors(self):
+ """When strict is True validate_cloudconfig_schema raises errors."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=True)
+ assert (
+ "Cloud config schema errors: p1: -1 is not of type 'string'"
+ == (str(context_mgr.value))
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_honors_formats(self):
+ """With strict True, validate_cloudconfig_schema errors on format."""
+ schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({"p1": "-1"}, schema, strict=True)
+ assert "Cloud config schema errors: p1: '-1' is not a 'email'" == (
+ str(context_mgr.value)
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_honors_formats_strict_metaschema(self):
+ """With strict and strict_metaschema True, ensure errors on format"""
+ schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema(
+ {"p1": "-1"}, schema, strict=True, strict_metaschema=True
+ )
+ assert "Cloud config schema errors: p1: '-1' is not a 'email'" == str(
+ context_mgr.value
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_strict_metaschema_do_not_raise_exception(
+ self, caplog
+ ):
+ """With strict_metaschema=True, do not raise exceptions.
+
+ This flag is currently unused, but is intended for run-time validation.
+ This should warn, but not raise.
+ """
+ schema = {"properties": {"p1": {"types": "string", "format": "email"}}}
+ validate_cloudconfig_schema(
+ {"p1": "-1"}, schema, strict_metaschema=True
+ )
+ assert (
+ "Meta-schema validation failed, attempting to validate config"
+ in caplog.text
+ )
+
+
+class TestCloudConfigExamples:
+ metas = get_metas()
+ params = [
+ (meta["id"], example)
+ for meta in metas.values()
+ if meta and meta.get("examples")
+ for example in meta.get("examples")
+ ]
+
+ @pytest.mark.parametrize("schema_id, example", params)
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_of_example(self, schema_id, example):
+ """For a given example in a config module we test if it is valid
+ according to the unified schema of all config modules
+ """
+ schema = get_schema()
+ config_load = safe_load(example)
+ validate_cloudconfig_schema(config_load, schema, strict=True)
+
+
+class ValidateCloudConfigFileTest(CiTestCase):
+ """Tests for validate_cloudconfig_file."""
+
+ def setUp(self):
+ super(ValidateCloudConfigFileTest, self).setUp()
+ self.config_file = self.tmp_path("cloudcfg.yaml")
+
+ def test_validateconfig_file_error_on_absent_file(self):
+ """On absent config_path, validate_cloudconfig_file errors."""
+ with self.assertRaises(RuntimeError) as context_mgr:
+ validate_cloudconfig_file("/not/here", {})
+ self.assertEqual(
+ "Configfile /not/here does not exist", str(context_mgr.exception)
+ )
+
+ def test_validateconfig_file_error_on_invalid_header(self):
+ """On invalid header, validate_cloudconfig_file errors.
+
+ A SchemaValidationError is raised when the file doesn't begin with
+ CLOUD_CONFIG_HEADER.
+ """
+ write_file(self.config_file, "#junk")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertEqual(
+ "Cloud config schema errors: format-l1.c1: File {0} needs to begin"
+ ' with "{1}"'.format(
+ self.config_file, CLOUD_CONFIG_HEADER.decode()
+ ),
+ str(context_mgr.exception),
+ )
+
+ def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
+ """On non-yaml scan issues, validate_cloudconfig_file errors."""
+ # Generate a scanner error by providing text on a single line with
+ # improper indent.
+ write_file(self.config_file, "#cloud-config\nasdf:\nasdf")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ "schema errors: format-l3.c1: File {0} is not valid yaml.".format(
+ self.config_file
+ ),
+ str(context_mgr.exception),
+ )
+
+ def test_validateconfig_file_error_on_non_yaml_parser_error(self):
+ """On non-yaml parser issues, validate_cloudconfig_file errors."""
+ write_file(self.config_file, "#cloud-config\n{}}")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ "schema errors: format-l2.c3: File {0} is not valid yaml.".format(
+ self.config_file
+ ),
+ str(context_mgr.exception),
+ )
+
+ @skipUnlessJsonSchema()
+    def test_validateconfig_file_strictly_validates_schema(self):
+ """validate_cloudconfig_file raises errors on invalid schema."""
+ schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
+ write_file(self.config_file, "#cloud-config\np1: -1")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, schema)
+ self.assertEqual(
+ "Cloud config schema errors: p1: -1 is not of type 'string'",
+ str(context_mgr.exception),
+ )
+
+
+class GetSchemaDocTest(CiTestCase):
+ """Tests for get_meta_doc."""
+
+ def setUp(self):
+ super(GetSchemaDocTest, self).setUp()
+ self.required_schema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ }
+ self.meta: MetaSchema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ "examples": [
+ 'ex1:\n [don\'t, expand, "this"]',
+ "ex2: true",
+ ],
+ }
+
+ def test_get_meta_doc_returns_restructured_text(self):
+ """get_meta_doc returns restructured text for a cloudinit schema."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "description": "prop-description",
+ "items": {"type": "integer"},
+ }
+ }
+ }
+ )
+
+ doc = get_meta_doc(self.meta, full_schema)
+ self.assertEqual(
+ dedent(
+ """
+ name
+ ----
+ **Summary:** title
+
+ description
+
+ **Internal name:** ``id``
+
+ **Module frequency:** frequency
+
+ **Supported distros:** debian, rhel
+
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
+
+ **Examples**::
+
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
+ """
+ ),
+ doc,
+ )
+
+ def test_get_meta_doc_handles_multiple_types(self):
+ """get_meta_doc delimits multiple property types with a '/'."""
+ schema = {"properties": {"prop1": {"type": ["string", "integer"]}}}
+ self.assertIn(
+ "**prop1:** (string/integer)", get_meta_doc(self.meta, schema)
+ )
+
+ def test_get_meta_doc_handles_enum_types(self):
+ """get_meta_doc converts enum types to yaml and delimits with '/'."""
+ schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}}
+ self.assertIn(
+ "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema)
+ )
+
+ def test_get_meta_doc_handles_nested_oneof_property_types(self):
+ """get_meta_doc describes array items oneOf declarations in type."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ self.assertIn(
+ "**prop1:** (array of (string)/(integer))",
+ get_meta_doc(self.meta, schema),
+ )
+
+ def test_get_meta_doc_handles_string_examples(self):
+ """get_meta_doc properly indented examples as a list of strings."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {
+ "examples": [
+ 'ex1:\n [don\'t, expand, "this"]',
+ "ex2: true",
+ ],
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "description": "prop-description",
+ "items": {"type": "integer"},
+ }
+ },
+ }
+ )
+ self.assertIn(
+ dedent(
+ """
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
+
+ **Examples**::
+
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
+ """
+ ),
+ get_meta_doc(self.meta, full_schema),
+ )
+
+ def test_get_meta_doc_properly_parse_description(self):
+ """get_meta_doc description properly formatted"""
+ schema = {
+ "properties": {
+ "p1": {
+ "type": "string",
+ "description": dedent(
+ """\
+ This item
+ has the
+ following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is
+ option1"""
+ ),
+ }
+ }
+ }
+
+ self.assertIn(
+ dedent(
+ """
+ **Config schema**:
+ **p1:** (string) This item has the following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is option1
+
+ """
+ ),
+ get_meta_doc(self.meta, schema),
+ )
+
+ def test_get_meta_doc_raises_key_errors(self):
+ """get_meta_doc raises KeyErrors on missing keys."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ for key in self.meta:
+ invalid_meta = copy(self.meta)
+ invalid_meta.pop(key)
+ with self.assertRaises(KeyError) as context_mgr:
+ get_meta_doc(invalid_meta, schema)
+ self.assertIn(key, str(context_mgr.exception))
+
+ def test_label_overrides_property_name(self):
+ """get_meta_doc overrides property name with label."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "string",
+ "label": "label1",
+ },
+ "prop_no_label": {
+ "type": "string",
+ },
+ "prop_array": {
+ "label": "array_label",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "some_prop": {"type": "number"},
+ },
+ },
+ },
+ },
+ "patternProperties": {
+ "^.*$": {
+ "type": "string",
+ "label": "label2",
+ }
+ },
+ }
+ meta_doc = get_meta_doc(self.meta, schema)
+ assert "**label1:** (string)" in meta_doc
+ assert "**label2:** (string" in meta_doc
+ assert "**prop_no_label:** (string)" in meta_doc
+ assert "Each item in **array_label** list" in meta_doc
+
+ assert "prop1" not in meta_doc
+ assert ".*" not in meta_doc
+
+
+class AnnotatedCloudconfigFileTest(CiTestCase):
+ maxDiff = None
+
+ def test_annotated_cloudconfig_file_no_schema_errors(self):
+ """With no schema_errors, print the original content."""
+ content = b"ntp:\n pools: [ntp1.pools.com]\n"
+ self.assertEqual(
+ content, annotated_cloudconfig_file({}, content, schema_errors=[])
+ )
+
+ def test_annotated_cloudconfig_file_with_non_dict_cloud_config(self):
+ """Error when empty non-dict cloud-config is provided.
+
+        Our JSON validation, when user-data is of None type, generates a
+        bunch of schema validation errors of the format:
+ ('', "None is not of type 'object'"). Ignore those symptoms and
+ report the general problem instead.
+ """
+ content = b"\n\n\n"
+ expected = "\n".join(
+ [
+ content.decode(),
+ "# Errors: -------------",
+ "# E1: Cloud-config is not a YAML dict.\n\n",
+ ]
+ )
+ self.assertEqual(
+ expected,
+ annotated_cloudconfig_file(
+ None,
+ content,
+ schema_errors=[("", "None is not of type 'object'")],
+ ),
+ )
+
+ def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
+ """With schema_errors, error lines are annotated and a footer added."""
+ content = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp:
+ pools: [-99, 75]
+ """
+ ).encode()
+ expected = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp: # E1
+ pools: [-99, 75] # E2,E3
+
+ # Errors: -------------
+ # E1: Some type error
+ # E2: -99 is not a string
+ # E3: 75 is not a string
+
+ """
+ )
+ parsed_config = safe_load(content[13:])
+ schema_errors = [
+ ("ntp", "Some type error"),
+ ("ntp.pools.0", "-99 is not a string"),
+ ("ntp.pools.1", "75 is not a string"),
+ ]
+ self.assertEqual(
+ expected,
+ annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ )
+
+ def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
+ """Errors are annotated for lists with items on separate lines."""
+ content = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp:
+ pools:
+ - -99
+ - 75
+ """
+ ).encode()
+ expected = dedent(
+ """\
+ ntp:
+ pools:
+ - -99 # E1
+ - 75 # E2
+ """
+ )
+ parsed_config = safe_load(content[13:])
+ schema_errors = [
+ ("ntp.pools.0", "-99 is not a string"),
+ ("ntp.pools.1", "75 is not a string"),
+ ]
+ self.assertIn(
+ expected,
+ annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ )
+
+
+class TestMain:
+
+ exclusive_combinations = itertools.combinations(
+ ["--system", "--docs all", "--config-file something"], 2
+ )
+
+ @pytest.mark.parametrize("params", exclusive_combinations)
+ def test_main_exclusive_args(self, params, capsys):
+ """Main exits non-zero and error on required exclusive args."""
+ params = list(itertools.chain(*[a.split() for a in params]))
+ with mock.patch("sys.argv", ["mycmd"] + params):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n"
+ )
+ assert expected == err
+
+ def test_main_missing_args(self, capsys):
+ """Main exits non-zero and reports an error on missing parameters."""
+ with mock.patch("sys.argv", ["mycmd"]):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n"
+ )
+ assert expected == err
+
+ def test_main_absent_config_file(self, capsys):
+ """Main exits non-zero when config file is absent."""
+ myargs = ["mycmd", "--annotate", "--config-file", "NOT_A_FILE"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert "Error:\nConfigfile NOT_A_FILE does not exist\n" == err
+
+ def test_main_invalid_flag_combo(self, capsys):
+ """Main exits non-zero when invalid flag combo used."""
+ myargs = ["mycmd", "--annotate", "--docs", "DOES_NOT_MATTER"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _, err = capsys.readouterr()
+ assert (
+ "Error:\nInvalid flag combination. "
+ "Cannot use --annotate with --docs\n" == err
+ )
+
+ def test_main_prints_docs(self, capsys):
+ """When --docs parameter is provided, main generates documentation."""
+ myargs = ["mycmd", "--docs", "all"]
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "\nNTP\n---\n" in out
+ assert "\nRuncmd\n------\n" in out
+
+ def test_main_validates_config_file(self, tmpdir, capsys):
+ """When --config-file parameter is provided, main validates schema."""
+ myyaml = tmpdir.join("my.yaml")
+ myargs = ["mycmd", "--config-file", myyaml.strpath]
+ myyaml.write(b"#cloud-config\nntp:") # shortest ntp schema
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "Valid cloud-config: {0}\n".format(myyaml) == out
+
+ @mock.patch("cloudinit.config.schema.read_cfg_paths")
+ @mock.patch("cloudinit.config.schema.os.getuid", return_value=0)
+ def test_main_validates_system_userdata(
+ self, m_getuid, m_read_cfg_paths, capsys, paths
+ ):
+ """When --system is provided, main validates system userdata."""
+ m_read_cfg_paths.return_value = paths
+ ud_file = paths.get_ipath_cur("userdata_raw")
+ write_file(ud_file, b"#cloud-config\nntp:")
+ myargs = ["mycmd", "--system"]
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "Valid cloud-config: system userdata\n" == out
+
+ @mock.patch("cloudinit.config.schema.os.getuid", return_value=1000)
+ def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
+ """Non-root user can't use --system param"""
+ myargs = ["mycmd", "--system"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\nUnable to read system userdata as non-root user. "
+ "Try using sudo\n"
+ )
+ assert expected == err
+
+
+def _get_meta_doc_examples():
+ examples_dir = Path(cloud_init_project_dir("doc/examples"))
+ assert examples_dir.is_dir()
+
+ return (
+ str(f)
+ for f in examples_dir.glob("cloud-config*.txt")
+ if not f.name.startswith("cloud-config-archive")
+ )
+
+
+class TestSchemaDocExamples:
+ schema = get_schema()
+
+ @pytest.mark.parametrize("example_path", _get_meta_doc_examples())
+ @skipUnlessJsonSchema()
+ def test_schema_doc_examples(self, example_path):
+ validate_cloudconfig_file(example_path, self.schema)
+
+
+class TestStrictMetaschema:
+ """Validate that schemas follow a stricter metaschema definition than
+ the default. This disallows arbitrary key/value pairs.
+ """
+
+ @skipUnlessJsonSchema()
+ def test_modules(self):
+ """Validate all modules with a stricter metaschema"""
+ (validator, _) = get_jsonschema_validator()
+ for (name, value) in get_schemas().items():
+ if value:
+ validate_cloudconfig_metaschema(validator, value)
+ else:
+ logging.warning("module %s has no schema definition", name)
+
+ @skipUnlessJsonSchema()
+ def test_validate_bad_module(self):
+ """Throw exception by default, don't throw if throw=False
+
+ item should be 'items' and is therefore interpreted as an additional
+ property which is invalid with a strict metaschema
+ """
+ (validator, _) = get_jsonschema_validator()
+ schema = {
+ "type": "array",
+ "item": {
+ "type": "object",
+ },
+ }
+ with pytest.raises(
+ SchemaValidationError,
+ match=r"Additional properties are not allowed.*",
+ ):
+
+ validate_cloudconfig_metaschema(validator, schema)
+
+ validate_cloudconfig_metaschema(validator, schema, throw=False)
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/distros/__init__.py
index 5394aa56..e66b9446 100644
--- a/tests/unittests/test_distros/__init__.py
+++ b/tests/unittests/distros/__init__.py
@@ -1,9 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
+from cloudinit import distros, helpers, settings
def _get_distro(dtype, system_info=None):
@@ -14,8 +12,8 @@ def _get_distro(dtype, system_info=None):
example: _get_distro("debian")
"""
if system_info is None:
- system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info'])
- system_info['distro'] = dtype
- paths = helpers.Paths(system_info['paths'])
+ system_info = copy.deepcopy(settings.CFG_BUILTIN["system_info"])
+ system_info["distro"] = dtype
+ paths = helpers.Paths(system_info["paths"])
distro_cls = distros.fetch(dtype)
return distro_cls(dtype, system_info, paths)
diff --git a/tests/unittests/distros/test_arch.py b/tests/unittests/distros/test_arch.py
new file mode 100644
index 00000000..5446295e
--- /dev/null
+++ b/tests/unittests/distros/test_arch.py
@@ -0,0 +1,55 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit.distros.arch import _render_network
+from tests.unittests.helpers import CiTestCase, dir2dict
+
+from . import _get_distro
+
+
+class TestArch(CiTestCase):
+ def test_get_distro(self):
+ distro = _get_distro("arch")
+ hostname = "myhostname"
+ hostfile = self.tmp_path("hostfile")
+ distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname + "\n", util.load_file(hostfile))
+
+
+class TestRenderNetwork(CiTestCase):
+ def test_basic_static(self):
+ """Just the most basic static config.
+
+ note 'lo' should not be rendered as an interface."""
+ entries = {
+ "eth0": {
+ "auto": True,
+ "dns-nameservers": ["8.8.8.8"],
+ "bootproto": "static",
+ "address": "10.0.0.2",
+ "gateway": "10.0.0.1",
+ "netmask": "255.255.255.0",
+ },
+ "lo": {"auto": True},
+ }
+ target = self.tmp_dir()
+ devs = _render_network(entries, target=target)
+ files = dir2dict(target, prefix=target)
+ self.assertEqual(["eth0"], devs)
+ self.assertEqual(
+ {
+ "/etc/netctl/eth0": "\n".join(
+ [
+ "Address=10.0.0.2/255.255.255.0",
+ "Connection=ethernet",
+ "DNS=('8.8.8.8')",
+ "Gateway=10.0.0.1",
+ "IP=static",
+ "Interface=eth0",
+ "",
+ ]
+ ),
+ "/etc/resolv.conf": "nameserver 8.8.8.8\n",
+ },
+ files,
+ )
diff --git a/tests/unittests/distros/test_bsd_utils.py b/tests/unittests/distros/test_bsd_utils.py
new file mode 100644
index 00000000..d6f0aeed
--- /dev/null
+++ b/tests/unittests/distros/test_bsd_utils.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.bsd_utils as bsd_utils
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+
+RC_FILE = """
+if something; then
+ do something here
+fi
+hostname={hostname}
+"""
+
+
+class TestBsdUtils(CiTestCase):
+ def setUp(self):
+ super().setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, "load_file")
+ )
+
+ self.write_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, "write_file")
+ )
+
+ def test_get_rc_config_value(self):
+ self.load_file.return_value = "hostname=foo\n"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+ self.load_file.assert_called_with("/etc/rc.conf")
+
+ self.load_file.return_value = "hostname=foo"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ self.load_file.return_value = 'hostname="foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ self.load_file.return_value = "hostname='foo'"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ self.load_file.return_value = "hostname='foo\""
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "'foo\"")
+
+ self.load_file.return_value = ""
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), None)
+
+ self.load_file.return_value = RC_FILE.format(hostname="foo")
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ def test_set_rc_config_value_unchanged(self):
+        # When the file already contains the requested value, no rewrite
+        # should happen.
+        self.load_file.return_value = RC_FILE.format(hostname="foo")
+        bsd_utils.set_rc_config_value("hostname", "foo")
+        self.write_file.assert_not_called()
+
+ def test_set_rc_config_value(self):
+ bsd_utils.set_rc_config_value("hostname", "foo")
+ self.write_file.assert_called_with("/etc/rc.conf", "hostname=foo\n")
+
+ self.load_file.return_value = RC_FILE.format(hostname="foo")
+ bsd_utils.set_rc_config_value("hostname", "bar")
+ self.write_file.assert_called_with(
+ "/etc/rc.conf", RC_FILE.format(hostname="bar")
+ )
diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
new file mode 100644
index 00000000..ddb039bd
--- /dev/null
+++ b/tests/unittests/distros/test_create_users.py
@@ -0,0 +1,282 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import distros, ssh_util
+from tests.unittests.helpers import CiTestCase, mock
+from tests.unittests.util import abstract_to_concrete
+
+
+@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
+@mock.patch("cloudinit.distros.subp.subp")
+class TestCreateUser(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestCreateUser, self).setUp()
+ self.dist = abstract_to_concrete(distros.Distro)(
+ name="test", cfg=None, paths=None
+ )
+
+ def _useradd2call(self, args):
+ # return a mock call for the useradd command in args
+ # with expected 'logstring'.
+ args = ["useradd"] + args
+ logcmd = [a for a in args]
+ for i in range(len(args)):
+ if args[i] in ("--password",):
+ logcmd[i + 1] = "REDACTED"
+ return mock.call(args, logstring=logcmd)
+
+ def test_basic(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_no_home(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, no_create_home=True)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-M"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_system_user(self, m_subp, m_is_snappy):
+ # system user should have no home and get --system
+ user = "foouser"
+ self.dist.create_user(user, system=True)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "--system", "-M"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_explicit_no_home_false(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, no_create_home=False)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_unlocked(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, lock_passwd=False)
+ self.assertEqual(
+ m_subp.call_args_list, [self._useradd2call([user, "-m"])]
+ )
+
+ def test_set_password(self, m_subp, m_is_snappy):
+ user = "foouser"
+ password = "passfoo"
+ self.dist.create_user(user, passwd=password)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "--password", password, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_group_added(self, m_is_group, m_subp, m_is_snappy):
+ m_is_group.return_value = False
+ user = "foouser"
+ self.dist.create_user(user, groups=["group1"])
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ self._useradd2call([user, "--groups", "group1", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
+ ex_groups = ["existing_group"]
+ groups = ["group1", ex_groups[0]]
+ m_is_group.side_effect = lambda m: m in ex_groups
+ user = "foouser"
+ self.dist.create_user(user, groups=groups)
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ self._useradd2call([user, "--groups", ",".join(groups), "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_create_groups_with_whitespace_string(
+ self, m_is_group, m_subp, m_is_snappy
+ ):
+        # groups can be a comma-delimited string, even with whitespace
+ m_is_group.return_value = False
+ user = "foouser"
+ self.dist.create_user(user, groups="group1, group2")
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ mock.call(["groupadd", "group2"]),
+ self._useradd2call([user, "--groups", "group1,group2", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ def test_explicit_sudo_false(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, sudo=False)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_string(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys allows string and calls setup_user_keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys="mykey")
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+ m_setup_user_keys.assert_called_once_with(set(["mykey"]), user)
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_list(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys allows lists and calls setup_user_keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys=["key1", "key2"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+ m_setup_user_keys.assert_called_once_with(set(["key1", "key2"]), user)
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_integer(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys warns on non-iterable/string type."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys=-1)
+ m_setup_user_keys.assert_called_once_with(set([]), user)
+ match = re.match(
+ r".*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for"
+ " 'ssh_authorized_keys'.*",
+ self.logs.getvalue(),
+ re.DOTALL,
+ )
+ self.assertIsNotNone(
+ match, "Missing ssh_authorized_keys invalid type warning"
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_no_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Log a warning when trying to redirect a user no cloud ssh keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_redirect_user="someuser")
+ self.assertIn(
+ "WARNING: Unable to disable SSH logins for foouser given "
+ "ssh_redirect_user: someuser. No cloud public-keys present.\n",
+ self.logs.getvalue(),
+ )
+ m_setup_user_keys.assert_not_called()
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_with_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Disable ssh when ssh_redirect_user and cloud ssh keys are set."""
+ user = "foouser"
+ self.dist.create_user(
+ user, ssh_redirect_user="someuser", cloud_public_ssh_keys=["key1"]
+ )
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace("$USER", "someuser")
+ disable_prefix = disable_prefix.replace("$DISABLE_USER", user)
+ m_setup_user_keys.assert_called_once_with(
+ set(["key1"]), "foouser", options=disable_prefix
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Do not disable ssh_authorized_keys when ssh_redirect_user is set."""
+ user = "foouser"
+ self.dist.create_user(
+ user,
+ ssh_authorized_keys="auth1",
+ ssh_redirect_user="someuser",
+ cloud_public_ssh_keys=["key1"],
+ )
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace("$USER", "someuser")
+ disable_prefix = disable_prefix.replace("$DISABLE_USER", user)
+ self.assertEqual(
+ m_setup_user_keys.call_args_list,
+ [
+ mock.call(set(["auth1"]), user), # not disabled
+ mock.call(set(["key1"]), "foouser", options=disable_prefix),
+ ],
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_with_usermod_if_no_passwd(
+ self, m_which, m_subp, m_is_snappy
+ ):
+ """Lock uses usermod --lock if no 'passwd' cmd available."""
+ m_which.side_effect = lambda m: m in ("usermod",)
+ self.dist.lock_passwd("bob")
+ self.assertEqual(
+ [mock.call(["usermod", "--lock", "bob"])], m_subp.call_args_list
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_with_passwd_if_available(self, m_which, m_subp, m_is_snappy):
+ """Lock with only passwd will use passwd."""
+ m_which.side_effect = lambda m: m in ("passwd",)
+ self.dist.lock_passwd("bob")
+ self.assertEqual(
+ [mock.call(["passwd", "-l", "bob"])], m_subp.call_args_list
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_raises_runtime_if_no_commands(
+ self, m_which, m_subp, m_is_snappy
+ ):
+ """Lock with no commands available raises RuntimeError."""
+ m_which.return_value = None
+ with self.assertRaises(RuntimeError):
+ self.dist.lock_passwd("bob")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_debian.py b/tests/unittests/distros/test_debian.py
new file mode 100644
index 00000000..c7c5932e
--- /dev/null
+++ b/tests/unittests/distros/test_debian.py
@@ -0,0 +1,211 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from itertools import count, cycle
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, subp, util
+from cloudinit.distros.debian import APT_GET_COMMAND, APT_GET_WRAPPER
+from tests.unittests.helpers import FilesystemMockingTestCase
+
+
+@mock.patch("cloudinit.distros.debian.subp.subp")
+class TestDebianApplyLocale(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestDebianApplyLocale, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+ self.patchUtils(self.new_root)
+ self.spath = self.tmp_path("etc/default/locale", self.new_root)
+ cls = distros.fetch("debian")
+ self.distro = cls("debian", {}, None)
+
+ def test_no_rerun(self, m_subp):
+ """If system has defined locale, no re-run is expected."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=%s\n" % locale, omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ m_subp.assert_not_called()
+
+ def test_no_regen_on_c_utf8(self, m_subp):
+ """If locale is set to C.UTF8, do not attempt to call locale-gen"""
+ m_subp.return_value = (None, None)
+ locale = "C.UTF-8"
+ util.write_file(self.spath, "LANG=%s\n" % "en_US.UTF-8", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ]
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_if_different(self, m_subp):
+ """If system has different locale, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=fr_FR.UTF-8", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_if_no_file(self, m_subp):
+ """If system has no locale file, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_on_unset_system_locale(self, m_subp):
+ """If system has unset locale, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_on_mismatched_keys(self, m_subp):
+ """If key is LC_ALL and system has only LANG, rerun is expected."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath, keyname="LC_ALL")
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LC_ALL=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_falseish_locale_raises_valueerror(self, m_subp):
+ """locale as None or "" is invalid and should raise ValueError."""
+
+ with self.assertRaises(ValueError) as ctext_m:
+ self.distro.apply_locale(None)
+ m_subp.assert_not_called()
+
+ self.assertEqual(
+ "Failed to provide locale value.", str(ctext_m.exception)
+ )
+
+ with self.assertRaises(ValueError) as ctext_m:
+ self.distro.apply_locale("")
+ m_subp.assert_not_called()
+ self.assertEqual(
+ "Failed to provide locale value.", str(ctext_m.exception)
+ )
+
+
+@mock.patch.dict("os.environ", {}, clear=True)
+@mock.patch("cloudinit.distros.debian.subp.which", return_value=True)
+@mock.patch("cloudinit.distros.debian.subp.subp")
+class TestPackageCommand:
+ distro = distros.fetch("debian")("debian", {}, None)
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ return_value=True,
+ )
+ def test_simple_command(self, m_apt_avail, m_subp, m_which):
+ self.distro.package_command("update")
+ apt_args = [APT_GET_WRAPPER["command"]]
+ apt_args.extend(APT_GET_COMMAND)
+ apt_args.append("update")
+ expected_call = {
+ "args": apt_args,
+ "capture": False,
+ "env": {"DEBIAN_FRONTEND": "noninteractive"},
+ }
+ assert m_subp.call_args == mock.call(**expected_call)
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=[False, False, True],
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which):
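+        # _apt_lock_available reports the lock busy twice, then free, so
+        # two one-second sleeps are expected before the wrapped command runs.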
+ self.distro._wait_for_apt_command("stub", {"args": "stub2"})
+ assert m_sleep.call_args_list == [mock.call(1), mock.call(1)]
+ assert m_subp.call_args_list == [mock.call(args="stub2")]
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ return_value=False,
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ @mock.patch("cloudinit.distros.debian.time.time", side_effect=count())
+ def test_lock_wait_timeout(
+ self, m_time, m_sleep, m_apt_avail, m_subp, m_which
+ ):
+ with pytest.raises(TimeoutError):
+ self.distro._wait_for_apt_command("stub", "stub2", timeout=5)
+ assert m_subp.call_args_list == []
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=cycle([True, False]),
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which):
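+        # Exit code 100 ("Could not get apt lock") is retried; the third
+        # subp call succeeds and its return value is passed through.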
+ exception = subp.ProcessExecutionError(
+ exit_code=100, stderr="Could not get apt lock"
+ )
+ m_subp.side_effect = [exception, exception, "return_thing"]
+ ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"})
+ assert ret == "return_thing"
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=cycle([True, False]),
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ @mock.patch("cloudinit.distros.debian.time.time", side_effect=count())
+ def test_lock_exception_timeout(
+ self, m_time, m_sleep, m_apt_avail, m_subp, m_which
+ ):
+ m_subp.side_effect = subp.ProcessExecutionError(
+ exit_code=100, stderr="Could not get apt lock"
+ )
+ with pytest.raises(TimeoutError):
+ self.distro._wait_for_apt_command(
+ "stub", {"args": "stub2"}, timeout=5
+ )
diff --git a/tests/unittests/distros/test_dragonflybsd.py b/tests/unittests/distros/test_dragonflybsd.py
new file mode 100644
index 00000000..f0cd1b24
--- /dev/null
+++ b/tests/unittests/distros/test_dragonflybsd.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+
+import cloudinit.util
+from tests.unittests.helpers import mock
+
+
+def test_find_dragonflybsd_part():
+ assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3"
+
+
+@mock.patch("cloudinit.util.is_DragonFlyBSD")
+@mock.patch("cloudinit.subp.subp")
+def test_parse_mount(mock_subp, m_is_DragonFlyBSD):
+ mount_out = """
+vbd0s3 on / (hammer2, local)
+devfs on /dev (devfs, nosymfollow, local)
+/dev/vbd0s0a on /boot (ufs, local)
+procfs on /proc (procfs, local)
+tmpfs on /var/run/shm (tmpfs, local)
+"""
+
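+    # parse_mount("/") should resolve to the hammer2 root entry above.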
+ mock_subp.return_value = (mount_out, "")
+ m_is_DragonFlyBSD.return_value = True
+ assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py
index be565b04..22be5098 100644
--- a/tests/unittests/test_distros/test_freebsd.py
+++ b/tests/unittests/distros/test_freebsd.py
@@ -1,45 +1,43 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd)
-from cloudinit.tests.helpers import (CiTestCase, mock)
-
import os
+from cloudinit.util import find_freebsd_part, get_path_dev_freebsd
+from tests.unittests.helpers import CiTestCase, mock
-class TestDeviceLookUp(CiTestCase):
- @mock.patch('cloudinit.subp.subp')
+class TestDeviceLookUp(CiTestCase):
+ @mock.patch("cloudinit.subp.subp")
def test_find_freebsd_part_label(self, mock_subp):
- glabel_out = '''
+ glabel_out = """
gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
label/rootfs N/A da0p2
label/swap N/A da0p3
-'''
+"""
mock_subp.return_value = (glabel_out, "")
res = find_freebsd_part("/dev/label/rootfs")
self.assertEqual("da0p2", res)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_freebsd_part_gpt(self, mock_subp):
- glabel_out = '''
+ glabel_out = """
gpt/bootfs N/A vtbd0p1
gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1
gpt/swapfs N/A vtbd0p2
gpt/rootfs N/A vtbd0p3
iso9660/cidata N/A vtbd2
-'''
+"""
mock_subp.return_value = (glabel_out, "")
res = find_freebsd_part("/dev/gpt/rootfs")
self.assertEqual("vtbd0p3", res)
def test_get_path_dev_freebsd_label(self):
- mnt_list = '''
+ mnt_list = """
/dev/label/rootfs / ufs rw 1 1
devfs /dev devfs rw,multilabel 0 0
fdescfs /dev/fd fdescfs rw 0 0
/dev/da1s1 /mnt/resource ufs rw 2 2
-'''
- with mock.patch.object(os.path, 'exists',
- return_value=True):
- res = get_path_dev_freebsd('/etc', mnt_list)
+"""
+ with mock.patch.object(os.path, "exists", return_value=True):
+ res = get_path_dev_freebsd("/etc", mnt_list)
self.assertIsNotNone(res)
diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test_generic.py
new file mode 100644
index 00000000..93c5395c
--- /dev/null
+++ b/tests/unittests/distros/test_generic.py
@@ -0,0 +1,383 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, util
+from tests.unittests import helpers
+
+unknown_arch_info = {
+ "arches": ["default"],
+ "failsafe": {
+ "primary": "http://fs-primary-default",
+ "security": "http://fs-security-default",
+ },
+}
+
+package_mirrors = [
+ {
+ "arches": ["i386", "amd64"],
+ "failsafe": {
+ "primary": "http://fs-primary-intel",
+ "security": "http://fs-security-intel",
+ },
+ "search": {
+ "primary": [
+ "http://%(ec2_region)s.ec2/",
+ "http://%(availability_zone)s.clouds/",
+ ],
+ "security": [
+ "http://security-mirror1-intel",
+ "http://security-mirror2-intel",
+ ],
+ },
+ },
+ {
+ "arches": ["armhf", "armel"],
+ "failsafe": {
+ "primary": "http://fs-primary-arm",
+ "security": "http://fs-security-arm",
+ },
+ },
+ unknown_arch_info,
+]
+
+gpmi = distros._get_package_mirror_info
+gapmi = distros._get_arch_package_mirror_info
+
+
+class TestGenericDistro(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestGenericDistro, self).setUp()
+        # Make a temp directory for tests to use.
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _write_load_sudoers(self, _user, rules):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ os.makedirs(os.path.join(self.tmp, "etc"))
+ os.makedirs(os.path.join(self.tmp, "etc", "sudoers.d"))
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ d.write_sudo_rules("harlowja", rules)
+ contents = util.load_file(d.ci_sudoers_fn)
+ return contents
+
+ def _count_in(self, lines_look_for, text_content):
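+        # Count how many of the expected lines appear verbatim (after
+        # stripping) in the rendered sudoers content.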
+ found_amount = 0
+ for e in lines_look_for:
+ for line in text_content.splitlines():
+ line = line.strip()
+ if line == e:
+ found_amount += 1
+ return found_amount
+
+ def test_sudoers_ensure_rules(self):
+ rules = "ALL=(ALL:ALL) ALL"
+ contents = self._write_load_sudoers("harlowja", rules)
+ expected = ["harlowja ALL=(ALL:ALL) ALL"]
+ self.assertEqual(len(expected), self._count_in(expected, contents))
+ not_expected = [
+ "harlowja A",
+ "harlowja L",
+ "harlowja L",
+ ]
+ self.assertEqual(0, self._count_in(not_expected, contents))
+
+ def test_sudoers_ensure_rules_list(self):
+ rules = [
+ "ALL=(ALL:ALL) ALL",
+ "B-ALL=(ALL:ALL) ALL",
+ "C-ALL=(ALL:ALL) ALL",
+ ]
+ contents = self._write_load_sudoers("harlowja", rules)
+ expected = [
+ "harlowja ALL=(ALL:ALL) ALL",
+ "harlowja B-ALL=(ALL:ALL) ALL",
+ "harlowja C-ALL=(ALL:ALL) ALL",
+ ]
+ self.assertEqual(len(expected), self._count_in(expected, contents))
+ not_expected = [
+ "harlowja A",
+ "harlowja L",
+ "harlowja L",
+ ]
+ self.assertEqual(0, self._count_in(not_expected, contents))
+
+ def test_sudoers_ensure_new(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+
+ def test_sudoers_ensure_append(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ util.write_file("/etc/sudoers", "josh, josh\n")
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertIn("josh", contents)
+ self.assertEqual(2, contents.count("josh"))
+
+ def test_sudoers_ensure_only_one_includedir(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ for char in ["#", "@"]:
+ util.write_file("/etc/sudoers", "{}includedir /b".format(char))
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertEqual(1, contents.count("includedir /b"))
+
+ def test_arch_package_mirror_info_unknown(self):
+ """for an unknown arch, we should get back that with arch 'default'."""
+ arch_mirrors = gapmi(package_mirrors, arch="unknown")
+ self.assertEqual(unknown_arch_info, arch_mirrors)
+
+ def test_arch_package_mirror_info_known(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ self.assertEqual(package_mirrors[0], arch_mirrors)
+
+ def test_systemd_in_use(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ os.makedirs("/run/systemd/system")
+ self.assertTrue(d.uses_systemd())
+
+ def test_systemd_not_in_use(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ self.assertFalse(d.uses_systemd())
+
+ def test_systemd_symlink(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ os.makedirs("/run/systemd")
+ os.symlink("/", "/run/systemd/system")
+ self.assertFalse(d.uses_systemd())
+
+ @mock.patch("cloudinit.distros.debian.read_system_locale")
+ def test_get_locale_ubuntu(self, m_locale):
+ """Test ubuntu distro returns locale set to C.UTF-8"""
+ m_locale.return_value = "C.UTF-8"
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ locale = d.get_locale()
+ self.assertEqual("C.UTF-8", locale)
+
+ def test_get_locale_rhel(self):
+ """Test rhel distro returns NotImplementedError exception"""
+ cls = distros.fetch("rhel")
+ d = cls("rhel", {}, None)
+ with self.assertRaises(NotImplementedError):
+ d.get_locale()
+
+ def test_expire_passwd_uses_chpasswd(self):
+ """Test ubuntu.expire_passwd uses the passwd command."""
+ for d_name in ("ubuntu", "rhel"):
+ cls = distros.fetch(d_name)
+ d = cls(d_name, {}, None)
+ with mock.patch("cloudinit.subp.subp") as m_subp:
+ d.expire_passwd("myuser")
+ m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
+
+ def test_expire_passwd_freebsd_uses_pw_command(self):
+ """Test FreeBSD.expire_passwd uses the pw command."""
+ cls = distros.fetch("freebsd")
+ d = cls("freebsd", {}, None)
+ with mock.patch("cloudinit.subp.subp") as m_subp:
+ d.expire_passwd("myuser")
+ m_subp.assert_called_once_with(
+ ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]
+ )
+
+
+class TestGetPackageMirrors:
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+
+ return mlist[1] if len(mlist) > 1 else None
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+ return mlist[-1]
+
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type, mirrors",
+ [
+ (
+ True,
+ "ec2",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ True,
+ "other",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ False,
+ "ec2",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ False,
+ "other",
+ [
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ ],
+ )
+ def test_get_package_mirror_info_az_ec2(
+ self, allow_ec2_mirror, platform_type, mirrors
+ ):
+ flag_path = (
+ "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ )
+ with mock.patch(flag_path, allow_ec2_mirror):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(
+ availability_zone="us-east-1a", platform_type=platform_type
+ )
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == mirrors[0]
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_second,
+ )
+ assert results == mirrors[1]
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_none,
+ )
+ assert results == package_mirrors[0]["failsafe"]
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == {
+ "primary": "http://nova.cloudvendor.clouds/",
+ "security": "http://security-mirror1-intel",
+ }
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_last,
+ )
+ assert results == {
+ "primary": "http://nova.cloudvendor.clouds/",
+ "security": "http://security-mirror2-intel",
+ }
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone=None)
+
+        # Because both search entries here rely on replacement based on
+        # availability-zone, the filter will be called with an empty list
+        # and the failsafe should be taken.
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror1-intel",
+ }
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_last,
+ )
+ assert results == {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror2-intel",
+ }
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py
index 37a4f51f..dadf5df5 100644
--- a/tests/unittests/test_distros/test_gentoo.py
+++ b/tests/unittests/distros/test_gentoo.py
@@ -1,13 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
-from cloudinit import atomic_helper
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit import atomic_helper, util
+from tests.unittests.helpers import CiTestCase
+
from . import _get_distro
class TestGentoo(CiTestCase):
-
def test_write_hostname(self):
distro = _get_distro("gentoo")
hostname = "myhostname"
@@ -22,5 +21,7 @@ class TestGentoo(CiTestCase):
hostfile = self.tmp_path("hostfile")
atomic_helper.write_file(hostfile, contents, omode="w")
distro._write_hostname(hostname, hostfile)
- self.assertEqual('#This is the hostname\nhostname="myhostname"\n',
- util.load_file(hostfile))
+ self.assertEqual(
+ '#This is the hostname\nhostname="myhostname"\n',
+ util.load_file(hostfile),
+ )
diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/distros/test_hostname.py
index f6d4dbe5..2cbbb3e2 100644
--- a/tests/unittests/test_distros/test_hostname.py
+++ b/tests/unittests/distros/test_hostname.py
@@ -4,13 +4,12 @@ import unittest
from cloudinit.distros.parsers import hostname
-
-BASE_HOSTNAME = '''
+BASE_HOSTNAME = """
# My super-duper-hostname
blahblah
-'''
+"""
BASE_HOSTNAME = BASE_HOSTNAME.strip()
@@ -18,7 +17,7 @@ class TestHostnameHelper(unittest.TestCase):
def test_parse_same(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
self.assertEqual(str(hn).strip(), BASE_HOSTNAME)
- self.assertEqual(hn.hostname, 'blahblah')
+ self.assertEqual(hn.hostname, "blahblah")
def test_no_adjust_hostname(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
@@ -29,14 +28,15 @@ class TestHostnameHelper(unittest.TestCase):
def test_adjust_hostname(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
prev_name = hn.hostname
- self.assertEqual(prev_name, 'blahblah')
+ self.assertEqual(prev_name, "blahblah")
hn.set_hostname("bbbbd")
- self.assertEqual(hn.hostname, 'bbbbd')
- expected_out = '''
+ self.assertEqual(hn.hostname, "bbbbd")
+ expected_out = """
# My super-duper-hostname
bbbbd
-'''
+"""
self.assertEqual(str(hn).strip(), expected_out.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_hosts.py b/tests/unittests/distros/test_hosts.py
new file mode 100644
index 00000000..faffd912
--- /dev/null
+++ b/tests/unittests/distros/test_hosts.py
@@ -0,0 +1,47 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import unittest
+
+from cloudinit.distros.parsers import hosts
+
+BASE_ETC = """
+# Example
+127.0.0.1 localhost
+192.168.1.10 foo.mydomain.org foo
+192.168.1.10 bar.mydomain.org bar
+146.82.138.7 master.debian.org master
+209.237.226.90 www.opensource.org
+"""
+BASE_ETC = BASE_ETC.strip()
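+# Note that 192.168.1.10 appears twice above; HostsConf is expected to
+# return both alias groups for that address.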
+
+
+class TestHostsHelper(unittest.TestCase):
+ def test_parse(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ self.assertEqual(eh.get_entry("127.0.0.1"), [["localhost"]])
+ self.assertEqual(
+ eh.get_entry("192.168.1.10"),
+ [["foo.mydomain.org", "foo"], ["bar.mydomain.org", "bar"]],
+ )
+ eh = str(eh)
+ self.assertTrue(eh.startswith("# Example"))
+
+ def test_add(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ eh.add_entry("127.0.0.0", "blah")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]])
+ eh.add_entry("127.0.0.3", "blah", "blah2", "blah3")
+ self.assertEqual(
+ eh.get_entry("127.0.0.3"), [["blah", "blah2", "blah3"]]
+ )
+
+ def test_del(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ eh.add_entry("127.0.0.0", "blah")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]])
+
+ eh.del_entries("127.0.0.0")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_init.py b/tests/unittests/distros/test_init.py
new file mode 100644
index 00000000..8f3c8978
--- /dev/null
+++ b/tests/unittests/distros/test_init.py
@@ -0,0 +1,248 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit/distros/__init__.py"""
+
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros import LDH_ASCII_CHARS, _get_package_mirror_info
+
+# In newer versions of Python, these characters will be omitted instead
+# of substituted because of security concerns.
+# See https://bugs.python.org/issue43882
+SECURITY_URL_CHARS = "\n\r\t"
+
+# Define a set of characters we would expect to be replaced
+INVALID_URL_CHARS = [
+ chr(x)
+ for x in range(127)
+ if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS
+]
+for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
+ # Remove from the set characters that either separate hostname parts (":",
+ # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
+ # unable to parse URLs ("[", "]").
+ INVALID_URL_CHARS.remove(separator)
+
+
+class TestGetPackageMirrorInfo:
+ """
+ Tests for cloudinit.distros._get_package_mirror_info.
+
+ These supplement the tests in tests/unittests/test_distros/test_generic.py
+ which are more focused on testing a single production-like configuration.
+ These tests are more focused on specific aspects of the unit under test.
+ """
+
+ @pytest.mark.parametrize(
+ "mirror_info,expected",
+ [
+ # Empty info gives empty return
+ ({}, {}),
+ # failsafe values used if present
+ (
+ {
+ "failsafe": {
+ "primary": "http://value",
+ "security": "http://other",
+ }
+ },
+ {"primary": "http://value", "security": "http://other"},
+ ),
+ # search values used if present
+ (
+ {
+ "search": {
+ "primary": ["http://value"],
+ "security": ["http://other"],
+ }
+ },
+ {"primary": ["http://value"], "security": ["http://other"]},
+ ),
+ # failsafe values used if search value not present
+ (
+ {
+ "search": {"primary": ["http://value"]},
+ "failsafe": {"security": "http://other"},
+ },
+ {"primary": ["http://value"], "security": "http://other"},
+ ),
+ ],
+ )
+ def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
+ """
+ Test the interaction between search and failsafe inputs
+
+ (This doesn't test the case where the mirror_filter removes all search
+ options; test_failsafe_used_if_all_search_results_filtered_out covers
+ that.)
+ """
+ assert expected == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: x
+ )
+
+ def test_failsafe_used_if_all_search_results_filtered_out(self):
+ """Test the failsafe option used if all search options eliminated."""
+ mirror_info = {
+ "search": {"primary": ["http://value"]},
+ "failsafe": {"primary": "http://other"},
+ }
+ assert {"primary": "http://other"} == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: False
+ )
+
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type", [(True, "ec2")]
+ )
+ @pytest.mark.parametrize(
+ "availability_zone,region,patterns,expected",
+ (
+ # Test ec2_region alone
+ (
+ "fk-fake-1f",
+ None,
+ ["http://EC2-%(ec2_region)s/ubuntu"],
+ ["http://ec2-fk-fake-1/ubuntu"],
+ ),
+ # Test availability_zone alone
+ (
+ "fk-fake-1f",
+ None,
+ ["http://AZ-%(availability_zone)s/ubuntu"],
+ ["http://az-fk-fake-1f/ubuntu"],
+ ),
+ # Test region alone
+ (
+ None,
+ "fk-fake-1",
+ ["http://RG-%(region)s/ubuntu"],
+ ["http://rg-fk-fake-1/ubuntu"],
+ ),
+ # Test that ec2_region is not available for non-matching AZs
+ (
+ "fake-fake-1f",
+ None,
+ [
+ "http://EC2-%(ec2_region)s/ubuntu",
+ "http://AZ-%(availability_zone)s/ubuntu",
+ ],
+ ["http://az-fake-fake-1f/ubuntu"],
+ ),
+ # Test that template order maintained
+ (
+ None,
+ "fake-region",
+ [
+ "http://RG-%(region)s-2/ubuntu",
+ "http://RG-%(region)s-1/ubuntu",
+ ],
+ [
+ "http://rg-fake-region-2/ubuntu",
+ "http://rg-fake-region-1/ubuntu",
+ ],
+ ),
+ # Test that non-ASCII hostnames are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.IDNA-%(region)s.com/ubuntu"],
+ ["http://www.xn--idna--4kd53hh6aba3q.com/ubuntu"],
+ ),
+ # Test that non-ASCII hostnames with a port are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.IDNA-%(region)s.com:8080/ubuntu"],
+ ["http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu"],
+ ),
+ # Test that non-ASCII non-hostname parts of URLs are unchanged
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.example.com/%(region)s/ubuntu"],
+ ["http://www.example.com/ТεЅТ̣/ubuntu"],
+ ),
+ # Test that IPv4 addresses are unchanged
+ (
+ None,
+ "fk-fake-1",
+ ["http://192.168.1.1:8080/%(region)s/ubuntu"],
+ ["http://192.168.1.1:8080/fk-fake-1/ubuntu"],
+ ),
+ # Test that IPv6 addresses are unchanged
+ (
+ None,
+ "fk-fake-1",
+ ["http://[2001:67c:1360:8001::23]/%(region)s/ubuntu"],
+ ["http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu"],
+ ),
+ # Test that unparseable URLs are filtered out of the mirror list
+ (
+ None,
+ "inv[lid",
+ [
+ "http://%(region)s.in.hostname/should/be/filtered",
+ "http://but.not.in.the.path/%(region)s",
+ ],
+ ["http://but.not.in.the.path/inv[lid"],
+ ),
+ (
+ None,
+ "-some-region-",
+ ["http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu"],
+ ["http://lead-ing.some-region.trail-ing.example.com/ubuntu"],
+ ),
+ )
+ + tuple(
+ # Dynamically generate a test case for each non-LDH
+ # (Letters/Digits/Hyphen) ASCII character, testing that it is
+ # substituted with a hyphen
+ (
+ None,
+ "fk{0}fake{0}1".format(invalid_char),
+ ["http://%(region)s/ubuntu"],
+ ["http://fk-fake-1/ubuntu"],
+ )
+ for invalid_char in INVALID_URL_CHARS
+ ),
+ )
+ def test_valid_substitution(
+ self,
+ allow_ec2_mirror,
+ platform_type,
+ availability_zone,
+ region,
+ patterns,
+ expected,
+ ):
+ """Test substitution works as expected."""
+ flag_path = (
+ "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ )
+
+ m_data_source = mock.Mock(
+ availability_zone=availability_zone,
+ region=region,
+ platform_type=platform_type,
+ )
+ mirror_info = {"search": {"primary": patterns}}
+
+ with mock.patch(flag_path, allow_ec2_mirror):
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x,
+ )
+ print(allow_ec2_mirror)
+ print(platform_type)
+ print(availability_zone)
+ print(region)
+ print(patterns)
+ print(expected)
+ assert {"primary": expected} == ret
diff --git a/tests/unittests/distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py
new file mode 100644
index 00000000..9e64b35c
--- /dev/null
+++ b/tests/unittests/distros/test_manage_service.py
@@ -0,0 +1,41 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from tests.unittests.helpers import CiTestCase, mock
+from tests.unittests.util import MockDistro
+
+
+class TestManageService(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestManageService, self).setUp()
+ self.dist = MockDistro()
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd):
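+        # With uses_systemd() mocked to False, the distro falls back to its
+        # configured init_cmd, which is systemctl here.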
+ self.dist.init_cmd = ["systemctl"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(
+ ["systemctl", "start", "myssh"], capture=True
+ )
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_service_initcmd(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["service"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(["service", "myssh", "start"], capture=True)
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["ignore"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(
+ ["systemctl", "start", "myssh"], capture=True
+ )
+
+
+# vi: ts=4 sw=4 expandtab
diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py
index 11a68d2a..0bc6dfbd 100644
--- a/tests/unittests/test_distros/test_netbsd.py
+++ b/tests/unittests/distros/test_netbsd.py
@@ -1,10 +1,11 @@
-import cloudinit.distros.netbsd
+import unittest.mock as mock
import pytest
-import unittest.mock as mock
+
+import cloudinit.distros.netbsd
-@pytest.mark.parametrize('with_pkgin', (True, False))
+@pytest.mark.parametrize("with_pkgin", (True, False))
@mock.patch("cloudinit.distros.netbsd.os")
def test_init(m_os, with_pkgin):
print(with_pkgin)
@@ -12,6 +13,6 @@ def test_init(m_os, with_pkgin):
cfg = {}
distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
- expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None
+ expectation = ["pkgin", "-y", "full-upgrade"] if with_pkgin else None
assert distro.pkg_cmd_upgrade_prefix == expectation
- assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list
+ assert [mock.call("/usr/pkg/bin/pkgin")] == m_os.path.exists.call_args_list
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index a1df066a..a25be481 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -2,21 +2,16 @@
import copy
import os
+import re
from io import StringIO
from textwrap import dedent
from unittest import mock
-from cloudinit import distros
+from cloudinit import distros, helpers, safeyaml, settings, subp, util
from cloudinit.distros.parsers.sys_conf import SysConf
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, dir2dict)
-from cloudinit import subp
-from cloudinit import util
+from tests.unittests.helpers import FilesystemMockingTestCase, dir2dict
-
-BASE_NET_CFG = '''
+BASE_NET_CFG = """
auto lo
iface lo inet loopback
@@ -30,9 +25,9 @@ iface eth0 inet static
auto eth1
iface eth1 inet dhcp
-'''
+"""
-BASE_NET_CFG_FROM_V2 = '''
+BASE_NET_CFG_FROM_V2 = """
auto lo
iface lo inet loopback
@@ -43,9 +38,9 @@ iface eth0 inet static
auto eth1
iface eth1 inet dhcp
-'''
+"""
-BASE_NET_CFG_IPV6 = '''
+BASE_NET_CFG_IPV6 = """
auto lo
iface lo inet loopback
@@ -73,20 +68,49 @@ iface eth1 inet6 static
address 2607:f0d0:1002:0011::3
netmask 64
gateway 2607:f0d0:1002:0011::1
-'''
+"""
-V1_NET_CFG = {'config': [{'name': 'eth0',
+V1_NET_CFG = {
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [
+ {
+ "address": "192.168.1.5",
+ "broadcast": "192.168.1.0",
+ "gateway": "192.168.1.254",
+ "netmask": "255.255.255.0",
+ "type": "static",
+ }
+ ],
+ "type": "physical",
+ },
+ {
+ "name": "eth1",
+ "subnets": [{"control": "auto", "type": "dhcp4"}],
+ "type": "physical",
+ },
+ ],
+ "version": 1,
+}
- 'subnets': [{'address': '192.168.1.5',
- 'broadcast': '192.168.1.0',
- 'gateway': '192.168.1.254',
- 'netmask': '255.255.255.0',
- 'type': 'static'}],
- 'type': 'physical'},
- {'name': 'eth1',
- 'subnets': [{'control': 'auto', 'type': 'dhcp4'}],
- 'type': 'physical'}],
- 'version': 1}
+V1_NET_CFG_WITH_DUPS = """\
+# same value in interface specific dns and global dns
+# should produce single entry in network file
+version: 1
+config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: static
+ address: 192.168.0.102/24
+ dns_nameservers: [1.2.3.4]
+ dns_search: [test.com]
+ interface: eth0
+ - type: nameserver
+ address: [1.2.3.4]
+ search: [test.com]
+"""
V1_NET_CFG_OUTPUT = """\
# This file is generated from information provided by the datasource. Changes
@@ -125,19 +149,28 @@ auto eth1
iface eth1 inet dhcp
"""
-V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
- 'subnets': [{'address':
- '2607:f0d0:1002:0011::2',
- 'gateway':
- '2607:f0d0:1002:0011::1',
- 'netmask': '64',
- 'type': 'static6'}],
- 'type': 'physical'},
- {'name': 'eth1',
- 'subnets': [{'control': 'auto',
- 'type': 'dhcp4'}],
- 'type': 'physical'}],
- 'version': 1}
+V1_NET_CFG_IPV6 = {
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [
+ {
+ "address": "2607:f0d0:1002:0011::2",
+ "gateway": "2607:f0d0:1002:0011::1",
+ "netmask": "64",
+ "type": "static6",
+ }
+ ],
+ "type": "physical",
+ },
+ {
+ "name": "eth1",
+ "subnets": [{"control": "auto", "type": "dhcp4"}],
+ "type": "physical",
+ },
+ ],
+ "version": 1,
+}
V1_TO_V2_NET_CFG_OUTPUT = """\
@@ -175,14 +208,11 @@ network:
"""
V2_NET_CFG = {
- 'ethernets': {
- 'eth7': {
- 'addresses': ['192.168.1.5/24'],
- 'gateway4': '192.168.1.254'},
- 'eth9': {
- 'dhcp4': True}
+ "ethernets": {
+ "eth7": {"addresses": ["192.168.1.5/24"], "gateway4": "192.168.1.254"},
+ "eth9": {"dhcp4": True},
},
- 'version': 2
+ "version": 2,
}
@@ -218,21 +248,18 @@ class WriteBuffer(object):
class TestNetCfgDistroBase(FilesystemMockingTestCase):
-
def setUp(self):
super(TestNetCfgDistroBase, self).setUp()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
def _get_distro(self, dname, renderers=None):
cls = distros.fetch(dname)
cfg = settings.CFG_BUILTIN
- cfg['system_info']['distro'] = dname
+ cfg["system_info"]["distro"] = dname
if renderers:
- cfg['system_info']['network'] = {'renderers': renderers}
+ cfg["system_info"]["network"] = {"renderers": renderers}
paths = helpers.Paths({})
- return cls(dname, cfg.get('system_info'), paths)
+ return cls(dname, cfg.get("system_info"), paths)
def assertCfgEquals(self, blob1, blob2):
b1 = dict(SysConf(blob1.strip().splitlines()))
@@ -247,23 +274,23 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase):
class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroFreeBSD, self).setUp()
- self.distro = self._get_distro('freebsd', renderers=['freebsd'])
+ self.distro = self._get_distro("freebsd", renderers=["freebsd"])
- def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_freebsd(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.freebsd.available') as m_avail:
+ with mock.patch("cloudinit.net.freebsd.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
- util.ensure_dir('/etc')
- util.ensure_file('/etc/rc.conf')
- util.ensure_file('/etc/resolv.conf')
+ util.ensure_dir("/etc")
+ util.ensure_file("/etc/rc.conf")
+ util.ensure_file("/etc/resolv.conf")
apply_fn(config, bringup)
results = dir2dict(tmpd)
@@ -274,14 +301,14 @@ class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
print(results[cfgpath])
print("----------")
self.assertEqual(
- set(expected.split('\n')),
- set(results[cfgpath].split('\n')))
+ set(expected.split("\n")), set(results[cfgpath].split("\n"))
+ )
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_standard(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'eth0',
+ "00:15:5d:4c:73:00": "eth0",
}
rc_conf_expected = """\
defaultrouter=192.168.1.254
@@ -290,17 +317,19 @@ ifconfig_eth1=DHCP
"""
expected_cfgs = {
- '/etc/rc.conf': rc_conf_expected,
- '/etc/resolv.conf': ''
+ "/etc/rc.conf": rc_conf_expected,
+ "/etc/resolv.conf": "",
}
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_ifrename(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'vtnet0',
+ "00:15:5d:4c:73:00": "vtnet0",
}
rc_conf_expected = """\
ifconfig_vtnet0_name=eth0
@@ -310,49 +339,51 @@ ifconfig_eth1=DHCP
"""
V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG)
- V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00'
+ V1_NET_CFG_RENAME["config"][0]["mac_address"] = "00:15:5d:4c:73:00"
expected_cfgs = {
- '/etc/rc.conf': rc_conf_expected,
- '/etc/resolv.conf': ''
+ "/etc/rc.conf": rc_conf_expected,
+ "/etc/resolv.conf": "",
}
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG_RENAME,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG_RENAME,
+ expected_cfgs=expected_cfgs.copy(),
+ )
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_nameserver(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'eth0',
+ "00:15:5d:4c:73:00": "eth0",
}
V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG)
- ns = ['1.2.3.4']
- V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns
- expected_cfgs = {
- '/etc/resolv.conf': 'nameserver 1.2.3.4\n'
- }
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG_DNS,
- expected_cfgs=expected_cfgs.copy())
+ ns = ["1.2.3.4"]
+ V1_NET_CFG_DNS["config"][0]["subnets"][0]["dns_nameservers"] = ns
+ expected_cfgs = {"/etc/resolv.conf": "nameserver 1.2.3.4\n"}
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG_DNS,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroUbuntuEni, self).setUp()
- self.distro = self._get_distro('ubuntu', renderers=['eni'])
+ self.distro = self._get_distro("ubuntu", renderers=["eni"])
def eni_path(self):
- return '/etc/network/interfaces.d/50-cloud-init.cfg'
+ return "/etc/network/interfaces.d/50-cloud-init.cfg"
- def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_eni(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.eni.available') as m_avail:
+ with mock.patch("cloudinit.net.eni.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -372,35 +403,39 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
self.eni_path(): V1_NET_CFG_OUTPUT,
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify_eni(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_ub(self):
- expected_cfgs = {
- self.eni_path(): V1_NET_CFG_IPV6_OUTPUT
- }
- self._apply_and_verify_eni(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ expected_cfgs = {self.eni_path(): V1_NET_CFG_IPV6_OUTPUT}
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroUbuntuNetplan, self).setUp()
- self.distro = self._get_distro('ubuntu', renderers=['netplan'])
- self.devlist = ['eth0', 'lo']
+ self.distro = self._get_distro("ubuntu", renderers=["netplan"])
+ self.devlist = ["eth0", "lo"]
- def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_netplan(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.netplan.available',
- return_value=True):
- with mock.patch("cloudinit.net.netplan.get_devicelist",
- return_value=self.devlist):
+ with mock.patch("cloudinit.net.netplan.available", return_value=True):
+ with mock.patch(
+ "cloudinit.net.netplan.get_devicelist",
+ return_value=self.devlist,
+ ):
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -415,7 +450,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
def netplan_path(self):
- return '/etc/netplan/50-cloud-init.yaml'
+ return "/etc/netplan/50-cloud-init.yaml"
def test_apply_network_config_v1_to_netplan_ub(self):
expected_cfgs = {
@@ -423,9 +458,11 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_v1_ipv6_to_netplan_ub(self):
expected_cfgs = {
@@ -433,39 +470,43 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
}
# ub_distro.apply_network_config(V1_NET_CFG_IPV6, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_v2_passthrough_ub(self):
expected_cfgs = {
self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT,
}
# ub_distro.apply_network_config(V2_NET_CFG, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V2_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroRedhat, self).setUp()
- self.distro = self._get_distro('rhel', renderers=['sysconfig'])
+ self.distro = self._get_distro("rhel", renderers=["sysconfig"])
def ifcfg_path(self, ifname):
- return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname
+ return "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname
def control_path(self):
- return '/etc/sysconfig/network'
+ return "/etc/sysconfig/network"
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ with mock.patch("cloudinit.net.sysconfig.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -477,7 +518,8 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
def test_apply_network_config_rh(self):
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0
@@ -488,27 +530,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
# rh_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_rh(self):
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0
@@ -521,39 +571,54 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
NETWORKING_IPV6=yes
IPV6_AUTOCONF=no
- """),
+ """
+ ),
}
# rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_vlan_render_unsupported(self):
"""Render officially unsupported vlan names."""
cfg = {
- 'version': 2,
- 'ethernets': {
- 'eth0': {'addresses': ["192.10.1.2/24"],
- 'match': {'macaddress': "00:16:3e:60:7c:df"}}},
- 'vlans': {
- 'infra0': {'addresses': ["10.0.1.2/16"],
- 'id': 1001, 'link': 'eth0'}},
+ "version": 2,
+ "ethernets": {
+ "eth0": {
+ "addresses": ["192.10.1.2/24"],
+ "match": {"macaddress": "00:16:3e:60:7c:df"},
+ }
+ },
+ "vlans": {
+ "infra0": {
+ "addresses": ["10.0.1.2/16"],
+ "id": 1001,
+ "link": "eth0",
+ }
+ },
}
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=00:16:3e:60:7c:df
@@ -563,8 +628,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('infra0'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("infra0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=infra0
IPADDR=10.0.1.2
@@ -574,26 +641,33 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
PHYSDEV=eth0
USERCTL=no
VLAN=yes
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
self._apply_and_verify(
- self.distro.apply_network_config, cfg,
- expected_cfgs=expected_cfgs)
+ self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs
+ )
def test_vlan_render(self):
cfg = {
- 'version': 2,
- 'ethernets': {
- 'eth0': {'addresses': ["192.10.1.2/24"]}},
- 'vlans': {
- 'eth0.1001': {'addresses': ["10.0.1.2/16"],
- 'id': 1001, 'link': 'eth0'}},
+ "version": 2,
+ "ethernets": {"eth0": {"addresses": ["192.10.1.2/24"]}},
+ "vlans": {
+ "eth0.1001": {
+ "addresses": ["10.0.1.2/16"],
+ "id": 1001,
+ "link": "eth0",
+ }
+ },
}
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
IPADDR=192.10.1.2
@@ -602,8 +676,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth0.1001'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth0.1001"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0.1001
IPADDR=10.0.1.2
@@ -613,32 +689,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
PHYSDEV=eth0
USERCTL=no
VLAN=yes
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
self._apply_and_verify(
- self.distro.apply_network_config, cfg,
- expected_cfgs=expected_cfgs)
+ self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs
+ )
class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroOpensuse, self).setUp()
- self.distro = self._get_distro('opensuse', renderers=['sysconfig'])
+ self.distro = self._get_distro("opensuse", renderers=["sysconfig"])
def ifcfg_path(self, ifname):
- return '/etc/sysconfig/network/ifcfg-%s' % ifname
+ return "/etc/sysconfig/network/ifcfg-%s" % ifname
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ with mock.patch("cloudinit.net.sysconfig.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -651,52 +730,71 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
def test_apply_network_config_opensuse(self):
"""Opensuse uses apply_network_config and renders sysconfig"""
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.1.5
NETMASK=255.255.255.0
STARTMODE=auto
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
}
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_opensuse(self):
"""Opensuse uses apply_network_config and renders sysconfig w/ipv6"""
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=static
IPADDR6=2607:f0d0:1002:0011::2/64
STARTMODE=auto
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
}
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroArch(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroArch, self).setUp()
- self.distro = self._get_distro('arch', renderers=['netplan'])
-
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False, with_netplan=False):
+ self.distro = self._get_distro("arch", renderers=["netplan"])
+
+ def _apply_and_verify(
+ self,
+ apply_fn,
+ config,
+ expected_cfgs=None,
+ bringup=False,
+ with_netplan=False,
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.netplan.available',
- return_value=with_netplan):
+ with mock.patch(
+ "cloudinit.net.netplan.available", return_value=with_netplan
+ ):
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -711,10 +809,10 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
def netctl_path(self, iface):
- return '/etc/netctl/%s' % iface
+ return "/etc/netctl/%s" % iface
def netplan_path(self):
- return '/etc/netplan/50-cloud-init.yaml'
+ return "/etc/netplan/50-cloud-init.yaml"
def test_apply_network_config_v1_without_netplan(self):
# Note that this is in fact an invalid netctl config:
@@ -724,33 +822,40 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
# still being used in absence of netplan, not the correctness of the
# rendered netctl config.
expected_cfgs = {
- self.netctl_path('eth0'): dedent("""\
+ self.netctl_path("eth0"): dedent(
+ """\
Address=192.168.1.5/255.255.255.0
Connection=ethernet
DNS=()
Gateway=192.168.1.254
IP=static
Interface=eth0
- """),
- self.netctl_path('eth1'): dedent("""\
+ """
+ ),
+ self.netctl_path("eth1"): dedent(
+ """\
Address=None/None
Connection=ethernet
DNS=()
Gateway=
IP=dhcp
Interface=eth1
- """),
+ """
+ ),
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
- with_netplan=False)
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ with_netplan=False,
+ )
def test_apply_network_config_v1_with_netplan(self):
expected_cfgs = {
- self.netplan_path(): dedent("""\
+ self.netplan_path(): dedent(
+ """\
# generated by cloud-init
network:
version: 2
@@ -761,17 +866,148 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
gateway4: 192.168.1.254
eth1:
dhcp4: true
- """),
+ """
+ ),
}
- with mock.patch('cloudinit.util.is_FreeBSD', return_value=False):
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
- with_netplan=True)
+ with mock.patch(
+ "cloudinit.net.netplan.get_devicelist", return_value=[]
+ ):
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ with_netplan=True,
+ )
+
+
+class TestNetCfgDistroPhoton(TestNetCfgDistroBase):
+ def setUp(self):
+ super(TestNetCfgDistroPhoton, self).setUp()
+ self.distro = self._get_distro("photon", renderers=["networkd"])
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
+ if not expected_cfgs:
+ raise ValueError("expected_cfg must not be None")
+
+ tmpd = None
+ with mock.patch("cloudinit.net.networkd.available") as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ actual = self.create_conf_dict(results[cfgpath].splitlines())
+ self.compare_dicts(actual, expected)
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def nwk_file_path(self, ifname):
+ return "/etc/systemd/network/10-cloud-init-%s.network" % ifname
+
+ def net_cfg_1(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.1.5/24
+ [Route]
+ Gateway=192.168.1.254"""
+ % ifname
+ )
+ return ret
+
+ def net_cfg_2(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=ipv4"""
+ % ifname
+ )
+ return ret
+
+ def test_photon_network_config_v1(self):
+ tmp = self.net_cfg_1("eth0").splitlines()
+ expected_eth0 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth1").splitlines()
+ expected_eth1 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected_eth0,
+ self.nwk_file_path("eth1"): expected_eth1,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_photon_network_config_v2(self):
+ tmp = self.net_cfg_1("eth7").splitlines()
+ expected_eth7 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth9").splitlines()
+ expected_eth9 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth7"): expected_eth7,
+ self.nwk_file_path("eth9"): expected_eth9,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_photon_network_config_v1_with_duplicates(self):
+ expected = """\
+ [Match]
+ Name=eth0
+ [Network]
+ DHCP=no
+ DNS=1.2.3.4
+ Domains=test.com
+ [Address]
+ Address=192.168.0.102/24"""
+
+ net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
+
+ expected = self.create_conf_dict(expected.splitlines())
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, net_cfg, expected_cfgs.copy()
+ )
def get_mode(path, target=None):
return os.stat(subp.target_path(target, path)).st_mode & 0o777
+
# vi: ts=4 expandtab
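
The Photon networkd tests above compare rendered systemd-networkd units by
splitting them into a dict keyed on the "[Section]" headers, so that key order
inside a section does not matter. A minimal standalone sketch of that idea,
with illustrative names that are not part of the patch:

    import re

    def sections(lines):
        # split a rendered unit into {"[Section]": ["Key=Value", ...]}
        out, key = {}, None
        for line in (ln.strip() for ln in lines):
            if not line:
                continue
            if re.match(r"^\[.+\]$", line):
                key = line
                out[key] = []
            else:
                out[key].append(line)  # assumes a section header came first
        return out

    rendered = "[Match]\nName=eth0\n[Network]\nDHCP=no\n"
    assert sections(rendered.splitlines()) == {
        "[Match]": ["Name=eth0"],
        "[Network]": ["DHCP=no"],
    }
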
diff --git a/cloudinit/distros/tests/test_networking.py b/tests/unittests/distros/test_networking.py
index ec508f4d..274647cb 100644
--- a/cloudinit/distros/tests/test_networking.py
+++ b/tests/unittests/distros/test_networking.py
@@ -1,3 +1,6 @@
+# See https://docs.pytest.org/en/stable/example
+# /parametrize.html#parametrizing-conditional-raising
+from contextlib import ExitStack as does_not_raise
from unittest import mock
import pytest
@@ -9,12 +12,8 @@ from cloudinit.distros.networking import (
Networking,
)
-# See https://docs.pytest.org/en/stable/example
-# /parametrize.html#parametrizing-conditional-raising
-from contextlib import ExitStack as does_not_raise
-
-@pytest.yield_fixture
+@pytest.fixture
def generic_networking_cls():
"""Returns a direct Networking subclass which errors on /sys usage.
@@ -35,12 +34,13 @@ def generic_networking_cls():
error = AssertionError("Unexpectedly used /sys in generic networking code")
with mock.patch(
- "cloudinit.net.get_sys_class_path", side_effect=error,
+ "cloudinit.net.get_sys_class_path",
+ side_effect=error,
):
yield TestNetworking
-@pytest.yield_fixture
+@pytest.fixture
def sys_class_net(tmpdir):
sys_class_net_path = tmpdir.join("sys/class/net")
sys_class_net_path.ensure_dir()
@@ -91,8 +91,10 @@ class TestLinuxNetworkingTrySetLinkUp:
m_is_up.return_value = True
is_success = LinuxNetworking().try_set_link_up(devname)
- assert (mock.call(['ip', 'link', 'set', devname, 'up']) ==
- m_subp.call_args_list[-1])
+ assert (
+ mock.call(["ip", "link", "set", devname, "up"])
+ == m_subp.call_args_list[-1]
+ )
assert is_success
def test_calls_subp_return_false(self, m_subp, m_is_up):
@@ -100,8 +102,10 @@ class TestLinuxNetworkingTrySetLinkUp:
m_is_up.return_value = False
is_success = LinuxNetworking().try_set_link_up(devname)
- assert (mock.call(['ip', 'link', 'set', devname, 'up']) ==
- m_subp.call_args_list[-1])
+ assert (
+ mock.call(["ip", "link", "set", devname, "up"])
+ == m_subp.call_args_list[-1]
+ )
assert not is_success
@@ -153,7 +157,9 @@ class TestNetworkingWaitForPhysDevs:
return netcfg
def test_skips_settle_if_all_present(
- self, generic_networking_cls, wait_for_physdevs_netcfg,
+ self,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
):
networking = generic_networking_cls()
with mock.patch.object(
@@ -169,7 +175,9 @@ class TestNetworkingWaitForPhysDevs:
assert 0 == m_settle.call_count
def test_calls_udev_settle_on_missing(
- self, generic_networking_cls, wait_for_physdevs_netcfg,
+ self,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
):
networking = generic_networking_cls()
with mock.patch.object(
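
The import moved to the top of this file keeps the "parametrizing conditional
raising" pattern from the pytest documentation: ExitStack aliased as
does_not_raise is a no-op context manager for cases that must not raise, while
pytest.raises() covers the failing cases. A minimal sketch of the pattern with
illustrative values:

    from contextlib import ExitStack as does_not_raise

    import pytest

    @pytest.mark.parametrize(
        "value,expectation",
        [
            (3, does_not_raise()),
            (0, pytest.raises(ZeroDivisionError)),
        ],
    )
    def test_divide(value, expectation):
        with expectation:
            assert (6 / value) is not None
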
diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py
index b9bb9b3e..4a4b266f 100644
--- a/tests/unittests/test_distros/test_opensuse.py
+++ b/tests/unittests/distros/test_opensuse.py
@@ -1,12 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
from . import _get_distro
class TestopenSUSE(CiTestCase):
-
def test_get_distro(self):
distro = _get_distro("opensuse")
- self.assertEqual(distro.osfamily, 'suse')
+ self.assertEqual(distro.osfamily, "suse")
diff --git a/tests/unittests/distros/test_photon.py b/tests/unittests/distros/test_photon.py
new file mode 100644
index 00000000..fed30c2b
--- /dev/null
+++ b/tests/unittests/distros/test_photon.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from tests.unittests.helpers import CiTestCase, mock
+
+from . import _get_distro
+
+SYSTEM_INFO = {
+ "paths": {
+ "cloud_dir": "/var/lib/cloud/",
+ "templates_dir": "/etc/cloud/templates/",
+ },
+ "network": {"renderers": "networkd"},
+}
+
+
+class TestPhoton(CiTestCase):
+ with_logs = True
+ distro = _get_distro("photon", SYSTEM_INFO)
+ expected_log_line = "Rely on PhotonOS default network config"
+
+ def test_network_renderer(self):
+ self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd")
+
+ def test_get_distro(self):
+ self.assertEqual(self.distro.osfamily, "photon")
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_write_hostname(self, m_subp):
+ hostname = "myhostname"
+ hostfile = self.tmp_path("previous-hostname")
+ self.distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname, util.load_file(hostfile))
+
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ m_subp.return_value = (None, None)
+ hostfile += "hostfile"
+ self.distro._write_hostname(hostname, hostfile)
+
+ m_subp.return_value = (hostname, None)
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ self.logs.truncate(0)
+ m_subp.return_value = (None, "bla")
+ self.distro._write_hostname(hostname, None)
+ self.assertIn("Error while setting hostname", self.logs.getvalue())
+
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_fallback_netcfg(self, m_fallback_cfg):
+
+ key = "disable_fallback_netcfg"
+ # Don't use fallback if no setting given
+ self.logs.truncate(0)
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = True
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = False
+ assert self.distro.generate_fallback_config() is not None
+ self.assertNotIn(self.expected_log_line, self.logs.getvalue())
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/distros/test_resolv.py
index 7d940750..65e78101 100644
--- a/tests/unittests/test_distros/test_resolv.py
+++ b/tests/unittests/distros/test_resolv.py
@@ -1,18 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros.parsers import resolv_conf
-
-from cloudinit.tests.helpers import TestCase
-
import re
+from cloudinit.distros.parsers import resolv_conf
+from tests.unittests.helpers import TestCase
-BASE_RESOLVE = '''
+BASE_RESOLVE = """
; generated by /sbin/dhclient-script
search blah.yahoo.com yahoo.com
nameserver 10.15.44.14
nameserver 10.15.30.92
-'''
+"""
BASE_RESOLVE = BASE_RESOLVE.strip()
@@ -27,39 +25,40 @@ class TestResolvHelper(TestCase):
self.assertIsNone(rp.local_domain)
rp.local_domain = "bob"
- self.assertEqual('bob', rp.local_domain)
- self.assertIn('domain bob', str(rp))
+ self.assertEqual("bob", rp.local_domain)
+ self.assertIn("domain bob", str(rp))
def test_nameservers(self):
rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('10.15.44.14', rp.nameservers)
- self.assertIn('10.15.30.92', rp.nameservers)
- rp.add_nameserver('10.2')
- self.assertIn('10.2', rp.nameservers)
- self.assertIn('nameserver 10.2', str(rp))
- self.assertNotIn('10.3', rp.nameservers)
+ self.assertIn("10.15.44.14", rp.nameservers)
+ self.assertIn("10.15.30.92", rp.nameservers)
+ rp.add_nameserver("10.2")
+ self.assertIn("10.2", rp.nameservers)
+ self.assertIn("nameserver 10.2", str(rp))
+ self.assertNotIn("10.3", rp.nameservers)
self.assertEqual(len(rp.nameservers), 3)
- rp.add_nameserver('10.2')
- rp.add_nameserver('10.3')
- self.assertNotIn('10.3', rp.nameservers)
+ rp.add_nameserver("10.2")
+ rp.add_nameserver("10.3")
+ self.assertNotIn("10.3", rp.nameservers)
def test_search_domains(self):
rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('yahoo.com', rp.search_domains)
- self.assertIn('blah.yahoo.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
- self.assertIn('bbb.y.com', rp.search_domains)
- self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp)))
- self.assertIn('bbb.y.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
+ self.assertIn("yahoo.com", rp.search_domains)
+ self.assertIn("blah.yahoo.com", rp.search_domains)
+ rp.add_search_domain("bbb.y.com")
+ self.assertIn("bbb.y.com", rp.search_domains)
+ self.assertTrue(re.search(r"search(.*)bbb.y.com(.*)", str(rp)))
+ self.assertIn("bbb.y.com", rp.search_domains)
+ rp.add_search_domain("bbb.y.com")
self.assertEqual(len(rp.search_domains), 3)
- rp.add_search_domain('bbb2.y.com')
+ rp.add_search_domain("bbb2.y.com")
self.assertEqual(len(rp.search_domains), 4)
- rp.add_search_domain('bbb3.y.com')
+ rp.add_search_domain("bbb3.y.com")
self.assertEqual(len(rp.search_domains), 5)
- rp.add_search_domain('bbb4.y.com')
+ rp.add_search_domain("bbb4.y.com")
self.assertEqual(len(rp.search_domains), 6)
- self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com')
+ self.assertRaises(ValueError, rp.add_search_domain, "bbb5.y.com")
self.assertEqual(len(rp.search_domains), 6)
+
# vi: ts=4 expandtab
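
For reference, a short usage sketch of the ResolvConf parser exercised above,
assuming cloud-init is importable; the addresses are illustrative:

    from cloudinit.distros.parsers import resolv_conf

    rp = resolv_conf.ResolvConf("search yahoo.com\nnameserver 10.15.44.14\n")
    rp.add_nameserver("10.2")
    rp.add_search_domain("bbb.y.com")
    print(rp.nameservers)  # includes '10.15.44.14' and '10.2'
    print(str(rp))         # serialized resolv.conf text with the additions
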
diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/distros/test_sles.py
index 33e3c457..66b8b13d 100644
--- a/tests/unittests/test_distros/test_sles.py
+++ b/tests/unittests/distros/test_sles.py
@@ -1,12 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
from . import _get_distro
class TestSLES(CiTestCase):
-
def test_get_distro(self):
distro = _get_distro("sles")
- self.assertEqual(distro.osfamily, 'suse')
+ self.assertEqual(distro.osfamily, "suse")
diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py
index c1d5b693..d0979e17 100644
--- a/tests/unittests/test_distros/test_sysconfig.py
+++ b/tests/unittests/distros/test_sysconfig.py
@@ -3,22 +3,23 @@
import re
from cloudinit.distros.parsers.sys_conf import SysConf
-
-from cloudinit.tests.helpers import TestCase
-
+from tests.unittests.helpers import TestCase
# Lots of good examples @
# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt
+
class TestSysConfHelper(TestCase):
# This function was added in 2.7, make it work for 2.6
def assertRegMatches(self, text, regexp):
regexp = re.compile(regexp)
- self.assertTrue(regexp.search(text),
- msg="%s must match %s!" % (text, regexp.pattern))
+ self.assertTrue(
+ regexp.search(text),
+ msg="%s must match %s!" % (text, regexp.pattern),
+ )
def test_parse_no_change(self):
- contents = '''# A comment
+ contents = """# A comment
USESMBAUTH=no
KEYTABLE=/usr/lib/kbd/keytables/us.map
SHORTDATE=$(date +%y:%m:%d:%H:%M)
@@ -28,59 +29,64 @@ NETMASK0=255.255.255.0
LIST=$LOGROOT/incremental-list
IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64'
ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256"
-USEMD5=no'''
+USEMD5=no"""
conf = SysConf(contents.splitlines())
- self.assertEqual(conf['HOSTNAME'], 'blahblah')
- self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)')
+ self.assertEqual(conf["HOSTNAME"], "blahblah")
+ self.assertEqual(conf["SHORTDATE"], "$(date +%y:%m:%d:%H:%M)")
# Should be unquoted
- self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; '
- '-G ${DEVICE} rx 256 tx 256'))
+ self.assertEqual(
+ conf["ETHTOOL_OPTS"],
+ "-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256",
+ )
self.assertEqual(contents, str(conf))
def test_parse_shell_vars(self):
- contents = 'USESMBAUTH=$XYZ'
+ contents = "USESMBAUTH=$XYZ"
conf = SysConf(contents.splitlines())
self.assertEqual(contents, str(conf))
- conf = SysConf('')
- conf['B'] = '${ZZ}d apples'
+ conf = SysConf("")
+ conf["B"] = "${ZZ}d apples"
# Should be quoted
self.assertEqual('B="${ZZ}d apples"', str(conf))
- conf = SysConf('')
- conf['B'] = '$? d apples'
+ conf = SysConf("")
+ conf["B"] = "$? d apples"
self.assertEqual('B="$? d apples"', str(conf))
contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"'
conf = SysConf(contents.splitlines())
- self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf))
+ self.assertEqual("IPMI_WATCHDOG_OPTIONS=timeout=60", str(conf))
def test_parse_adjust(self):
contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"'
conf = SysConf(contents.splitlines())
# Should be unquoted
- self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64',
- conf['IPV6TO4_ROUTING'])
- conf['IPV6TO4_ROUTING'] = "blah \tblah"
+ self.assertEqual(
+ "eth0-:0004::1/64 eth1-:0005::1/64", conf["IPV6TO4_ROUTING"]
+ )
+ conf["IPV6TO4_ROUTING"] = "blah \tblah"
contents2 = str(conf).strip()
# Should be requoted due to whitespace
- self.assertRegMatches(contents2,
- r'IPV6TO4_ROUTING=[\']blah\s+blah[\']')
+ self.assertRegMatches(
+ contents2, r"IPV6TO4_ROUTING=[\']blah\s+blah[\']"
+ )
def test_parse_no_adjust_shell(self):
- conf = SysConf(''.splitlines())
- conf['B'] = ' $(time)'
+ conf = SysConf("".splitlines())
+ conf["B"] = " $(time)"
contents = str(conf)
- self.assertEqual('B= $(time)', contents)
+ self.assertEqual("B= $(time)", contents)
def test_parse_empty(self):
- contents = ''
+ contents = ""
conf = SysConf(contents.splitlines())
- self.assertEqual('', str(conf).strip())
+ self.assertEqual("", str(conf).strip())
def test_parse_add_new(self):
- contents = 'BLAH=b'
+ contents = "BLAH=b"
conf = SysConf(contents.splitlines())
- conf['Z'] = 'd'
+ conf["Z"] = "d"
contents = str(conf)
self.assertIn("Z=d", contents)
self.assertIn("BLAH=b", contents)
+
# vi: ts=4 expandtab
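
For reference, a short usage sketch of the SysConf parser exercised above
(requires cloud-init to be importable; the values mirror the tests):

    from cloudinit.distros.parsers.sys_conf import SysConf

    conf = SysConf("HOSTNAME=blahblah".splitlines())
    conf["B"] = "${ZZ}d apples"
    print(conf["HOSTNAME"])  # blahblah
    print(str(conf))         # the new value is re-quoted: B="${ZZ}d apples"
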
diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py
new file mode 100644
index 00000000..67ea024b
--- /dev/null
+++ b/tests/unittests/distros/test_user_data_normalize.py
@@ -0,0 +1,365 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import distros, helpers, settings
+from cloudinit.distros import ug_util
+from tests.unittests.helpers import TestCase
+
+bcfg = {
+ "name": "bob",
+ "plain_text_passwd": "ubuntu",
+ "home": "/home/ubuntu",
+ "shell": "/bin/bash",
+ "lock_passwd": True,
+ "gecos": "Ubuntu",
+ "groups": ["foo"],
+}
+
+
+class TestUGNormalize(TestCase):
+ def setUp(self):
+ super(TestUGNormalize, self).setUp()
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
+
+ def _make_distro(self, dtype, def_user=None):
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
+ return distro
+
+ def _norm(self, cfg, distro):
+ return ug_util.normalize_users_groups(cfg, distro)
+
+ def test_group_dict(self):
+ distro = self._make_distro("ubuntu")
+ g = {
+ "groups": [
+ {"ubuntu": ["foo", "bar"], "bob": "users"},
+ "cloud-users",
+ {"bob": "users2"},
+ ]
+ }
+ (_users, groups) = self._norm(g, distro)
+ self.assertIn("ubuntu", groups)
+ ub_members = groups["ubuntu"]
+ self.assertEqual(sorted(["foo", "bar"]), sorted(ub_members))
+ self.assertIn("bob", groups)
+ b_members = groups["bob"]
+ self.assertEqual(sorted(["users", "users2"]), sorted(b_members))
+
+ def test_basic_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": ["bob"],
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertEqual({}, users)
+
+ def test_csv_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": "bob,joe,steve",
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_more_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {"groups": ["bob", "joe", "steve"]}
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_member_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": {
+ "bob": ["s"],
+ "joe": [],
+ "steve": [],
+ }
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertEqual(["s"], groups["bob"])
+ self.assertEqual([], groups["joe"])
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_users_simple_dict(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": {
+ "default": True,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ ug_cfg = {
+ "users": {
+ "default": "yes",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ ug_cfg = {
+ "users": {
+ "default": "1",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+
+ def test_users_simple_dict_no(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": {
+ "default": False,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+ ug_cfg = {
+ "users": {
+ "default": "no",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+
+ def test_users_simple_csv(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": "joe,bob",
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_simple(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": ["joe", "bob"],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_old_user(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {"user": "zetta", "users": "default"}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertNotIn("bob", users) # Bob is not the default now, zetta is
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ self.assertNotIn("default", users)
+ ug_cfg = {"user": "zetta", "users": "default, joe"}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertNotIn("bob", users) # Bob is not the default now, zetta is
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ self.assertNotIn("default", users)
+ ug_cfg = {"user": "zetta", "users": ["bob", "joe"]}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ ug_cfg = {
+ "user": "zetta",
+ "users": {
+ "bob": True,
+ "joe": True,
+ },
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ ug_cfg = {
+ "user": "zetta",
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("zetta", users)
+ ug_cfg = {}
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+ self.assertEqual({}, groups)
+
+ def test_users_dict_default_additional(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [{"name": "default", "blah": True}],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertEqual(
+ ",".join(distro.get_default_user()["groups"]),
+ users["bob"]["groups"],
+ )
+ self.assertEqual(True, users["bob"]["blah"])
+ self.assertEqual(True, users["bob"]["default"])
+
+ def test_users_dict_extract(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [
+ "default",
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ (name, config) = ug_util.extract_default(users)
+ self.assertEqual(name, "bob")
+ expected_config = {}
+ def_config = None
+ try:
+ def_config = distro.get_default_user()
+ except NotImplementedError:
+ pass
+ if not def_config:
+ def_config = {}
+ expected_config.update(def_config)
+
+ # Ignore these for now
+ expected_config.pop("name", None)
+ expected_config.pop("groups", None)
+ config.pop("groups", None)
+ self.assertEqual(config, expected_config)
+
+ def test_users_dict_default(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [
+ "default",
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertEqual(
+ ",".join(distro.get_default_user()["groups"]),
+ users["bob"]["groups"],
+ )
+ self.assertEqual(True, users["bob"]["default"])
+
+ def test_users_dict_trans(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "tr-me": True},
+ {"name": "bob"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"tr_me": True, "default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_dict(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe"},
+ {"name": "bob"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_create_snap_user(self, mock_subp):
+ mock_subp.side_effect = [
+ ('{"username": "joe", "ssh-key-count": 1}\n', "")
+ ]
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "snapuser": "joe@joe.com"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = ["snap", "create-user", "--sudoer", "--json", "joe@joe.com"]
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, "joe")
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_create_snap_user_known(self, mock_subp):
+ mock_subp.side_effect = [
+ ('{"username": "joe", "ssh-key-count": 1}\n', "")
+ ]
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "snapuser": "joe@joe.com", "known": True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = [
+ "snap",
+ "create-user",
+ "--sudoer",
+ "--json",
+ "--known",
+ "joe@joe.com",
+ ]
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, "joe")
+
+ @mock.patch("cloudinit.util.system_is_snappy")
+ @mock.patch("cloudinit.util.is_group")
+ @mock.patch("cloudinit.subp.subp")
+ def test_add_user_on_snappy_system(
+ self, mock_subp, mock_isgrp, mock_snappy
+ ):
+ mock_isgrp.return_value = False
+ mock_subp.return_value = True
+ mock_snappy.return_value = True
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "groups": "users", "create_groups": True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ distro.add_user(user, **config)
+
+ groupcmd = ["groupadd", "users", "--extrausers"]
+ addcmd = ["useradd", "joe", "--extrausers", "--groups", "users", "-m"]
+
+ mock_subp.assert_any_call(groupcmd)
+ mock_subp.assert_any_call(addcmd, logstring=addcmd)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/__init__.py b/tests/unittests/filters/__init__.py
index e69de29b..e69de29b 100644
--- a/cloudinit/sources/tests/__init__.py
+++ b/tests/unittests/filters/__init__.py
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py
index 1492361e..679bdfc3 100644
--- a/tests/unittests/test_filters/test_launch_index.py
+++ b/tests/unittests/filters/test_launch_index.py
@@ -3,11 +3,10 @@
import copy
from itertools import filterfalse
-from cloudinit.tests import helpers
-
-from cloudinit.filters import launch_index
from cloudinit import user_data as ud
from cloudinit import util
+from cloudinit.filters import launch_index
+from tests.unittests import helpers
def count_messages(root):
@@ -20,7 +19,6 @@ def count_messages(root):
class TestLaunchFilter(helpers.ResourceUsingTestCase):
-
def assertCounts(self, message, expected_counts):
orig_message = copy.deepcopy(message)
for (index, count) in expected_counts.items():
@@ -54,7 +52,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
return True
def testMultiEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_2.email')
+ test_data = helpers.readResource("filter_cloud_multipart_2.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -69,7 +67,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testHeaderEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_header.email')
+ test_data = helpers.readResource("filter_cloud_multipart_header.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -78,13 +76,13 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
expected_counts = {
5: 1,
-1: 0,
- 'c': 1,
+ "c": 1,
None: 1,
}
self.assertCounts(message, expected_counts)
def testConfigEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_1.email')
+ test_data = helpers.readResource("filter_cloud_multipart_1.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -98,7 +96,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testNoneIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource("filter_cloud_multipart.yaml")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
@@ -107,7 +105,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertTrue(self.equivalentMessage(message, filtered_message))
def testIndexes(self):
- test_data = helpers.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource("filter_cloud_multipart.yaml")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
@@ -126,10 +124,11 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
# None should just give all back
None: start_count,
# Non ints should be ignored
- 'c': start_count,
+ "c": start_count,
# Strings should be converted
- '1': 2,
+ "1": 2,
}
self.assertCounts(message, expected_counts)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/helpers.py b/tests/unittests/helpers.py
index 58f63b69..67fed8c9 100644
--- a/cloudinit/tests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import functools
-import httpretty
import io
import logging
import os
@@ -13,18 +12,22 @@ import tempfile
import time
import unittest
from contextlib import ExitStack, contextmanager
+from pathlib import Path
from unittest import mock
from unittest.util import strclass
-from cloudinit.config.schema import (
- SchemaValidationError, validate_cloudconfig_schema)
-from cloudinit import cloud
-from cloudinit import distros
+import httpretty
+
+import cloudinit
+from cloudinit import cloud, distros
from cloudinit import helpers as ch
+from cloudinit import subp, util
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ validate_cloudconfig_schema,
+)
from cloudinit.sources import DataSourceNone
from cloudinit.templater import JINJA_AVAILABLE
-from cloudinit import subp
-from cloudinit import util
_real_subp = subp.subp
@@ -64,11 +67,11 @@ def retarget_many_wrapper(new_base, am, old_func):
if isinstance(path, str):
n_args[i] = rebase_path(path, new_base)
return old_func(*n_args, **kwds)
+
return wrapper
class TestCase(unittest.TestCase):
-
def reset_global_state(self):
"""Reset any global state to its original settings.
@@ -91,13 +94,13 @@ class TestCase(unittest.TestCase):
self.reset_global_state()
def shortDescription(self):
- return strclass(self.__class__) + '.' + self._testMethodName
+ return strclass(self.__class__) + "." + self._testMethodName
def add_patch(self, target, attr, *args, **kwargs):
"""Patches specified target object and sets it as attr on test
instance also schedules cleanup"""
- if 'autospec' not in kwargs:
- kwargs['autospec'] = True
+ if "autospec" not in kwargs:
+ kwargs["autospec"] = True
m = mock.patch(target, *args, **kwargs)
p = m.start()
self.addCleanup(m.stop)
@@ -106,7 +109,7 @@ class TestCase(unittest.TestCase):
class CiTestCase(TestCase):
"""This is the preferred test case base class unless user
- needs other test case classes below."""
+ needs other test case classes below."""
# Subclass overrides for specific test behavior
# Whether or not a unit test needs logfile setup
@@ -129,7 +132,7 @@ class CiTestCase(TestCase):
# Create a log handler so unit tests can search expected logs.
self.logger = logging.getLogger()
self.logs = io.StringIO()
- formatter = logging.Formatter('%(levelname)s: %(message)s')
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
handler = logging.StreamHandler(self.logs)
handler.setFormatter(formatter)
self.old_handlers = self.logger.handlers
@@ -140,12 +143,13 @@ class CiTestCase(TestCase):
subp.subp = self._fake_subp
def _fake_subp(self, *args, **kwargs):
- if 'args' in kwargs:
- cmd = kwargs['args']
+ if "args" in kwargs:
+ cmd = kwargs["args"]
else:
if not args:
raise TypeError(
- "subp() missing 1 required positional argument: 'args'")
+ "subp() missing 1 required positional argument: 'args'"
+ )
cmd = args[0]
if not isinstance(cmd, str):
@@ -156,34 +160,37 @@ class CiTestCase(TestCase):
if isinstance(self.allowed_subp, bool):
pass_through = self.allowed_subp
else:
- pass_through = (
- (cmd in self.allowed_subp) or
- (self.SUBP_SHELL_TRUE in self.allowed_subp and
- kwargs.get('shell')))
+ pass_through = (cmd in self.allowed_subp) or (
+ self.SUBP_SHELL_TRUE in self.allowed_subp
+ and kwargs.get("shell")
+ )
if pass_through:
return _real_subp(*args, **kwargs)
raise Exception(
- "called subp. set self.allowed_subp=True to allow\n subp(%s)" %
- ', '.join([str(repr(a)) for a in args] +
- ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
+ "called subp. set self.allowed_subp=True to allow\n subp(%s)"
+ % ", ".join(
+ [str(repr(a)) for a in args]
+ + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]
+ )
+ )
def tearDown(self):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
- logging.getLogger().level = None
+ logging.getLogger().setLevel(logging.NOTSET)
subp.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
# return a full path to a temporary directory that will be cleaned up.
if dir is None:
- tmpd = tempfile.mkdtemp(
- prefix="ci-%s." % self.__class__.__name__)
+ tmpd = tempfile.mkdtemp(prefix="ci-%s." % self.__class__.__name__)
else:
tmpd = tempfile.mkdtemp(dir=dir)
self.addCleanup(
- functools.partial(shutil.rmtree, tmpd, ignore_errors=True))
+ functools.partial(shutil.rmtree, tmpd, ignore_errors=True)
+ )
return tmpd
def tmp_path(self, path, dir=None):
@@ -206,7 +213,7 @@ class CiTestCase(TestCase):
if not sys_cfg:
sys_cfg = {}
tmp_paths = {}
- for var in ['templates_dir', 'run_dir', 'cloud_dir']:
+ for var in ["templates_dir", "run_dir", "cloud_dir"]:
tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
util.ensure_dir(tmp_paths[var])
self.paths = ch.Paths(tmp_paths)
@@ -219,13 +226,13 @@ class CiTestCase(TestCase):
@classmethod
def random_string(cls, length=8):
- """ return a random lowercase string with default length of 8"""
- return ''.join(
- random.choice(string.ascii_lowercase) for _ in range(length))
+ """return a random lowercase string with default length of 8"""
+ return "".join(
+ random.choice(string.ascii_lowercase) for _ in range(length)
+ )
class ResourceUsingTestCase(CiTestCase):
-
def setUp(self):
super(ResourceUsingTestCase, self).setUp()
self.resource_path = None
@@ -233,14 +240,13 @@ class ResourceUsingTestCase(CiTestCase):
def getCloudPaths(self, ds=None):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
- cp = ch.Paths({'cloud_dir': tmpdir,
- 'templates_dir': resourceLocation()},
- ds=ds)
+ cp = ch.Paths(
+ {"cloud_dir": tmpdir, "templates_dir": resourceLocation()}, ds=ds
+ )
return cp
class FilesystemMockingTestCase(ResourceUsingTestCase):
-
def setUp(self):
super(FilesystemMockingTestCase, self).setUp()
self.patched_funcs = ExitStack()
@@ -251,10 +257,10 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def replicateTestRoot(self, example_root, target_root):
real_root = resourceLocation()
- real_root = os.path.join(real_root, 'roots', example_root)
+ real_root = os.path.join(real_root, "roots", example_root)
for (dir_path, _dirnames, filenames) in os.walk(real_root):
real_path = dir_path
- make_path = rebase_path(real_path[len(real_root):], target_root)
+ make_path = rebase_path(real_path[len(real_root) :], target_root)
util.ensure_dir(make_path)
for f in filenames:
real_path = util.abs_join(real_path, f)
@@ -263,72 +269,89 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def patchUtils(self, new_root):
patch_funcs = {
- util: [('write_file', 1),
- ('append_file', 1),
- ('load_file', 1),
- ('ensure_dir', 1),
- ('chmod', 1),
- ('delete_dir_contents', 1),
- ('del_file', 1),
- ('sym_link', -1),
- ('copy', -1)],
+ util: [
+ ("write_file", 1),
+ ("append_file", 1),
+ ("load_file", 1),
+ ("ensure_dir", 1),
+ ("chmod", 1),
+ ("delete_dir_contents", 1),
+ ("del_file", 1),
+ ("sym_link", -1),
+ ("copy", -1),
+ ],
}
for (mod, funcs) in patch_funcs.items():
for (f, am) in funcs:
func = getattr(mod, f)
trap_func = retarget_many_wrapper(new_root, am, func)
self.patched_funcs.enter_context(
- mock.patch.object(mod, f, trap_func))
+ mock.patch.object(mod, f, trap_func)
+ )
# Handle subprocess calls
- func = getattr(subp, 'subp')
+ func = getattr(subp, "subp")
def nsubp(*_args, **_kwargs):
- return ('', '')
+ return ("", "")
self.patched_funcs.enter_context(
- mock.patch.object(subp, 'subp', nsubp))
+ mock.patch.object(subp, "subp", nsubp)
+ )
def null_func(*_args, **_kwargs):
return None
- for f in ['chownbyid', 'chownbyname']:
+ for f in ["chownbyid", "chownbyname"]:
self.patched_funcs.enter_context(
- mock.patch.object(util, f, null_func))
+ mock.patch.object(util, f, null_func)
+ )
def patchOS(self, new_root):
patch_funcs = {
- os.path: [('isfile', 1), ('exists', 1),
- ('islink', 1), ('isdir', 1), ('lexists', 1)],
- os: [('listdir', 1), ('mkdir', 1),
- ('lstat', 1), ('symlink', 2),
- ('stat', 1)]
+ os.path: [
+ ("isfile", 1),
+ ("exists", 1),
+ ("islink", 1),
+ ("isdir", 1),
+ ("lexists", 1),
+ ],
+ os: [
+ ("listdir", 1),
+ ("mkdir", 1),
+ ("lstat", 1),
+ ("symlink", 2),
+ ("stat", 1),
+ ],
}
- if hasattr(os, 'scandir'):
+ if hasattr(os, "scandir"):
# py27 does not have scandir
- patch_funcs[os].append(('scandir', 1))
+ patch_funcs[os].append(("scandir", 1))
for (mod, funcs) in patch_funcs.items():
for f, nargs in funcs:
func = getattr(mod, f)
trap_func = retarget_many_wrapper(new_root, nargs, func)
self.patched_funcs.enter_context(
- mock.patch.object(mod, f, trap_func))
+ mock.patch.object(mod, f, trap_func)
+ )
def patchOpen(self, new_root):
trap_func = retarget_many_wrapper(new_root, 1, open)
self.patched_funcs.enter_context(
- mock.patch('builtins.open', trap_func)
+ mock.patch("builtins.open", trap_func)
)
def patchStdoutAndStderr(self, stdout=None, stderr=None):
if stdout is not None:
self.patched_funcs.enter_context(
- mock.patch.object(sys, 'stdout', stdout))
+ mock.patch.object(sys, "stdout", stdout)
+ )
if stderr is not None:
self.patched_funcs.enter_context(
- mock.patch.object(sys, 'stderr', stderr))
+ mock.patch.object(sys, "stderr", stderr)
+ )
def reRoot(self, root=None):
if root is None:
@@ -353,30 +376,32 @@ class HttprettyTestCase(CiTestCase):
# And make sure reset and enable/disable are done.
def setUp(self):
- self.restore_proxy = os.environ.get('http_proxy')
+ self.restore_proxy = os.environ.get("http_proxy")
if self.restore_proxy is not None:
- del os.environ['http_proxy']
+ del os.environ["http_proxy"]
super(HttprettyTestCase, self).setUp()
httpretty.HTTPretty.allow_net_connect = False
httpretty.reset()
httpretty.enable()
+ # Stop the logging from HttpPretty so our logs don't get mixed
+ # up with its logs
+ logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
def tearDown(self):
httpretty.disable()
httpretty.reset()
if self.restore_proxy:
- os.environ['http_proxy'] = self.restore_proxy
+ os.environ["http_proxy"] = self.restore_proxy
super(HttprettyTestCase, self).tearDown()
class SchemaTestCaseMixin(unittest.TestCase):
-
def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
"""Assert the config is valid per self.schema.
If there is only one top level key in the schema properties, then
the cfg will be put under that key."""
- props = list(self.schema.get('properties'))
+ props = list(self.schema.get("properties"))
# put cfg under top level key if there is only one in the schema
if len(props) == 1:
cfg = {props[0]: cfg}
@@ -397,7 +422,7 @@ def populate_dir(path, files):
if isinstance(content, bytes):
fp.write(content)
else:
- fp.write(content.encode('utf-8'))
+ fp.write(content.encode("utf-8"))
fp.close()
ret.append(p)
@@ -420,7 +445,7 @@ def dir2dict(startdir, prefix=None):
for root, _dirs, files in os.walk(startdir):
for fname in files:
fpath = os.path.join(root, fname)
- key = fpath[len(prefix):]
+ key = fpath[len(prefix) :]
flist[key] = util.load_file(fpath)
return flist
@@ -438,16 +463,16 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs):
return_value: return from 'func'
"""
- delim = '.'
+ delim = "."
if prefix is None:
- prefix = ''
+ prefix = ""
prefix = prefix.rstrip(delim)
unwraps = []
for fname, kw in mocks.items():
if prefix:
fname = delim.join((prefix, fname))
if not isinstance(kw, dict):
- kw = {'return_value': kw}
+ kw = {"return_value": kw}
p = mock.patch(fname, **kw)
p.start()
unwraps.append(p)
@@ -459,19 +484,20 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs):
def resourceLocation(subname=None):
- path = os.path.join('tests', 'data')
+ path = cloud_init_project_dir("tests/data")
if not subname:
return path
return os.path.join(path, subname)
-def readResource(name, mode='r'):
+def readResource(name, mode="r"):
with open(resourceLocation(name), mode) as fh:
return fh.read()
try:
import jsonschema
+
assert jsonschema # avoid pyflakes error F401: import unused
_missing_jsonschema_dep = False
except ImportError:
@@ -480,7 +506,8 @@ except ImportError:
def skipUnlessJsonSchema():
return skipIf(
- _missing_jsonschema_dep, "No python-jsonschema dependency present.")
+ _missing_jsonschema_dep, "No python-jsonschema dependency present."
+ )
def skipUnlessJinja():
@@ -492,13 +519,36 @@ def skipIfJinja():
# older versions of mock do not have the useful 'assert_not_called'
-if not hasattr(mock.Mock, 'assert_not_called'):
+if not hasattr(mock.Mock, "assert_not_called"):
+
def __mock_assert_not_called(mmock):
if mmock.call_count != 0:
- msg = ("[citest] Expected '%s' to not have been called. "
- "Called %s times." %
- (mmock._mock_name or 'mock', mmock.call_count))
+ msg = (
+ "[citest] Expected '%s' to not have been called. "
+ "Called %s times."
+ % (mmock._mock_name or "mock", mmock.call_count)
+ )
raise AssertionError(msg)
+
mock.Mock.assert_not_called = __mock_assert_not_called
+
+def get_top_level_dir() -> Path:
+ """Return the absolute path to the top cloudinit project directory
+
+ @return Path('<top-cloudinit-dir>')
+ """
+ return Path(cloudinit.__file__).parent.parent.resolve()
+
+
+def cloud_init_project_dir(sub_path: str) -> str:
+ """Get a path within the cloudinit project directory
+
+ @return str of the combined path
+
+ Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path"
+ """
+ return str(get_top_level_dir() / sub_path)
+
+
# vi: ts=4 expandtab
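
The new get_top_level_dir()/cloud_init_project_dir() helpers above let
resourceLocation() find test data relative to the project root instead of the
current working directory. A minimal sketch of the same resolution, assuming
the cloudinit package is importable:

    from pathlib import Path

    import cloudinit

    # the project root is two levels up from cloudinit/__init__.py
    top = Path(cloudinit.__file__).parent.parent.resolve()
    print(top / "tests" / "data")  # e.g. <top-cloudinit-dir>/tests/data
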
diff --git a/cloudinit/tests/__init__.py b/tests/unittests/net/__init__.py
index e69de29b..e69de29b 100644
--- a/cloudinit/tests/__init__.py
+++ b/tests/unittests/net/__init__.py
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
new file mode 100644
index 00000000..876873d5
--- /dev/null
+++ b/tests/unittests/net/test_dhcp.py
@@ -0,0 +1,797 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import signal
+from textwrap import dedent
+
+import httpretty
+
+import cloudinit.net as net
+from cloudinit.net.dhcp import (
+ InvalidDHCPLeaseFileError,
+ dhcp_discovery,
+ maybe_perform_dhcp_discovery,
+ networkd_load_leases,
+ parse_dhcp_lease_file,
+ parse_static_routes,
+)
+from cloudinit.util import ensure_file, write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ wrap_and_call,
+)
+
+
+class TestParseDHCPLeasesFile(CiTestCase):
+ def test_parse_empty_lease_file_errors(self):
+ """parse_dhcp_lease_file errors when file content is empty."""
+ empty_file = self.tmp_path("leases")
+ ensure_file(empty_file)
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(empty_file)
+ error = context_manager.exception
+ self.assertIn("Cannot parse empty dhcp lease file", str(error))
+
+ def test_parse_malformed_lease_file_content_errors(self):
+ """parse_dhcp_lease_file errors when file content isn't dhcp leases."""
+ non_lease_file = self.tmp_path("leases")
+ write_file(non_lease_file, "hi mom.")
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(non_lease_file)
+ error = context_manager.exception
+ self.assertIn("Cannot parse dhcp lease file", str(error))
+
+ def test_parse_multiple_leases(self):
+ """parse_dhcp_lease_file returns a list of all leases within."""
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ "filename": "http://192.168.2.50/boot.php?mac=${netX}",
+ },
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "filename": "http://192.168.2.50/boot.php?mac=${netX}",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ },
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+
+class TestDHCPRFC3442(CiTestCase):
+ def test_parse_lease_finds_rfc3442_classless_static_routes(self):
+ """parse_dhcp_lease_file returns rfc3442-classless-static-routes."""
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option rfc3442-classless-static-routes 0,130,56,240,1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "rfc3442-classless-static-routes": "0,130,56,240,1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ def test_parse_lease_finds_classless_static_routes(self):
+ """
+ parse_dhcp_lease_file returns classless-static-routes
+ for Centos lease format.
+ """
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option classless-static-routes 0 130.56.240.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "classless-static-routes": "0 130.56.240.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network"""
+ lease = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "rfc3442-classless-static-routes": "0,130,56,240,1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ "interface": "wlp3s0",
+ "ip": "192.168.2.74",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "static_routes": [("0.0.0.0/0", "130.56.240.1")],
+ "router": "192.168.2.1",
+ }
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """
+ EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network
+ for Centos Lease format
+ """
+ lease = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "classless-static-routes": "0 130.56.240.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ "interface": "wlp3s0",
+ "ip": "192.168.2.74",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "static_routes": [("0.0.0.0/0", "130.56.240.1")],
+ "router": "192.168.2.1",
+ }
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+
+class TestDHCPParseStaticRoutes(CiTestCase):
+
+ with_logs = True
+
+    def test_parse_static_routes_empty_string(self):
+ self.assertEqual([], parse_static_routes(""))
+
+ def test_parse_static_routes_invalid_input_returns_empty_list(self):
+ rfc3442 = "32,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_bogus_width_returns_empty_list(self):
+ rfc3442 = "33,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_single_ip(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255"
+ self.assertEqual(
+ [("169.254.169.254/32", "130.56.248.255")],
+ parse_static_routes(rfc3442),
+ )
+
+ def test_parse_static_routes_single_ip_handles_trailing_semicolon(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255;"
+ self.assertEqual(
+ [("169.254.169.254/32", "130.56.248.255")],
+ parse_static_routes(rfc3442),
+ )
+
+ def test_parse_static_routes_default_route(self):
+ rfc3442 = "0,130,56,240,1"
+ self.assertEqual(
+ [("0.0.0.0/0", "130.56.240.1")], parse_static_routes(rfc3442)
+ )
+
+ def test_unspecified_gateway(self):
+ rfc3442 = "32,169,254,169,254,0,0,0,0"
+ self.assertEqual(
+ [("169.254.169.254/32", "0.0.0.0")], parse_static_routes(rfc3442)
+ )
+
+ def test_parse_static_routes_class_c_b_a(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a = "8,10,10,0,0,4"
+ rfc3442 = ",".join([class_c, class_b, class_a])
+ self.assertEqual(
+ sorted(
+ [
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ("10.0.0.0/8", "10.0.0.4"),
+ ]
+ ),
+ sorted(parse_static_routes(rfc3442)),
+ )
+
+ def test_parse_static_routes_logs_error_truncated(self):
+ bad_rfc3442 = {
+ "class_c": "24,169,254,169,10",
+ "class_b": "16,172,16,10",
+ "class_a": "8,10,10",
+ "gateway": "0,0",
+ "netlen": "33,0",
+ }
+ for rfc3442 in bad_rfc3442.values():
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ logs = self.logs.getvalue()
+ self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines()))
+
+ def test_parse_static_routes_returns_valid_routes_until_parse_err(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a_error = "8,10,10,0,0"
+ rfc3442 = ",".join([class_c, class_b, class_a_error])
+ self.assertEqual(
+ sorted(
+ [
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ]
+ ),
+ sorted(parse_static_routes(rfc3442)),
+ )
+
+ logs = self.logs.getvalue()
+ self.assertIn(rfc3442, logs.splitlines()[0])
+
+ def test_redhat_format(self):
+ redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1"
+ self.assertEqual(
+ sorted(
+ [
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1"),
+ ]
+ ),
+ sorted(parse_static_routes(redhat_format)),
+ )
+
+ def test_redhat_format_with_a_space_too_much_after_comma(self):
+ redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1"
+ self.assertEqual(
+ sorted(
+ [
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1"),
+ ]
+ ),
+ sorted(parse_static_routes(redhat_format)),
+ )
+
+
+class TestDHCPDiscoveryClean(CiTestCase):
+ with_logs = True
+
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_no_fallback_nic_found(self, m_fallback_nic):
+ """Log and do nothing when nic is absent and no fallback is found."""
+ m_fallback_nic.return_value = None # No fallback nic found
+ self.assertEqual([], maybe_perform_dhcp_discovery())
+ self.assertIn(
+ "Skip dhcp_discovery: Unable to find fallback nic.",
+ self.logs.getvalue(),
+ )
+
+ def test_provided_nic_does_not_exist(self):
+ """When the provided nic doesn't exist, log a message and no-op."""
+ self.assertEqual([], maybe_perform_dhcp_discovery("idontexist"))
+ self.assertIn(
+ "Skip dhcp_discovery: nic idontexist not found in get_devicelist.",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.subp.which")
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_absent_dhclient_command(self, m_fallback, m_which):
+ """When dhclient doesn't exist in the OS, log the issue and no-op."""
+ m_fallback.return_value = "eth9"
+ m_which.return_value = None # dhclient isn't found
+ self.assertEqual([], maybe_perform_dhcp_discovery())
+ self.assertIn(
+ "Skip dhclient configuration: No dhclient command found.",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.temp_utils.os.getuid")
+ @mock.patch("cloudinit.net.dhcp.dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.subp.which")
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
+ """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
+ m_uid.return_value = 0 # Fake root user for tmpdir
+ m_fback.return_value = "eth9"
+ m_which.return_value = "/sbin/dhclient"
+ m_dhcp.return_value = {"address": "192.168.2.2"}
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {"_TMPDIR": {"new": None}, "os.getuid": 0},
+ maybe_perform_dhcp_discovery,
+ )
+ self.assertEqual({"address": "192.168.2.2"}, retval)
+ self.assertEqual(
+ 1, m_dhcp.call_count, "dhcp_discovery not called once"
+ )
+ call = m_dhcp.call_args_list[0]
+ self.assertEqual("/sbin/dhclient", call[0][0])
+ self.assertEqual("eth9", call[0][1])
+ self.assertIn("/var/tmp/cloud-init/cloud-init-dhcp-", call[0][2])
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(
+ self, m_subp, m_kill
+ ):
+ """dhcp_discovery logs a warning when pidfile contains invalid content.
+
+ Lease processing still occurs and no proc kill is attempted.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ write_file(self.tmp_path("dhclient.pid", tmpdir), "") # Empty pid ''
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ write_file(self.tmp_path("dhcp.leases", tmpdir), lease_content)
+
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+ self.assertIn(
+ "dhclient(pid=, parentpid=unknown) failed "
+ "to daemonize after 10.0 seconds",
+ self.logs.getvalue(),
+ )
+ m_kill.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.util.wait_for_files")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(
+ self, m_subp, m_wait, m_kill, m_getppid
+ ):
+ """dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ # Don't create pid or leases file
+ pidfile = self.tmp_path("dhclient.pid", tmpdir)
+ leasefile = self.tmp_path("dhcp.leases", tmpdir)
+        m_wait.return_value = [pidfile]  # Report the pidfile as still missing
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+ self.assertEqual([], dhcp_discovery(dhclient_script, "eth9", tmpdir))
+ self.assertEqual(
+ mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
+ m_wait.call_args_list[0],
+ )
+ self.assertIn(
+ "WARNING: dhclient did not produce expected files: dhclient.pid",
+ self.logs.getvalue(),
+ )
+ m_kill.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+ # dhclient script got copied
+ with open(os.path.join(tmpdir, "dhclient")) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient called from sandbox
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "dev", "eth9", "up"], capture=True
+ ),
+ mock.call(
+ [
+ os.path.join(tmpdir, "dhclient"),
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ os.path.join(tmpdir, "dhclient.pid"),
+ "eth9",
+ "-sf",
+ "/bin/true",
+ ],
+ capture=True,
+ ),
+ ]
+ )
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ with mock.patch("os.access", return_value=False):
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+        # dhclient script is not copied; the original is used in place
+ with open(os.path.join(tmpdir, "dhclient.orig")) as stream:
+ self.assertEqual(script_content, stream.read())
+            # Interface was brought up before dhclient ran outside the sandbox
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "dev", "eth9", "up"], capture=True
+ ),
+ mock.call(
+ [
+ os.path.join(tmpdir, "dhclient.orig"),
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ os.path.join(tmpdir, "dhclient.pid"),
+ "eth9",
+ "-sf",
+ "/bin/true",
+ ],
+ capture=True,
+ ),
+ ]
+ )
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+ """ "dhcp_log_func is called with the output and error streams of
+ dhclinet when the callable is passed."""
+ dhclient_err = "FAKE DHCLIENT ERROR"
+ dhclient_out = "FAKE DHCLIENT OUT"
+ m_subp.return_value = (dhclient_out, dhclient_err)
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ def dhcp_log_func(out, err):
+ self.assertEqual(out, dhclient_out)
+ self.assertEqual(err, dhclient_err)
+
+ dhcp_discovery(
+ dhclient_script, "eth9", tmpdir, dhcp_log_func=dhcp_log_func
+ )
+
+
+class TestSystemdParseLeases(CiTestCase):
+
+ lxd_lease = dedent(
+ """\
+ # This is private data. Do not parse.
+ ADDRESS=10.75.205.242
+ NETMASK=255.255.255.0
+ ROUTER=10.75.205.1
+ SERVER_ADDRESS=10.75.205.1
+ NEXT_SERVER=10.75.205.1
+ BROADCAST=10.75.205.255
+ T1=1580
+ T2=2930
+ LIFETIME=3600
+ DNS=10.75.205.1
+ DOMAINNAME=lxd
+ HOSTNAME=a1
+ CLIENTID=ffe617693400020000ab110c65a6a0866931c2
+ """
+ )
+
+ lxd_parsed = {
+ "ADDRESS": "10.75.205.242",
+ "NETMASK": "255.255.255.0",
+ "ROUTER": "10.75.205.1",
+ "SERVER_ADDRESS": "10.75.205.1",
+ "NEXT_SERVER": "10.75.205.1",
+ "BROADCAST": "10.75.205.255",
+ "T1": "1580",
+ "T2": "2930",
+ "LIFETIME": "3600",
+ "DNS": "10.75.205.1",
+ "DOMAINNAME": "lxd",
+ "HOSTNAME": "a1",
+ "CLIENTID": "ffe617693400020000ab110c65a6a0866931c2",
+ }
+
+ azure_lease = dedent(
+ """\
+ # This is private data. Do not parse.
+ ADDRESS=10.132.0.5
+ NETMASK=255.255.255.255
+ ROUTER=10.132.0.1
+ SERVER_ADDRESS=169.254.169.254
+ NEXT_SERVER=10.132.0.1
+ MTU=1460
+ T1=43200
+ T2=75600
+ LIFETIME=86400
+ DNS=169.254.169.254
+ NTP=169.254.169.254
+ DOMAINNAME=c.ubuntu-foundations.internal
+ DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
+ HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
+ ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
+ CLIENTID=ff405663a200020000ab11332859494d7a8b4c
+ OPTION_245=624c3620
+ """
+ )
+
+ azure_parsed = {
+ "ADDRESS": "10.132.0.5",
+ "NETMASK": "255.255.255.255",
+ "ROUTER": "10.132.0.1",
+ "SERVER_ADDRESS": "169.254.169.254",
+ "NEXT_SERVER": "10.132.0.1",
+ "MTU": "1460",
+ "T1": "43200",
+ "T2": "75600",
+ "LIFETIME": "86400",
+ "DNS": "169.254.169.254",
+ "NTP": "169.254.169.254",
+ "DOMAINNAME": "c.ubuntu-foundations.internal",
+ "DOMAIN_SEARCH_LIST": "c.ubuntu-foundations.internal google.internal",
+ "HOSTNAME": "tribaal-test-171002-1349.c.ubuntu-foundations.internal",
+ "ROUTES": "10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1",
+ "CLIENTID": "ff405663a200020000ab11332859494d7a8b4c",
+ "OPTION_245": "624c3620",
+ }
+
+ def setUp(self):
+ super(TestSystemdParseLeases, self).setUp()
+ self.lease_d = self.tmp_dir()
+
+ def test_no_leases_returns_empty_dict(self):
+ """A leases dir with no lease files should return empty dictionary."""
+ self.assertEqual({}, networkd_load_leases(self.lease_d))
+
+ def test_no_leases_dir_returns_empty_dict(self):
+ """A non-existing leases dir should return empty dict."""
+ enodir = os.path.join(self.lease_d, "does-not-exist")
+ self.assertEqual({}, networkd_load_leases(enodir))
+
+ def test_single_leases_file(self):
+ """A leases dir with one leases file."""
+ populate_dir(self.lease_d, {"2": self.lxd_lease})
+ self.assertEqual(
+ {"2": self.lxd_parsed}, networkd_load_leases(self.lease_d)
+ )
+
+ def test_single_azure_leases_file(self):
+ """On Azure, option 245 should be present, verify it specifically."""
+ populate_dir(self.lease_d, {"1": self.azure_lease})
+ self.assertEqual(
+ {"1": self.azure_parsed}, networkd_load_leases(self.lease_d)
+ )
+
+ def test_multiple_files(self):
+ """Multiple leases files on azure with one found return that value."""
+ self.maxDiff = None
+ populate_dir(
+ self.lease_d, {"1": self.azure_lease, "9": self.lxd_lease}
+ )
+ self.assertEqual(
+ {"1": self.azure_parsed, "9": self.lxd_parsed},
+ networkd_load_leases(self.lease_d),
+ )
+
+
+class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
+ """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
+ url = "http://example.org/index.html"
+
+ httpretty.register_uri(httpretty.GET, url)
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={"url": url},
+ ) as lease:
+ self.assertIsNone(lease)
+ # Ensure that no teardown happens:
+ m_dhcp.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ephemeral_dhcp_setup_network_if_url_connectivity(
+ self, m_dhcp, m_subp
+ ):
+ """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
+ url = "http://example.org/index.html"
+ fake_lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.2",
+ "subnet-mask": "255.255.0.0",
+ }
+ m_dhcp.return_value = [fake_lease]
+ m_subp.return_value = ("", "")
+
+ httpretty.register_uri(httpretty.GET, url, body={}, status=404)
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={"url": url},
+ ) as lease:
+ self.assertEqual(fake_lease, lease)
+ # Ensure that dhcp discovery occurs
+            m_dhcp.assert_called_once()
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
new file mode 100644
index 00000000..18b3fe59
--- /dev/null
+++ b/tests/unittests/net/test_init.py
@@ -0,0 +1,1734 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import errno
+import ipaddress
+import os
+import textwrap
+from unittest import mock
+
+import httpretty
+import pytest
+import requests
+
+import cloudinit.net as net
+from cloudinit import safeyaml as yaml
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase
+
+
+class TestSysDevPath(CiTestCase):
+ def test_sys_dev_path(self):
+ """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
+ dev = "something"
+ path = "attribute"
+ expected = net.SYS_CLASS_NET + dev + "/" + path
+ self.assertEqual(expected, net.sys_dev_path(dev, path))
+
+ def test_sys_dev_path_without_path(self):
+ """When path param isn't provided it defaults to empty string."""
+ dev = "something"
+ expected = net.SYS_CLASS_NET + dev + "/"
+ self.assertEqual(expected, net.sys_dev_path(dev))
+
+
+class TestReadSysNet(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestReadSysNet, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_read_sys_net_strips_contents_of_sys_path(self):
+ """read_sys_net strips whitespace from the contents of a sys file."""
+ content = "some stuff with trailing whitespace\t\r\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ self.assertEqual(content.strip(), net.read_sys_net("dev", "attr"))
+
+ def test_read_sys_net_reraises_oserror(self):
+ """read_sys_net raises OSError/IOError when file doesn't exist."""
+        # Catch Exception: OSError vs IOError differs across python versions.
+ with self.assertRaises(Exception) as context_manager: # noqa: H202
+ net.read_sys_net("dev", "attr")
+ error = context_manager.exception
+ self.assertIn("No such file or directory", str(error))
+
+ def test_read_sys_net_handles_error_with_on_enoent(self):
+ """read_sys_net handles OSError/IOError with on_enoent if provided."""
+ handled_errors = []
+
+ def on_enoent(e):
+ handled_errors.append(e)
+
+ net.read_sys_net("dev", "attr", on_enoent=on_enoent)
+ error = handled_errors[0]
+ self.assertIsInstance(error, Exception)
+ self.assertIn("No such file or directory", str(error))
+
+ def test_read_sys_net_translates_content(self):
+ """read_sys_net translates content when translate dict is provided."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ translate = {"you're welcome": "de nada"}
+ self.assertEqual(
+ "de nada", net.read_sys_net("dev", "attr", translate=translate)
+ )
+
+ def test_read_sys_net_errors_on_translation_failures(self):
+ """read_sys_net raises a KeyError and logs details on failure."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ with self.assertRaises(KeyError) as context_manager:
+ net.read_sys_net("dev", "attr", translate={})
+ error = context_manager.exception
+ self.assertEqual('"you\'re welcome"', str(error))
+ self.assertIn(
+ "Found unexpected (not translatable) value 'you're welcome' in "
+ "'{0}dev/attr".format(self.sysdir),
+ self.logs.getvalue(),
+ )
+
+    def test_read_sys_net_handles_keyerror_with_on_keyerror(self):
+ """read_sys_net handles translation errors calling on_keyerror."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ handled_errors = []
+
+ def on_keyerror(e):
+ handled_errors.append(e)
+
+ net.read_sys_net("dev", "attr", translate={}, on_keyerror=on_keyerror)
+ error = handled_errors[0]
+ self.assertIsInstance(error, KeyError)
+ self.assertEqual('"you\'re welcome"', str(error))
+
+ def test_read_sys_net_safe_false_on_translate_failure(self):
+ """read_sys_net_safe returns False on translation failures."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ self.assertFalse(net.read_sys_net_safe("dev", "attr", translate={}))
+
+ def test_read_sys_net_safe_returns_false_on_noent_failure(self):
+ """read_sys_net_safe returns False on file not found failures."""
+ self.assertFalse(net.read_sys_net_safe("dev", "attr"))
+
+ def test_read_sys_net_int_returns_none_on_error(self):
+ """read_sys_net_safe returns None on failures."""
+ self.assertFalse(net.read_sys_net_int("dev", "attr"))
+
+ def test_read_sys_net_int_returns_none_on_valueerror(self):
+ """read_sys_net_safe returns None when content is not an int."""
+ write_file(os.path.join(self.sysdir, "dev", "attr"), "NOTINT\n")
+ self.assertFalse(net.read_sys_net_int("dev", "attr"))
+
+ def test_read_sys_net_int_returns_integer_from_content(self):
+ """read_sys_net_safe returns None on failures."""
+ write_file(os.path.join(self.sysdir, "dev", "attr"), "1\n")
+ self.assertEqual(1, net.read_sys_net_int("dev", "attr"))
+
+ def test_is_up_true(self):
+ """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
+ for state in ["up", "unknown"]:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertTrue(net.is_up("eth0"))
+
+ def test_is_up_false(self):
+ """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
+ for state in ["down", "incomprehensible"]:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertFalse(net.is_up("eth0"))
+
+ def test_is_bridge(self):
+ """is_bridge is True when /sys/net/devname/bridge exists."""
+ self.assertFalse(net.is_bridge("eth0"))
+ ensure_file(os.path.join(self.sysdir, "eth0", "bridge"))
+ self.assertTrue(net.is_bridge("eth0"))
+
+ def test_is_bond(self):
+ """is_bond is True when /sys/net/devname/bonding exists."""
+ self.assertFalse(net.is_bond("eth0"))
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ self.assertTrue(net.is_bond("eth0"))
+
+ def test_get_master(self):
+ """get_master returns the path when /sys/net/devname/master exists."""
+ self.assertIsNone(net.get_master("enP1s1"))
+ master_path = os.path.join(self.sysdir, "enP1s1", "master")
+ ensure_file(master_path)
+ self.assertEqual(master_path, net.get_master("enP1s1"))
+
+ def test_master_is_bridge_or_bond(self):
+ bridge_mac = "aa:bb:cc:aa:bb:cc"
+ bond_mac = "cc:bb:aa:cc:bb:aa"
+
+ # No master => False
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac)
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+ self.assertFalse(net.master_is_bridge_or_bond("eth2"))
+
+ # masters without bridge/bonding => False
+ write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac)
+
+ os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master"))
+ os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master"))
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+ self.assertFalse(net.master_is_bridge_or_bond("eth2"))
+
+ # masters with bridge/bonding => True
+ write_file(os.path.join(self.sysdir, "br0", "bridge"), "")
+ write_file(os.path.join(self.sysdir, "bond0", "bonding"), "")
+
+ self.assertTrue(net.master_is_bridge_or_bond("eth1"))
+ self.assertTrue(net.master_is_bridge_or_bond("eth2"))
+
+ def test_master_is_openvswitch(self):
+ ovs_mac = "bb:cc:aa:bb:cc:aa"
+
+ # No master => False
+ write_file(os.path.join(self.sysdir, "eth1", "address"), ovs_mac)
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+
+ # masters without ovs-system => False
+ write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac)
+
+ os.symlink(
+ "../ovs-system", os.path.join(self.sysdir, "eth1", "master")
+ )
+
+ self.assertFalse(net.master_is_openvswitch("eth1"))
+
+ # masters with ovs-system => True
+ os.symlink(
+ "../ovs-system",
+ os.path.join(self.sysdir, "eth1", "upper_ovs-system"),
+ )
+
+ self.assertTrue(net.master_is_openvswitch("eth1"))
+
+ def test_is_vlan(self):
+ """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
+ ensure_file(os.path.join(self.sysdir, "eth0", "uevent"))
+ self.assertFalse(net.is_vlan("eth0"))
+ content = "junk\nDEVTYPE=vlan\njunk\n"
+ write_file(os.path.join(self.sysdir, "eth0", "uevent"), content)
+ self.assertTrue(net.is_vlan("eth0"))
+
+
+class TestGenerateFallbackConfig(CiTestCase):
+ def setUp(self):
+ super(TestGenerateFallbackConfig, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+ self.add_patch(
+ "cloudinit.net.util.is_container",
+ "m_is_container",
+ return_value=False,
+ )
+ self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle")
+ self.add_patch(
+ "cloudinit.net.is_netfailover", "m_netfail", return_value=False
+ )
+ self.add_patch(
+ "cloudinit.net.is_netfail_master",
+ "m_netfail_master",
+ return_value=False,
+ )
+
+ def test_generate_fallback_finds_connected_eth_with_mac(self):
+ """generate_fallback_config finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth1": {
+ "match": {"macaddress": mac},
+ "dhcp4": True,
+ "set-name": "eth1",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_dormant_eth_with_mac(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "dormant"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "match": {"macaddress": mac},
+ "dhcp4": True,
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_eth_by_operstate(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "match": {"macaddress": mac},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ valid_operstates = ["dormant", "down", "lowerlayerdown", "unknown"]
+ for state in valid_operstates:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertEqual(expected, net.generate_fallback_config())
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), "noworky")
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_veth(self):
+ """generate_fallback_config will skip any veth interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, "veth0", "carrier"), "1")
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bridges(self):
+ """generate_fallback_config will skip any bridges interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ ensure_file(os.path.join(self.sysdir, "eth0", "bridge"))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bonds(self):
+ """generate_fallback_config will skip any bonded interfaces."""
+        # A connected bond which gets ignored
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_netfail_devs(self):
+ """gen_fallback_config ignores netfail primary,sby no mac on master."""
+ mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac
+ for iface in ["ens3", "ens3sby", "enP0s1f3"]:
+ write_file(os.path.join(self.sysdir, iface, "carrier"), "1")
+ write_file(
+ os.path.join(self.sysdir, iface, "addr_assign_type"), "0"
+ )
+ write_file(os.path.join(self.sysdir, iface, "address"), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return False
+ return True
+
+ self.m_netfail.side_effect = is_netfail
+
+ def is_netfail_master(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return True
+ return False
+
+ self.m_netfail_master.side_effect = is_netfail_master
+ expected = {
+ "ethernets": {
+ "ens3": {
+ "dhcp4": True,
+ "match": {"name": "ens3"},
+ "set-name": "ens3",
+ }
+ },
+ "version": 2,
+ }
+ result = net.generate_fallback_config()
+ self.assertEqual(expected, result)
+
+
+class TestNetFindFallBackNic(CiTestCase):
+ def setUp(self):
+ super(TestNetFindFallBackNic, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+ self.add_patch(
+ "cloudinit.net.util.is_container",
+ "m_is_container",
+ return_value=False,
+ )
+ self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle")
+
+ def test_generate_fallback_finds_first_connected_eth_with_mac(self):
+ """find_fallback_nic finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ self.assertEqual("eth1", net.find_fallback_nic())
+
+
+class TestGetDeviceList(CiTestCase):
+ def setUp(self):
+ super(TestGetDeviceList, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_devicelist_raise_oserror(self):
+ """get_devicelist raise any non-ENOENT OSerror."""
+ error = OSError("Can not do it")
+ error.errno = errno.EPERM # Set non-ENOENT
+ self.m_sys_path.side_effect = error
+ with self.assertRaises(OSError) as context_manager:
+ net.get_devicelist()
+ exception = context_manager.exception
+ self.assertEqual("Can not do it", str(exception))
+
+ def test_get_devicelist_empty_without_sys_net(self):
+ """get_devicelist returns empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = "idontexist"
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
+ """get_devicelist returns empty directoty listing for SYS_CLASS_NET."""
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
+ """get_devicelist returns a directory listing for SYS_CLASS_NET."""
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), "up")
+ write_file(os.path.join(self.sysdir, "eth1", "operstate"), "up")
+ self.assertCountEqual(["eth0", "eth1"], net.get_devicelist())
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestGetInterfaceMAC(CiTestCase):
+ def setUp(self):
+ super(TestGetInterfaceMAC, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_interface_mac_false_with_no_mac(self):
+ """get_device_list returns False when no mac is reported."""
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ mac_path = os.path.join(self.sysdir, "eth0", "address")
+ self.assertFalse(os.path.exists(mac_path))
+ self.assertFalse(net.get_interface_mac("eth0"))
+
+ def test_get_interface_mac(self):
+ """get_interfaces returns the mac from SYS_CLASS_NET/dev/address."""
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ self.assertEqual(mac, net.get_interface_mac("eth1"))
+
+ def test_get_interface_mac_grabs_bonding_address(self):
+ """get_interfaces returns the source device mac for bonded devices."""
+ source_dev_mac = "aa:bb:cc:aa:bb:cc"
+ bonded_mac = "dd:ee:ff:dd:ee:ff"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bonded_mac)
+ write_file(
+ os.path.join(self.sysdir, "eth1", "bonding_slave", "perm_hwaddr"),
+ source_dev_mac,
+ )
+ self.assertEqual(source_dev_mac, net.get_interface_mac("eth1"))
+
+ def test_get_interfaces_empty_list_without_sys_net(self):
+ """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = "idontexist"
+ self.assertEqual([], net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_empty_mac(self):
+ """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
+ empty_mac = "00:00:00:00:00:00"
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), empty_mac)
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac)
+ expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_missing_mac(self):
+ """Ignore interfaces without an address from get_interfaces_by_mac."""
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ address_path = os.path.join(self.sysdir, "eth1", "address")
+ self.assertFalse(os.path.exists(address_path))
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac)
+ expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_master_devs(self):
+ """Ignore interfaces with a master device which would have dup mac."""
+ mac1 = mac2 = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac1)
+ write_file(os.path.join(self.sysdir, "eth1", "master"), "blah")
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac2)
+ expected = [("eth2", mac2, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ @mock.patch("cloudinit.net.is_netfailover")
+    def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail):
+ """Ignore interfaces if netfailover primary or standby."""
+ mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac
+ for iface in ["ens3", "ens3sby", "enP0s1f3"]:
+ write_file(
+ os.path.join(self.sysdir, iface, "addr_assign_type"), "0"
+ )
+ write_file(os.path.join(self.sysdir, iface, "address"), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return False
+ else:
+ return True
+
+ m_netfail.side_effect = is_netfail
+ expected = [("ens3", mac, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds(
+ self,
+ ):
+ bridge_mac = "aa:bb:cc:aa:bb:cc"
+ bond_mac = "cc:bb:aa:cc:bb:aa"
+ ovs_mac = "bb:cc:aa:bb:cc:aa"
+
+ write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "br0", "bridge"), "")
+
+ write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac)
+ write_file(os.path.join(self.sysdir, "bond0", "bonding"), "")
+
+ write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac)
+
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac)
+ os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master"))
+
+ write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac)
+ os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master"))
+
+ write_file(os.path.join(self.sysdir, "eth3", "address"), ovs_mac)
+ os.symlink(
+ "../ovs-system", os.path.join(self.sysdir, "eth3", "master")
+ )
+ os.symlink(
+ "../ovs-system",
+ os.path.join(self.sysdir, "eth3", "upper_ovs-system"),
+ )
+
+ interface_names = [interface[0] for interface in net.get_interfaces()]
+ self.assertEqual(
+ ["eth1", "eth2", "eth3", "ovs-system"], sorted(interface_names)
+ )
+
+
+class TestInterfaceHasOwnMAC(CiTestCase):
+ def setUp(self):
+ super(TestInterfaceHasOwnMAC, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_interface_has_own_mac_false_when_stolen(self):
+ """Return False from interface_has_own_mac when address is stolen."""
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "2")
+ self.assertFalse(net.interface_has_own_mac("eth1"))
+
+ def test_interface_has_own_mac_true_when_not_stolen(self):
+ """Return False from interface_has_own_mac when mac isn't stolen."""
+ valid_assign_types = ["0", "1", "3"]
+ assign_path = os.path.join(self.sysdir, "eth1", "addr_assign_type")
+ for _type in valid_assign_types:
+ write_file(assign_path, _type)
+ self.assertTrue(net.interface_has_own_mac("eth1"))
+
+ def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
+ """When addr_assign_type is absent, interface_has_own_mac errors."""
+ with self.assertRaises(ValueError):
+ net.interface_has_own_mac("eth1", strict=True)
+
+
+@mock.patch("cloudinit.net.subp.subp")
+class TestEphemeralIPV4Network(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestEphemeralIPV4Network, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
+ """No required params for EphemeralIPv4Network can be None."""
+ required_params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ for key in required_params.keys():
+ params = copy.deepcopy(required_params)
+ params[key] = None
+ with self.assertRaises(ValueError) as context_manager:
+ net.EphemeralIPv4Network(**params)
+ error = context_manager.exception
+ self.assertIn("Cannot init network on", str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
+ """Raise an error when prefix_or_mask is not a netmask or prefix."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "broadcast": "192.168.2.255",
+ }
+ invalid_masks = ("invalid", "invalid.", "123.123.123")
+ for error_val in invalid_masks:
+ params["prefix_or_mask"] = error_val
+ with self.assertRaises(ValueError) as context_manager:
+ with net.EphemeralIPv4Network(**params):
+ pass
+ error = context_manager.exception
+ self.assertIn(
+ "Cannot setup network, invalid prefix or netmask: ", str(error)
+ )
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
+ """EphemeralIPv4Network performs teardown on the device if setup."""
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ "eth0",
+ "down",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "192.168.2.2/24",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ @mock.patch("cloudinit.net.readurl")
+ def test_ephemeral_ipv4_no_network_if_url_connectivity(
+ self, m_readurl, m_subp
+ ):
+ """No network setup is performed if we can successfully connect to
+ connectivity_url."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "connectivity_url_data": {"url": "http://example.org/index.html"},
+ }
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(
+ [mock.call(url="http://example.org/index.html", timeout=5)],
+ m_readurl.call_args_list,
+ )
+ # Ensure that no teardown happens:
+ m_subp.assert_has_calls([])
+
+ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
+ """EphemeralIPv4Network handles exception when address is setup.
+
+ It performs no cleanup as the interface was already setup.
+ """
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ m_subp.side_effect = ProcessExecutionError(
+ "", "RTNETLINK answers: File exists", 2
+ )
+ expected_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ with net.EphemeralIPv4Network(**params):
+ pass
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(
+ "Skip ephemeral network setup, eth0 already has address",
+ self.logs.getvalue(),
+ )
+
+ def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
+ """EphemeralIPv4Network takes a valid prefix to setup the network."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "24",
+ "broadcast": "192.168.2.255",
+ }
+ for prefix_val in ["24", 16]: # prefix can be int or string
+ params["prefix_or_mask"] = prefix_val
+ with net.EphemeralIPv4Network(**params):
+ pass
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ )
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/16",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ )
+
+ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
+ """Add the route when router is set and no default route exists."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "router": "192.168.2.1",
+ }
+ m_subp.return_value = "", "" # Empty response from ip route gw check
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ mock.call(["ip", "route", "show", "0.0.0.0/0"], capture=True),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ "src",
+ "192.168.2.2",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "default",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ ["ip", "-4", "route", "del", "default", "dev", "eth0"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ "src",
+ "192.168.2.2",
+ ],
+ capture=True,
+ ),
+ ]
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.255",
+ "broadcast": "192.168.2.255",
+ "static_routes": [
+ ("192.168.2.1/32", "0.0.0.0"),
+ ("169.254.169.254/32", "192.168.2.1"),
+ ("0.0.0.0/0", "192.168.2.1"),
+ ],
+ "router": "192.168.2.1",
+ }
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/32",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "192.168.2.1/32",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "169.254.169.254/32",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "0.0.0.0/0",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "0.0.0.0/0",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "169.254.169.254/32",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "-4", "route", "del", "192.168.2.1/32", "dev", "eth0"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ "eth0",
+ "down",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "192.168.2.2/32",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
+
+
+class TestApplyNetworkCfgNames(CiTestCase):
+ V1_CONFIG = textwrap.dedent(
+ """\
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 10.0.2.15
+ netmask: 255.255.255.0
+ gateway: 10.0.2.2
+ """
+ )
+ V2_CONFIG = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ V2_CONFIG_NO_SETNAME = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ """
+ )
+
+ V2_CONFIG_NO_MAC = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ driver: virtio-net
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ @mock.patch("cloudinit.net.device_devid")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v1_renames(
+ self, m_rename_interfaces, m_device_driver, m_device_devid
+ ):
+ m_device_driver.return_value = "virtio_net"
+ m_device_devid.return_value = "0x15d8"
+
+ net.apply_network_config_names(yaml.load(self.V1_CONFIG))
+
+ call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
+ m_rename_interfaces.assert_called_with([call])
+
+ @mock.patch("cloudinit.net.device_devid")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames(
+ self, m_rename_interfaces, m_device_driver, m_device_devid
+ ):
+ m_device_driver.return_value = "virtio_net"
+ m_device_devid.return_value = "0x15d8"
+
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG))
+
+ call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
+ m_rename_interfaces.assert_called_with([call])
+
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
+ m_rename_interfaces.assert_called_with([])
+
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
+ m_rename_interfaces.assert_called_with([])
+
+ def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
+ with self.assertRaises(RuntimeError):
+ net.apply_network_config_names(yaml.load("version: 3"))
+
+
+class TestHasURLConnectivity(HttprettyTestCase):
+ def setUp(self):
+ super(TestHasURLConnectivity, self).setUp()
+ self.url = "http://fake/"
+ self.kwargs = {"allow_redirects": True, "timeout": 5.0}
+
+ @mock.patch("cloudinit.net.readurl")
+ def test_url_timeout_on_connectivity_check(self, m_readurl):
+ """A timeout of 5 seconds is provided when reading a url."""
+ self.assertTrue(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected True on url connect",
+ )
+
+ def test_true_on_url_connectivity_success(self):
+ httpretty.register_uri(httpretty.GET, self.url)
+ self.assertTrue(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected True on url connect",
+ )
+
+ @mock.patch("requests.Session.request")
+ def test_false_on_url_connectivity_timeout(self, m_request):
+ """A timeout raised while accessing the url returns False."""
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
+ self.assertFalse(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected False on url timeout",
+ )
+
+ def test_false_on_url_connectivity_failure(self):
+ httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
+ self.assertFalse(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected False on url fail",
+ )
+
+
+def _mk_v1_phys(mac, name, driver, device_id):
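+ """Build a v1 'physical' config entry; optional driver/device_id go under 'params'."""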
+ v1_cfg = {"type": "physical", "name": name, "mac_address": mac}
+ params = {}
+ if driver:
+ params.update({"driver": driver})
+ if device_id:
+ params.update({"device_id": device_id})
+
+ if params:
+ v1_cfg.update({"params": params})
+
+ return v1_cfg
+
+
+def _mk_v2_phys(mac, name, driver=None, device_id=None):
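+ """Build a v2 ethernet entry matching on MAC; optional driver/device_id extend 'match'."""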
+ v2_cfg = {"set-name": name, "match": {"macaddress": mac}}
+ if driver:
+ v2_cfg["match"].update({"driver": driver})
+ if device_id:
+ v2_cfg["match"].update({"device_id": device_id})
+
+ return v2_cfg
+
+
+class TestExtractPhysdevs(CiTestCase):
+ def setUp(self):
+ super(TestExtractPhysdevs, self).setUp()
+ self.add_patch("cloudinit.net.device_driver", "m_driver")
+ self.add_patch("cloudinit.net.device_devid", "m_devid")
+
+ def test_extract_physdevs_looks_up_driver_v1(self):
+ driver = "virtio"
+ self.m_driver.return_value = driver
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_driver.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_driver_v2(self):
+ driver = "virtio"
+ self.m_driver.return_value = driver
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_driver.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_devid_v1(self):
+ devid = "0x1000"
+ self.m_devid.return_value = devid
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+ # insert the devid value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_devid.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_devid_v2(self):
+ devid = "0x1000"
+ self.m_devid.return_value = devid
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ # insert the devid value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_devid.assert_called_with("eth0")
+
+ def test_get_v1_type_physical(self):
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+
+ def test_get_v2_type_physical(self):
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+
+ def test_get_v2_type_physical_skips_if_no_set_name(self):
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ "ens3": {
+ "match": {"macaddress": "00:11:22:33:44:55"},
+ }
+ },
+ }
+ self.assertEqual([], net.extract_physdevs(netcfg))
+
+ def test_runtime_error_on_unknown_netcfg_version(self):
+ with self.assertRaises(RuntimeError):
+ net.extract_physdevs({"version": 3, "awesome_config": []})
+
+
+class TestNetFailOver(CiTestCase):
+ def setUp(self):
+ super(TestNetFailOver, self).setUp()
+ self.add_patch("cloudinit.net.util", "m_util")
+ self.add_patch("cloudinit.net.read_sys_net", "m_read_sys_net")
+ self.add_patch("cloudinit.net.device_driver", "m_device_driver")
+
+ def test_get_dev_features(self):
+ devname = self.random_string()
+ features = self.random_string()
+ self.m_read_sys_net.return_value = features
+
+ self.assertEqual(features, net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(
+ mock.call(devname, "device/features"),
+ self.m_read_sys_net.call_args_list[0],
+ )
+
+ def test_get_dev_features_none_returns_empty_string(self):
+ devname = self.random_string()
+ self.m_read_sys_net.side_effect = Exception("error")
+ self.assertEqual("", net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(
+ mock.call(devname, "device/features"),
+ self.m_read_sys_net.call_args_list[0],
+ )
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature(self, m_dev_features):
+ devname = self.random_string()
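+ # 64-bit feature string with only bit 62 set -- the standby feature bit this helper checks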
+ standby_features = ("0" * 62) + "1" + "0"
+ m_dev_features.return_value = standby_features
+ self.assertTrue(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
+ devname = self.random_string()
+ standby_features = self.random_string()
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_not_present_is_false(
+ self, m_dev_features
+ ):
+ devname = self.random_string()
+ standby_features = "0" * 64
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_no_features_is_false(
+ self, m_dev_features
+ ):
+ devname = self.random_string()
+ standby_features = None
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_master_checks_master_attr(self, m_sysdev):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_sysdev.return_value = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+ self.assertEqual(1, m_sysdev.call_count)
+ self.assertEqual(
+ mock.call(devname, path="master"), m_sysdev.call_args_list[0]
+ )
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = False # no standby feature flag
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "virtio_net" # master virtio_net
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_primary(devname, driver))
+ self.assertEqual(1, self.m_device_driver.call_count)
+ self.assertEqual(
+ mock.call(master_devname), self.m_device_driver.call_args_list[0]
+ )
+ self.assertEqual(1, m_standby.call_count)
+ self.assertEqual(
+ mock.call(master_devname), m_standby.call_args_list[0]
+ )
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_wrong_driver(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = "virtio_net"
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ m_exists.return_value = False # no master sysfs attr
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_bad_master(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "XXXX" # master not virtio_net
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_no_standby(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "virtio_net" # master virtio_net
+ m_standby.return_value = False # master has no standby feature flag
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_no_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = False # no master sysfs attr
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+ m_standby.return_value = False # no standby feature flag
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_primary(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = True
+ m_standby.return_value = False
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_standby(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = True
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_returns_false(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = False
+ self.assertFalse(net.is_netfailover(devname, driver))
+
+
+class TestOpenvswitchIsInstalled:
+ """Test cloudinit.net.openvswitch_is_installed.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.openvswitch_is_installed.cache_clear()
+
+ @pytest.mark.parametrize(
+ "expected,which_return", [(True, "/some/path"), (False, None)]
+ )
+ @mock.patch("cloudinit.net.subp.which")
+ def test_mirrors_which_result(self, m_which, expected, which_return):
+ m_which.return_value = which_return
+ assert expected == net.openvswitch_is_installed()
+
+ @mock.patch("cloudinit.net.subp.which")
+ def test_only_calls_which_once(self, m_which):
+ net.openvswitch_is_installed()
+ net.openvswitch_is_installed()
+ assert 1 == m_which.call_count
+
+
+@mock.patch("cloudinit.net.subp.subp", return_value=("", ""))
+class TestGetOVSInternalInterfaces:
+ """Test cloudinit.net.get_ovs_internal_interfaces.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.get_ovs_internal_interfaces.cache_clear()
+
+ def test_command_used(self, m_subp):
+ """Test we use the correct command when we call subp"""
+ net.get_ovs_internal_interfaces()
+
+ assert [
+ mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
+ ] == m_subp.call_args_list
+
+ def test_subp_contents_split_and_returned(self, m_subp):
+ """Test that the command output is appropriately mangled."""
+ stdout = "iface1\niface2\niface3\n"
+ m_subp.return_value = (stdout, "")
+
+ assert [
+ "iface1",
+ "iface2",
+ "iface3",
+ ] == net.get_ovs_internal_interfaces()
+
+ def test_database_connection_error_handled_gracefully(self, m_subp):
+ """Test that the error indicating OVS is down is handled gracefully."""
+ m_subp.side_effect = ProcessExecutionError(
+ stderr="database connection failed"
+ )
+
+ assert [] == net.get_ovs_internal_interfaces()
+
+ def test_other_errors_raised(self, m_subp):
+ """Test that only database connection errors are handled."""
+ m_subp.side_effect = ProcessExecutionError()
+
+ with pytest.raises(ProcessExecutionError):
+ net.get_ovs_internal_interfaces()
+
+ def test_only_runs_once(self, m_subp):
+ """Test that we cache the value."""
+ net.get_ovs_internal_interfaces()
+ net.get_ovs_internal_interfaces()
+
+ assert 1 == m_subp.call_count
+
+
+@mock.patch("cloudinit.net.get_ovs_internal_interfaces")
+@mock.patch("cloudinit.net.openvswitch_is_installed")
+class TestIsOpenVSwitchInternalInterface:
+ def test_false_if_ovs_not_installed(
+ self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces
+ ):
+ """Test that OVS' absence returns False."""
+ m_openvswitch_is_installed.return_value = False
+
+ assert not net.is_openvswitch_internal_interface("devname")
+
+ @pytest.mark.parametrize(
+ "detected_interfaces,devname,expected_return",
+ [
+ ([], "devname", False),
+ (["notdevname"], "devname", False),
+ (["devname"], "devname", True),
+ (["some", "other", "devices", "and", "ours"], "ours", True),
+ ],
+ )
+ def test_return_value_based_on_detected_interfaces(
+ self,
+ m_openvswitch_is_installed,
+ m_get_ovs_internal_interfaces,
+ detected_interfaces,
+ devname,
+ expected_return,
+ ):
+ """Test that the detected interfaces are used correctly."""
+ m_openvswitch_is_installed.return_value = True
+ m_get_ovs_internal_interfaces.return_value = detected_interfaces
+ assert expected_return == net.is_openvswitch_internal_interface(
+ devname
+ )
+
+
+class TestIsIpAddress:
+ """Tests for net.is_ip_address.
+
+ Instead of testing with values, we rely on the ipaddress stdlib module to
+ handle all values correctly, so we simply test that is_ip_address defers to
+ the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize(
+ "ip_address_side_effect,expected_return",
+ (
+ (ValueError, False),
+ (lambda _: ipaddress.IPv4Address("192.168.0.1"), True),
+ (lambda _: ipaddress.IPv6Address("2001:db8::"), True),
+ ),
+ )
+ def test_is_ip_address(self, ip_address_side_effect, expected_return):
+ with mock.patch(
+ "cloudinit.net.ipaddress.ip_address",
+ side_effect=ip_address_side_effect,
+ ) as m_ip_address:
+ ret = net.is_ip_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ip_address.call_args_list
+
+
+class TestIsIpv4Address:
+ """Tests for net.is_ipv4_address.
+
+ Instead of testing with values, we rely on the ipaddress stdlib module to
+ handle all values correctly, so we simply test that is_ipv4_address defers to
+ the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize(
+ "ipv4address_mock,expected_return",
+ (
+ (mock.Mock(side_effect=ValueError), False),
+ (
+ mock.Mock(return_value=ipaddress.IPv4Address("192.168.0.1")),
+ True,
+ ),
+ ),
+ )
+ def test_is_ipv4_address(self, ipv4address_mock, expected_return):
+ with mock.patch(
+ "cloudinit.net.ipaddress.IPv4Address", ipv4address_mock
+ ) as m_ipv4address:
+ ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ipv4address.call_args_list
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
new file mode 100644
index 00000000..471d969a
--- /dev/null
+++ b/tests/unittests/net/test_network_state.py
@@ -0,0 +1,222 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import ipaddress
+from unittest import mock
+
+import pytest
+
+from cloudinit import safeyaml
+from cloudinit.net import network_state
+from tests.unittests.helpers import CiTestCase
+
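+# Dotted module path used to build mock.patch targets in these tests.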
+netstate_path = "cloudinit.net.network_state"
+
+
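+# v1 config template; {iface} is filled in below with a valid or invalid interface name.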
+_V1_CONFIG_NAMESERVERS = """\
+network:
+ version: 1
+ config:
+ - type: nameserver
+ interface: {iface}
+ address:
+ - 192.168.1.1
+ - 8.8.8.8
+ search:
+ - spam.local
+ - type: nameserver
+ address:
+ - 192.168.1.0
+ - 4.4.4.4
+ search:
+ - eggs.local
+ - type: physical
+ name: eth0
+ mac_address: '00:11:22:33:44:55'
+ - type: physical
+ name: eth1
+ mac_address: '66:77:88:99:00:11'
+"""
+
+V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface="eth1")
+V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface="eth90")
+
+V2_CONFIG_NAMESERVERS = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ set-name: "ens92"
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+
+class TestNetworkStateParseConfig(CiTestCase):
+ def setUp(self):
+ super(TestNetworkStateParseConfig, self).setUp()
+ nsi_path = netstate_path + ".NetworkStateInterpreter"
+ self.add_patch(nsi_path, "m_nsi")
+
+ def test_missing_version_raises_runtime_error(self):
+ ncfg = {}
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
+
+ def test_unknown_version_raises_runtime_error(self):
+ ncfg = {"version": 13.2}
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
+
+ def test_version_2_passes_self_as_config(self):
+ ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]}
+ network_state.parse_net_config_data(ncfg)
+ self.assertEqual(
+ [mock.call(version=2, config=ncfg)], self.m_nsi.call_args_list
+ )
+
+ def test_valid_config_gets_network_state(self):
+ ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v1_config_gets_network_state(self):
+ ncfg = {"version": 1, "config": []}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v2_config_gets_network_state(self):
+ ncfg = {"version": 2}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+
+class TestNetworkStateParseConfigV2(CiTestCase):
+ def test_version_2_ignores_renderer_key(self):
+ ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}}
+ nsi = network_state.NetworkStateInterpreter(
+ version=ncfg["version"], config=ncfg
+ )
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()["config"])
+
+
+class TestNetworkStateParseNameservers:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
+
+ def test_v1_nameservers_valid(self):
+ config = self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_VALID
+ )
+
+ # If an interface was specified, DNS shouldn't be in the global list
+ assert ["192.168.1.0", "4.4.4.4"] == sorted(config.dns_nameservers)
+ assert ["eggs.local"] == config.dns_searchdomains
+
+ # If an interface was specified, DNS should be part of the interface
+ for iface in config.iter_interfaces():
+ if iface["name"] == "eth1":
+ assert iface["dns"]["addresses"] == ["192.168.1.1", "8.8.8.8"]
+ assert iface["dns"]["search"] == ["spam.local"]
+ else:
+ assert "dns" not in iface
+
+ def test_v1_nameservers_invalid(self):
+ with pytest.raises(ValueError):
+ self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_INVALID
+ )
+
+ def test_v2_nameservers(self):
+ config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS)
+
+ # Ensure DNS defined on interface exists on interface
+ for iface in config.iter_interfaces():
+ if iface["name"] == "eth0":
+ assert iface["dns"] == {
+ "nameservers": ["8.8.8.8"],
+ "search": ["spam.local", "eggs.local"],
+ }
+ else:
+ assert iface["dns"] == {
+ "nameservers": ["4.4.4.4"],
+ "search": ["foo.local", "bar.local"],
+ }
+
+ # Ensure DNS defined on interface also exists globally (since there
+ # are no global DNS definitions in v2)
+ assert ["4.4.4.4", "8.8.8.8"] == sorted(config.dns_nameservers)
+ assert [
+ "bar.local",
+ "eggs.local",
+ "foo.local",
+ "spam.local",
+ ] == sorted(config.dns_searchdomains)
+
+
+class TestNetworkStateHelperFunctions(CiTestCase):
+ def test_mask_to_net_prefix_ipv4(self):
+ netmask_value = "255.255.255.0"
+ expected = 24
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_all_bits_ipv4(self):
+ netmask_value = "255.255.255.255"
+ expected = 32
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_too_many_bits_ipv4(self):
+ netmask_value = "33"
+ self.assertRaises(
+ ValueError, network_state.ipv4_mask_to_net_prefix, netmask_value
+ )
+
+ def test_mask_to_net_prefix_all_bits_ipv6(self):
+ netmask_value = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"
+ expected = 128
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_ipv6(self):
+ netmask_value = "ffff:ffff:ffff:ffff::"
+ expected = 64
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_raises_value_error(self):
+ netmask_value = "ff:ff:ff:ff::"
+ self.assertRaises(
+ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value
+ )
+
+ def test_mask_to_net_prefix_too_many_bits_ipv6(self):
+ netmask_value = "129"
+ self.assertRaises(
+ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value
+ )
+
+ def test_mask_to_net_prefix_ipv4_object(self):
+ netmask_value = ipaddress.IPv4Address("255.255.255.255")
+ expected = 32
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_ipv6_object(self):
+ netmask_value = ipaddress.IPv6Address("ffff:ffff:ffff::")
+ expected = 48
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py
new file mode 100644
index 00000000..ec1d04e9
--- /dev/null
+++ b/tests/unittests/net/test_networkd.py
@@ -0,0 +1,64 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import safeyaml
+from cloudinit.net import network_state, networkd
+
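+# Netplan v2 config with set-name on eth1; the expected systemd-networkd .network renderings follow.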
+V2_CONFIG_SET_NAME = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ set-name: "ens92"
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+V2_CONFIG_SET_NAME_RENDERED_ETH0 = """[Match]
+MACAddress=00:11:22:33:44:55
+Name=eth0
+
+[Network]
+DHCP=no
+DNS=8.8.8.8
+Domains=spam.local eggs.local
+
+"""
+
+V2_CONFIG_SET_NAME_RENDERED_ETH1 = """[Match]
+MACAddress=66:77:88:99:00:11
+Name=ens92
+
+[Network]
+DHCP=no
+DNS=4.4.4.4
+Domains=foo.local bar.local
+
+"""
+
+
+class TestNetworkdRenderState:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
+
+ def test_networkd_render_with_set_name(self):
+ ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME)
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
+
+ assert "eth0" in rendered_content
+ assert rendered_content["eth0"] == V2_CONFIG_SET_NAME_RENDERED_ETH0
+ assert "ens92" in rendered_content
+ assert rendered_content["ens92"] == V2_CONFIG_SET_NAME_RENDERED_ETH1
+
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/__init__.py b/tests/unittests/runs/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/azurecloud/__init__.py
+++ b/tests/unittests/runs/__init__.py
diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py
new file mode 100644
index 00000000..1b1b5595
--- /dev/null
+++ b/tests/unittests/runs/test_merge_run.py
@@ -0,0 +1,61 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+
+from cloudinit import safeyaml, stages, util
+from cloudinit.settings import PER_INSTANCE
+from tests.unittests import helpers
+
+
+class TestMergeRun(helpers.FilesystemMockingTestCase):
+ def _patchIn(self, root):
+ self.patchOS(root)
+ self.patchUtils(root)
+
+ def test_none_ds(self):
+ new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_root)
+ self.replicateTestRoot("simple_ubuntu", new_root)
+ cfg = {
+ "datasource_list": ["None"],
+ "cloud_init_modules": ["write-files"],
+ "system_info": {"paths": {"run_dir": new_root}},
+ }
+ ud = helpers.readResource("user_data.1.txt")
+ cloud_cfg = safeyaml.dumps(cfg)
+ util.ensure_dir(os.path.join(new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
+ self._patchIn(new_root)
+
+ # Now start verifying what's created
+ initer = stages.Init()
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ initer.datasource.userdata_raw = ud
+ initer.instancify()
+ initer.update()
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
+ mirrors = initer.distro.get_option("package_mirrors")
+ self.assertEqual(1, len(mirrors))
+ mirror = mirrors[0]
+ self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])
+ mods = stages.Modules(initer)
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
+ self.assertTrue(len(failures) == 0)
+ self.assertTrue(os.path.exists("/etc/blah.ini"))
+ self.assertIn("write-files", which_ran)
+ contents = util.load_file("/etc/blah.ini")
+ self.assertEqual(contents, "blah")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py
index cb3aae60..38cf9494 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/runs/test_simple_run.py
@@ -3,12 +3,9 @@
import copy
import os
-
+from cloudinit import safeyaml, stages, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import safeyaml
-from cloudinit import stages
-from cloudinit.tests import helpers
-from cloudinit import util
+from tests.unittests import helpers
class TestSimpleRun(helpers.FilesystemMockingTestCase):
@@ -18,27 +15,28 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestSimpleRun, self).setUp()
self.new_root = self.tmp_dir()
- self.replicateTestRoot('simple_ubuntu', self.new_root)
+ self.replicateTestRoot("simple_ubuntu", self.new_root)
# Seed cloud.cfg file for our tests
self.cfg = {
- 'datasource_list': ['None'],
- 'runcmd': ['ls /etc'], # test ALL_DISTROS
- 'spacewalk': {}, # test non-ubuntu distros module definition
- 'system_info': {'paths': {'run_dir': self.new_root}},
- 'write_files': [
+ "datasource_list": ["None"],
+ "runcmd": ["ls /etc"], # test ALL_DISTROS
+ "spacewalk": {}, # test non-ubuntu distros module definition
+ "system_info": {"paths": {"run_dir": self.new_root}},
+ "write_files": [
{
- 'path': '/etc/blah.ini',
- 'content': 'blah',
- 'permissions': 0o755,
+ "path": "/etc/blah.ini",
+ "content": "blah",
+ "permissions": 0o755,
},
],
- 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
+ "cloud_init_modules": ["write-files", "spacewalk", "runcmd"],
}
cloud_cfg = safeyaml.dumps(self.cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
self.patchOS(self.new_root)
self.patchUtils(self.new_root)
@@ -49,12 +47,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.read_cfg()
initer.initialize()
self.assertTrue(os.path.exists("/var/lib/cloud"))
- for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
+ for d in ["scripts", "seed", "instances", "handlers", "sem", "data"]:
self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))
initer.fetch()
iid = initer.instancify()
- self.assertEqual(iid, 'iid-datasource-none')
+ self.assertEqual(iid, "iid-datasource-none")
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
@@ -66,20 +64,25 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
+ self.assertTrue(os.path.exists("/etc/blah.ini"))
+ self.assertIn("write-files", which_ran)
+ contents = util.load_file("/etc/blah.ini")
+ self.assertEqual(contents, "blah")
self.assertNotIn(
"Skipping modules ['write-files'] because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_none_ds_skips_modules_which_define_unmatched_distros(self):
"""Skip modules which define distros which don't match the current."""
@@ -89,17 +92,22 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn(
"Skipping modules 'spacewalk' because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
- self.assertNotIn('spacewalk', which_ran)
+ self.logs.getvalue(),
+ )
+ self.assertNotIn("spacewalk", which_ran)
def test_none_ds_runs_modules_which_distros_all(self):
"""Skip modules which define distros attribute as supporting 'all'.
@@ -113,28 +121,34 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertIn('runcmd', which_ran)
+ self.assertIn("runcmd", which_ran)
self.assertNotIn(
"Skipping modules 'runcmd' because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_none_ds_forces_run_via_unverified_modules(self):
"""run_section forced skipped modules by using unverified_modules."""
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
- cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
+ cfg["unverified_modules"] = ["spacewalk"] # Would have skipped
cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
initer = stages.Init()
initer.read_cfg()
@@ -142,16 +156,20 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertIn('spacewalk', which_ran)
+ self.assertIn("spacewalk", which_ran)
self.assertIn(
- "running unverified_modules: 'spacewalk'",
- self.logs.getvalue())
+ "running unverified_modules: 'spacewalk'", self.logs.getvalue()
+ )
def test_none_ds_run_with_no_config_modules(self):
"""run_section will report no modules run when none are configured."""
@@ -159,11 +177,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
# Represent empty configuration in /etc/cloud/cloud.cfg
- cfg['cloud_init_modules'] = None
+ cfg["cloud_init_modules"] = None
cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
initer = stages.Init()
initer.read_cfg()
@@ -171,12 +190,17 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertEqual([], which_ran)
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/__init__.py b/tests/unittests/sources/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/ec2/__init__.py
+++ b/tests/unittests/sources/__init__.py
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py
index cafe3961..5eabf104 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/tests/unittests/sources/helpers/test_netlink.py
@@ -2,48 +2,64 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase, mock
+import codecs
import socket
import struct
-import codecs
+
from cloudinit.sources.helpers.netlink import (
- NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket,
- read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect,
- wait_for_nic_attach_event, wait_for_nic_detach_event,
- OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT,
- OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK,
- RTM_SETLINK, RTM_GETLINK, MAX_SIZE)
+ MAX_SIZE,
+ OPER_DORMANT,
+ OPER_DOWN,
+ OPER_LOWERLAYERDOWN,
+ OPER_NOTPRESENT,
+ OPER_TESTING,
+ OPER_UNKNOWN,
+ OPER_UP,
+ RTATTR_START_OFFSET,
+ RTM_DELLINK,
+ RTM_GETLINK,
+ RTM_NEWLINK,
+ RTM_SETLINK,
+ NetlinkCreateSocketError,
+ create_bound_netlink_socket,
+ read_netlink_socket,
+ read_rta_oper_state,
+ unpack_rta_attr,
+ wait_for_media_disconnect_connect,
+ wait_for_nic_attach_event,
+ wait_for_nic_detach_event,
+)
+from tests.unittests.helpers import CiTestCase, mock
def int_to_bytes(i):
- '''convert integer to binary: eg: 1 to \x01'''
- hex_value = '{0:x}'.format(i)
- hex_value = '0' * (len(hex_value) % 2) + hex_value
- return codecs.decode(hex_value, 'hex_codec')
+ """convert integer to binary: eg: 1 to \x01"""
+ hex_value = "{0:x}".format(i)
+ hex_value = "0" * (len(hex_value) % 2) + hex_value
+ return codecs.decode(hex_value, "hex_codec")
class TestCreateBoundNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
def test_socket_error_on_create(self, m_socket):
- '''create_bound_netlink_socket catches socket creation exception'''
+ """create_bound_netlink_socket catches socket creation exception"""
"""NetlinkCreateSocketError is raised when socket creation errors."""
m_socket.side_effect = socket.error("Fake socket failure")
with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
create_bound_netlink_socket()
self.assertEqual(
- 'Exception during netlink socket create: Fake socket failure',
- str(ctx_mgr.exception))
+ "Exception during netlink socket create: Fake socket failure",
+ str(ctx_mgr.exception),
+ )
class TestReadNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
def test_read_netlink_socket(self, m_select, m_socket):
- '''read_netlink_socket able to receive data'''
- data = 'netlinktest'
+ """read_netlink_socket able to receive data"""
+ data = "netlinktest"
m_select.return_value = [m_socket], None, None
m_socket.recv.return_value = data
recv_data = read_netlink_socket(m_socket, 2)
@@ -52,10 +68,10 @@ class TestReadNetlinkSocket(CiTestCase):
self.assertIsNotNone(recv_data)
self.assertEqual(recv_data, data)
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
def test_netlink_read_timeout(self, m_select, m_socket):
- '''read_netlink_socket should timeout if nothing to read'''
+ """read_netlink_socket should timeout if nothing to read"""
m_select.return_value = [], None, None
data = read_netlink_socket(m_socket, 1)
m_select.assert_called_with([m_socket], [], [], 1)
@@ -63,35 +79,43 @@ class TestReadNetlinkSocket(CiTestCase):
self.assertIsNone(data)
def test_read_invalid_socket(self):
- '''read_netlink_socket raises assert error if socket is invalid'''
+ """read_netlink_socket raises assert error if socket is invalid"""
socket = None
with self.assertRaises(AssertionError) as context:
read_netlink_socket(socket, 1)
- self.assertTrue('netlink socket is none' in str(context.exception))
+ self.assertTrue("netlink socket is none" in str(context.exception))
class TestParseNetlinkMessage(CiTestCase):
-
def test_read_rta_oper_state(self):
- '''read_rta_oper_state could parse netlink message and extract data'''
+ """read_rta_oper_state could parse netlink message and extract data"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
buf = bytearray(48)
- struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5,
- 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc",
+ buf,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
interface_state = read_rta_oper_state(buf)
self.assertEqual(interface_state.ifname, ifname)
self.assertEqual(interface_state.operstate, OPER_DOWN)
def test_read_none_data(self):
- '''read_rta_oper_state raises assert error if data is none'''
+ """read_rta_oper_state raises assert error if data is none"""
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertEqual('data is none', str(context.exception))
+ self.assertEqual("data is none", str(context.exception))
def test_read_invalid_rta_operstate_none(self):
- '''read_rta_oper_state returns none if operstate is none'''
+ """read_rta_oper_state returns none if operstate is none"""
ifname = "eth0"
buf = bytearray(40)
bytes = ifname.encode("utf-8")
@@ -100,65 +124,84 @@ class TestParseNetlinkMessage(CiTestCase):
self.assertIsNone(interface_state)
def test_read_invalid_rta_ifname_none(self):
- '''read_rta_oper_state returns none if ifname is none'''
+ """read_rta_oper_state returns none if ifname is none"""
buf = bytearray(40)
- struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HHc", buf, RTATTR_START_OFFSET, 5, 16, int_to_bytes(OPER_DOWN)
+ )
interface_state = read_rta_oper_state(buf)
self.assertIsNone(interface_state)
def test_read_invalid_data_len(self):
- '''raise assert error if data size is smaller than required size'''
+ """raise assert error if data size is smaller than required size"""
buf = bytearray(32)
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(buf)
- self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in
- str(context.exception))
+ self.assertTrue(
+ "length of data is smaller than RTATTR_START_OFFSET"
+ in str(context.exception)
+ )
def test_unpack_rta_attr_none_data(self):
- '''unpack_rta_attr raises assert error if data is none'''
+ """unpack_rta_attr raises assert error if data is none"""
data = None
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, RTATTR_START_OFFSET)
- self.assertTrue('data is none' in str(context.exception))
+ self.assertTrue("data is none" in str(context.exception))
def test_unpack_rta_attr_invalid_offset(self):
- '''unpack_rta_attr raises assert error if offset is invalid'''
+ """unpack_rta_attr raises assert error if offset is invalid"""
data = bytearray(48)
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, "offset")
- self.assertTrue('offset is not integer' in str(context.exception))
+ self.assertTrue("offset is not integer" in str(context.exception))
with self.assertRaises(AssertionError) as context:
unpack_rta_attr(data, 31)
- self.assertTrue('rta offset is less than expected length' in
- str(context.exception))
+ self.assertTrue(
+ "rta offset is less than expected length" in str(context.exception)
+ )
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
class TestNicAttachDetach(CiTestCase):
with_logs = True
def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
+ """construct netlink data with specified fields"""
if ifname and operstate is not None:
data = bytearray(48)
bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
elif ifname:
data = bytearray(40)
bytes = ifname.encode("utf-8")
struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
elif operstate:
data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
return data
def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
+ """Test for a new nic attached"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_op_down]
@@ -167,7 +210,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
+ """Test for a new nic attached"""
ifname = "eth0"
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [data_op_up]
@@ -176,7 +219,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
+ """Test that we read only the interfaces we are interested in."""
data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
@@ -185,7 +228,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual("eth1", ifread)
def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
+ """Test that we read only the interfaces we are interested in."""
data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
@@ -194,7 +237,7 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual("eth0", ifread)
def test_nic_detached(self, m_read_netlink_socket, m_socket):
- '''Test for an existing nic detached'''
+ """Test for an existing nic detached"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN)
m_read_netlink_socket.side_effect = [data_op_down]
@@ -203,32 +246,46 @@ class TestNicAttachDetach(CiTestCase):
self.assertEqual(ifname, ifread)
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
class TestWaitForMediaDisconnectConnect(CiTestCase):
with_logs = True
def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
+ """construct netlink data with specified fields"""
if ifname and operstate is not None:
data = bytearray(48)
bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
elif ifname:
data = bytearray(40)
bytes = ifname.encode("utf-8")
struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
elif operstate:
data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
return data
- def test_media_down_up_scenario(self, m_read_netlink_socket,
- m_socket):
- '''Test for media down up sequence for required interface name'''
+ def test_media_down_up_scenario(self, m_read_netlink_socket, m_socket):
+ """Test for media down up sequence for required interface name"""
ifname = "eth0"
# construct data for Oper State down
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
@@ -238,15 +295,16 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
- def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect ignores unexpected interfaces.
+ def test_wait_for_media_switch_diff_interface(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect ignores unexpected interfaces.
The first two messages are for other interfaces and last two are for
expected interface. So the function exit only after receiving last
2 messages and therefore the call count for m_read_netlink_socket
has to be 4
- '''
+ """
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
@@ -259,51 +317,50 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
expected_ifname, RTM_NEWLINK, OPER_DOWN
)
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
+ expected_ifname, RTM_NEWLINK, OPER_UP
+ )
m_read_netlink_socket.side_effect = [
data_op_down_eth1,
data_op_up_eth1,
data_op_down_eth0,
- data_op_up_eth0
+ data_op_up_eth0,
]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
- self.assertIn('Ignored netlink event on interface %s' % other_ifname,
- self.logs.getvalue())
+ self.assertIn(
+ "Ignored netlink event on interface %s" % other_ifname,
+ self.logs.getvalue(),
+ )
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores GETLINK events.
+ """wait_for_media_disconnect_connect ignores GETLINK events.
The first two messages are for oper down and up for RTM_GETLINK type
which netlink module will ignore. The last 2 messages are RTM_NEWLINK
with oper state down and up messages. Therefore the call count for
m_read_netlink_socket has to be 4 ignoring first 2 messages
of RTM_GETLINK
- '''
+ """
ifname = "eth0"
data_getlink_down = self._media_switch_data(
ifname, RTM_GETLINK, OPER_DOWN
)
- data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP
- )
+ data_getlink_up = self._media_switch_data(ifname, RTM_GETLINK, OPER_UP)
data_newlink_down = self._media_switch_data(
ifname, RTM_NEWLINK, OPER_DOWN
)
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
data_getlink_down,
data_getlink_up,
data_newlink_down,
- data_newlink_up
+ data_newlink_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores SETLINK events.
+ """wait_for_media_disconnect_connect ignores SETLINK events.
The first two messages are for oper down and up for RTM_GETLINK type
which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down
@@ -311,34 +368,31 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
sees down->up scenario. So the call count for m_read_netlink_socket
has to be 4 ignoring first 2 messages of RTM_GETLINK and
last 2 messages of RTM_NEWLINK
- '''
+ """
ifname = "eth0"
data_setlink_down = self._media_switch_data(
ifname, RTM_SETLINK, OPER_DOWN
)
- data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP
- )
+ data_setlink_up = self._media_switch_data(ifname, RTM_SETLINK, OPER_UP)
data_newlink_down = self._media_switch_data(
ifname, RTM_NEWLINK, OPER_DOWN
)
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
data_setlink_down,
data_setlink_up,
data_newlink_down,
data_newlink_up,
data_newlink_down,
- data_newlink_up
+ data_newlink_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
- def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket,
- m_socket):
- '''returns only if it receives UP event after a DOWN event'''
+ def test_netlink_invalid_switch_scenario(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """returns only if it receives UP event after a DOWN event"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
@@ -358,114 +412,153 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname, RTM_NEWLINK, OPER_UNKNOWN
)
m_read_netlink_socket.side_effect = [
- data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up
+ data_op_up,
+ data_op_up,
+ data_op_dormant,
+ data_op_up,
+ data_op_notpresent,
+ data_op_up,
+ data_op_lowerdown,
+ data_op_up,
+ data_op_testing,
+ data_op_up,
+ data_op_unknown,
+ data_op_up,
+ data_op_down,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
- def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect handles in between transitions'''
+ def test_netlink_valid_inbetween_transitions(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect handles in between transitions"""
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_dormant = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DORMANT)
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
data_op_unknown = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
m_read_netlink_socket.side_effect = [
- data_op_down, data_op_dormant,
- data_op_unknown, data_op_up
+ data_op_down,
+ data_op_dormant,
+ data_op_unknown,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect should handle invalid operstates.
+ """wait_for_media_disconnect_connect should handle invalid operstates.
The function should not fail and return even if it receives invalid
operstates. It always should wait for down up sequence.
- '''
+ """
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
m_read_netlink_socket.side_effect = [
- data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up
+ data_op_invalid,
+ data_op_up,
+ data_op_down,
+ data_op_invalid,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none netlink socket.'''
+ """wait_for_media_disconnect_connect handle none netlink socket."""
socket = None
ifname = "eth0"
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(socket, ifname)
- self.assertTrue('netlink socket is none' in str(context.exception))
+ self.assertTrue("netlink socket is none" in str(context.exception))
def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none interface name'''
+ """wait_for_media_disconnect_connect handle none interface name"""
ifname = None
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name is none' in str(context.exception))
+ self.assertTrue("interface name is none" in str(context.exception))
ifname = ""
with self.assertRaises(AssertionError) as context:
wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name cannot be empty' in
- str(context.exception))
+ self.assertTrue(
+ "interface name cannot be empty" in str(context.exception)
+ )
def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket):
- ''' wait_for_media_disconnect_connect handles invalid rta data'''
+ """wait_for_media_disconnect_connect handles invalid rta data"""
ifname = "eth0"
data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN)
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
m_read_netlink_socket.side_effect = [
- data_invalid1, data_invalid2, data_op_down, data_op_up
+ data_invalid1,
+ data_invalid2,
+ data_op_down,
+ data_op_up,
]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read multiple messages in single receive call'''
+ """Read multiple messages in single receive call"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
)
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ "HH4sHHc",
+ data,
+ 48 + RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_UP),
)
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
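
The struct.pack_into calls above, and the ones in the partial-read test below, hand-build raw netlink messages for the mocked socket: a 16-byte nlmsghdr followed by two routing attributes for the interface name and operstate. A minimal standalone sketch of that layout follows; the numeric constants are illustrative assumptions, not imports from cloudinit.sources.helpers.netlink.

import struct

RTM_NEWLINK = 16            # assumed rtnetlink message type value
OPER_DOWN = 2               # assumed IF_OPER_DOWN value
RTATTR_START_OFFSET = 32    # assumed: 16-byte nlmsghdr + 16-byte ifinfomsg


def fake_newlink_msg(ifname, operstate):
    """Build one 48-byte RTM_NEWLINK message carrying ifname and operstate."""
    buf = bytearray(48)
    # nlmsghdr: total length, message type, flags, sequence number, pid
    struct.pack_into("=LHHLL", buf, 0, 48, RTM_NEWLINK, 0, 0, 0)
    # two rtattrs: IFLA_IFNAME (type 3, len 8) and IFLA_OPERSTATE (type 16, len 5)
    struct.pack_into(
        "HH4sHHc", buf, RTATTR_START_OFFSET,
        8, 3, ifname.encode("utf-8"), 5, 16, bytes([operstate]),
    )
    return buf


assert len(fake_newlink_msg("eth0", OPER_DOWN)) == 48
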
def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read partial messages in receive call'''
+ """Read partial messages in receive call"""
ifname = "eth0"
bytes = ifname.encode("utf-8")
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
- "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ "HH4sHHc",
+ data1,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
)
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
struct.pack_into(
diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py
new file mode 100644
index 00000000..eb87b1ce
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_openstack.py
@@ -0,0 +1,62 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+from unittest import mock
+
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetJson(test_helpers.CiTestCase):
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {
+ "ethernet_mac_address": mac0,
+ "id": "tapcd9f6d46-4a",
+ "mtu": None,
+ "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
+ }
+ ],
+ "networks": [
+ {
+ "id": "network0",
+ "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp",
+ }
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}],
+ }
+ macs = {mac0: "eth0"}
+
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:9c:bf:3d",
+ "mtu": None,
+ "name": "eth0",
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ },
+ {"address": "8.8.8.8", "type": "nameserver"},
+ ],
+ }
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(
+ network_json=net_json, known_macs=macs
+ ),
+ )
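
A standalone usage sketch of the helper exercised above: it reuses the test file's own mock of is_openvswitch_internal_interface so the conversion does not probe the host, and the metadata fragment mirrors the test data.

from unittest import mock

from cloudinit.sources.helpers import openstack

net_json = {
    "links": [
        {
            "ethernet_mac_address": "fa:16:3e:9c:bf:3d",
            "id": "tapcd9f6d46-4a",
            "mtu": None,
            "type": "bridge",
            "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
        }
    ],
    "networks": [
        {
            "id": "network0",
            "link": "tapcd9f6d46-4a",
            "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
            "type": "ipv4_dhcp",
        }
    ],
    "services": [{"address": "8.8.8.8", "type": "dns"}],
}

with mock.patch(
    "cloudinit.net.is_openvswitch_internal_interface", return_value=False
):
    v1 = openstack.convert_net_json(
        network_json=net_json, known_macs={"fa:16:3e:9c:bf:3d": "eth0"}
    )

# v1 is a version-1 network config: a "physical" entry named eth0 with a
# dhcp4 subnet, plus a "nameserver" entry for 8.8.8.8.
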
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
new file mode 100644
index 00000000..8a61d5ee
--- /dev/null
+++ b/tests/unittests/sources/test_aliyun.py
@@ -0,0 +1,287 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import functools
+import os
+from unittest import mock
+
+import httpretty
+
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config
+from tests.unittests import helpers as test_helpers
+
+DEFAULT_METADATA = {
+ "instance-id": "aliyun-test-vm-00",
+ "eipv4": "10.0.0.1",
+ "hostname": "test-hostname",
+ "image-id": "m-test",
+ "launch-index": "0",
+ "mac": "00:16:3e:00:00:00",
+ "network-type": "vpc",
+ "private-ipv4": "192.168.0.1",
+ "serial-number": "test-string",
+ "vpc-cidr-block": "192.168.0.0/16",
+ "vpc-id": "test-vpc",
+ "vswitch-id": "test-vpc",
+ "vswitch-cidr-block": "192.168.0.0/16",
+ "zone-id": "test-zone-1",
+ "ntp-conf": {
+ "ntp_servers": [
+ "ntp1.aliyun.com",
+ "ntp2.aliyun.com",
+ "ntp3.aliyun.com",
+ ]
+ },
+ "source-address": [
+ "http://mirrors.aliyun.com",
+ "http://mirrors.aliyuncs.com",
+ ],
+ "public-keys": {
+ "key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."},
+ "key-pair-2": {"openssh-key": "ssh-rsa AAAAB3..."},
+ },
+}
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+ def register_helper(register, base_url, body):
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url.rstrip("/"), "\n".join(body) + "\n")
+ elif isinstance(body, dict):
+ if not body:
+ register(
+ base_url.rstrip("/") + "/", "not found", status_code=404
+ )
+ vals = []
+ for k, v in body.items():
+ if isinstance(v, (str, list)):
+ suffix = k.rstrip("/")
+ else:
+ suffix = k.rstrip("/") + "/"
+ vals.append(suffix)
+ url = base_url.rstrip("/") + "/" + suffix
+ register_helper(register, url, v)
+ register(base_url, "\n".join(vals) + "\n")
+
+ register = functools.partial(httpretty.register_uri, httpretty.GET)
+ register_helper(register, base_url, data)
+
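
As a rough model of the recursion above (an approximation, not code from the test file): the helper registers one GET endpoint per leaf string or list, gives nested dicts a trailing slash, and serves an index listing at every dict level. The sketch below enumerates those URLs for a tiny tree; the base URL is an assumed AliYun-style example.

def expected_urls(base, body):
    """Enumerate the GET URLs register_mock_metaserver() would cover for a
    nested dict: leaf strings/lists get their own endpoint, nested dicts
    get a trailing slash plus an index listing their children."""
    urls = [base]
    if isinstance(body, dict):
        for key, val in body.items():
            if isinstance(val, (str, list)):
                suffix = key.rstrip("/")
            else:
                suffix = key.rstrip("/") + "/"
            urls.extend(expected_urls(base.rstrip("/") + "/" + suffix, val))
    return urls


base = "http://100.100.100.200/2016-01-01/meta-data"  # assumed AliYun layout
tree = {"instance-id": "i-123", "public-keys": {"kp-1": "ssh-rsa AAAA..."}}
assert expected_urls(base, tree) == [
    base,
    base + "/instance-id",
    base + "/public-keys/",
    base + "/public-keys/kp-1",
]
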
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+ def setUp(self):
+ super(TestAliYunDatasource, self).setUp()
+ cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}}
+ distro = {}
+ paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+ self.metadata_address = self.ds.metadata_urls[0]
+
+ @property
+ def default_metadata(self):
+ return DEFAULT_METADATA
+
+ @property
+ def default_userdata(self):
+ return DEFAULT_USERDATA
+
+ @property
+ def metadata_url(self):
+ return (
+ os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "meta-data",
+ )
+ + "/"
+ )
+
+ @property
+ def userdata_url(self):
+ return os.path.join(
+ self.metadata_address, self.ds.min_metadata_version, "user-data"
+ )
+
+ # EC2 provides an instance-identity document which must return 404 here
+ # for this test to pass.
+ @property
+ def default_identity(self):
+ return {}
+
+ @property
+ def identity_url(self):
+ return os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "dynamic",
+ "instance-identity",
+ )
+
+ def regist_default_server(self):
+ register_mock_metaserver(self.metadata_url, self.default_metadata)
+ register_mock_metaserver(self.userdata_url, self.default_userdata)
+ register_mock_metaserver(self.identity_url, self.default_identity)
+
+ def _test_get_data(self):
+ self.assertEqual(self.ds.metadata, self.default_metadata)
+ self.assertEqual(
+ self.ds.userdata_raw, self.default_userdata.encode("utf8")
+ )
+
+ def _test_get_sshkey(self):
+ pub_keys = [
+ v["openssh-key"]
+ for (_, v) in self.default_metadata["public-keys"].items()
+ ]
+ self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+ def _test_get_iid(self):
+ self.assertEqual(
+ self.default_metadata["instance-id"], self.ds.get_instance_id()
+ )
+
+ def _test_host_name(self):
+ self.assertEqual(
+ self.default_metadata["hostname"], self.ds.get_hostname()
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_with_mock_server(self, m_is_aliyun):
+ m_is_aliyun.return_value = True
+ self.regist_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(True, ret)
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self._test_get_data()
+ self._test_get_sshkey()
+ self._test_get_iid()
+ self._test_host_name()
+ self.assertEqual("aliyun", self.ds.cloud_name)
+ self.assertEqual("ec2", self.ds.platform)
+ self.assertEqual(
+ "metadata (http://100.100.100.200)", self.ds.subplatform
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
+ """If is_aliyun returns false, then get_data should return False."""
+ m_is_aliyun.return_value = False
+ self.regist_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self.assertEqual(False, ret)
+
+ def test_parse_public_keys(self):
+ public_keys = {}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": "ssh-key-0"}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), [public_keys["key-pair-0"]]
+ )
+
+ public_keys = {"key-pair-0": "ssh-key-0", "key-pair-1": "ssh-key-1"}
+ self.assertEqual(
+ set(ay.parse_public_keys(public_keys)),
+ set([public_keys["key-pair-0"], public_keys["key-pair-1"]]),
+ )
+
+ public_keys = {"key-pair-0": ["ssh-key-0", "ssh-key-1"]}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), public_keys["key-pair-0"]
+ )
+
+ public_keys = {"key-pair-0": {"openssh-key": []}}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": {"openssh-key": "ssh-key-0"}}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ [public_keys["key-pair-0"]["openssh-key"]],
+ )
+
+ public_keys = {
+ "key-pair-0": {"openssh-key": ["ssh-key-0", "ssh-key-1"]}
+ }
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ public_keys["key-pair-0"]["openssh-key"],
+ )
+
+ def test_route_metric_calculated_without_device_number(self):
+ """Test that route-metric code works without `device-number`
+
+ `device-number` is part of EC2 metadata, but not supported on aliyun.
+ Attempting to access it will raise a KeyError.
+
+ LP: #1917875
+ """
+ netcfg = convert_ec2_metadata_network_config(
+ {
+ "interfaces": {
+ "macs": {
+ "06:17:04:d7:26:09": {
+ "interface-id": "eni-e44ef49e",
+ },
+ "06:17:04:d7:26:08": {
+ "interface-id": "eni-e44ef49f",
+ },
+ }
+ }
+ },
+ macs_to_nics={
+ "06:17:04:d7:26:09": "eth0",
+ "06:17:04:d7:26:08": "eth1",
+ },
+ )
+
+ met0 = netcfg["ethernets"]["eth0"]["dhcp4-overrides"]["route-metric"]
+ met1 = netcfg["ethernets"]["eth1"]["dhcp4-overrides"]["route-metric"]
+
+ # route-metric numbers should be 100 apart
+ assert 100 == abs(met0 - met1)
+
+
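
The assertion above only checks the spacing between two NICs. A hedged model of the spacing rule these tests rely on (an assumption about the converter's behavior, not its actual code) is:

def route_metric(nic_index):
    # assumption: the Nth NIC gets 100 * (N + 1), so eth0 keeps the lowest
    # (most preferred) metric and adjacent NICs always differ by 100
    return 100 * (nic_index + 1)


metrics = [route_metric(i) for i in range(3)]
assert metrics == [100, 200, 300]
assert abs(metrics[0] - metrics[1]) == 100
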
+class TestIsAliYun(test_helpers.CiTestCase):
+ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
+ read_dmi_data_expected = [mock.call("system-product-name")]
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_true_on_aliyun_product(self, m_read_dmi_data):
+ """Should return true if the dmi product data has expected value."""
+ m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(True, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_empty_string(self, m_read_dmi_data):
+ """Should return false on empty value returned."""
+ m_read_dmi_data.return_value = ""
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_unknown_string(self, m_read_dmi_data):
+ """Should return false on an unrelated string."""
+ m_read_dmi_data.return_value = "cubs win"
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+
+# vi: ts=4 expandtab
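
Pulled together from the test_parse_public_keys cases above, this is a compact usage sketch of the key shapes DataSourceAliYun.parse_public_keys flattens into a list of key strings.

from cloudinit.sources import DataSourceAliYun as ay

assert ay.parse_public_keys({}) == []
assert ay.parse_public_keys({"kp-0": "ssh-key-0"}) == ["ssh-key-0"]
assert ay.parse_public_keys({"kp-0": ["ssh-key-0", "ssh-key-1"]}) == [
    "ssh-key-0",
    "ssh-key-1",
]
assert ay.parse_public_keys({"kp-0": {"openssh-key": "ssh-key-0"}}) == [
    "ssh-key-0"
]
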
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/sources/test_altcloud.py
index 7a5393ac..44dfafd9 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/sources/test_altcloud.py
@@ -6,54 +6,47 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This test file exercises the code in sources DataSourceAltCloud.py
-'''
+"""
import os
import shutil
import tempfile
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
import cloudinit.sources.DataSourceAltCloud as dsac
+from cloudinit import dmi, helpers, subp, util
+from tests.unittests.helpers import CiTestCase, mock
-OS_UNAME_ORIG = getattr(os, 'uname')
+OS_UNAME_ORIG = getattr(os, "uname")
def _write_user_data_files(mount_dir, value):
- '''
+ """
Populate the deltacloud_user_data_file and the user_data_file,
which would be populated with user data.
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
- udfile = open(deltacloud_user_data_file, 'w')
+ udfile = open(deltacloud_user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(deltacloud_user_data_file, 0o664)
- udfile = open(user_data_file, 'w')
+ udfile = open(user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(user_data_file, 0o664)
-def _remove_user_data_files(mount_dir,
- dc_file=True,
- non_dc_file=True):
- '''
+def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True):
+ """
Remove the test files: deltacloud_user_data_file and
user_data_file
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# Ignore any failures removing files that are already gone.
if dc_file:
@@ -70,9 +63,10 @@ def _remove_user_data_files(mount_dir,
def _dmi_data(expected):
- '''
+ """
Spoof the data received over DMI
- '''
+ """
+
def _data(key):
return expected
@@ -80,19 +74,19 @@ def _dmi_data(expected):
class TestGetCloudType(CiTestCase):
- '''Test to exercise method: DataSourceAltCloud.get_cloud_type()'''
+ """Test to exercise method: DataSourceAltCloud.get_cloud_type()"""
with_logs = True
def setUp(self):
- '''Set up.'''
+ """Set up."""
super(TestGetCloudType, self).setUp()
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.dmi_data = dmi.read_dmi_data
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
@@ -101,216 +95,226 @@ class TestGetCloudType(CiTestCase):
def test_cloud_info_file_ioerror(self):
"""Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors."""
- self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE)
+ self.assertEqual("/etc/sysconfig/cloud-info", dsac.CLOUD_INFO_FILE)
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp):
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.tmp):
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
self.assertIn(
- "[Errno 21] Is a directory: '%s'" % self.tmp,
- self.logs.getvalue())
+ "[Errno 21] Is a directory: '%s'" % self.tmp, self.logs.getvalue()
+ )
def test_cloud_info_file(self):
"""Return uppercase stripped content from /etc/sysconfig/cloud-info."""
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, ' OverRiDdeN CloudType ')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, " OverRiDdeN CloudType ")
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info):
- self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", cloud_info):
+ self.assertEqual("OVERRIDDEN CLOUDTYPE", dsrc.get_cloud_type())
def test_rhev(self):
- '''
+ """
Test method get_cloud_type() for RHEVm systems.
Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
- '''
- dmi.read_dmi_data = _dmi_data('RHEV')
+ """
+ dmi.read_dmi_data = _dmi_data("RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('RHEV', dsrc.get_cloud_type())
+ self.assertEqual("RHEV", dsrc.get_cloud_type())
def test_vsphere(self):
- '''
+ """
Test method get_cloud_type() for vSphere systems.
Forcing read_dmi_data return to match a vSphere system: VMware Virtual Platform
- '''
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('VSPHERE', dsrc.get_cloud_type())
+ self.assertEqual("VSPHERE", dsrc.get_cloud_type())
def test_unknown(self):
- '''
+ """
Test method get_cloud_type() for unknown systems.
Forcing read_dmi_data return to match an unrecognized return.
- '''
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
class TestGetDataCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
With a contrived CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp)
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.cloud_info_file = self.tmp_path("cloud-info", dir=self.tmp)
def test_rhev(self):
- '''Success Test module get_data() forcing RHEV.'''
+ """Success Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("rhev (/dev/fd0)", dsrc.subplatform)
def test_vsphere(self):
- '''Success Test module get_data() forcing VSPHERE.'''
+ """Success Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('vsphere (unknown)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("vsphere (unknown)", dsrc.subplatform)
def test_fail_rhev(self):
- '''Failure Test module get_data() forcing RHEV.'''
+ """Failure Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_fail_vsphere(self):
- '''Failure Test module get_data() forcing VSPHERE.'''
+ """Failure Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_unrecognized(self):
- '''Failure Test module get_data() forcing unrecognized.'''
+ """Failure Test module get_data() forcing unrecognized."""
- util.write_file(self.cloud_info_file, 'unrecognized')
+ util.write_file(self.cloud_info_file, "unrecognized")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
class TestGetDataNoCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
Without a CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.dmi_data = dmi.read_dmi_data
- dsac.CLOUD_INFO_FILE = \
- 'no such file'
+ dsac.CLOUD_INFO_FILE = "no such file"
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
dmi.read_dmi_data = self.dmi_data
# Return back to original arch
force_arch()
def test_rhev_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing RHEV.'''
+ """Test No cloud info file module get_data() forcing RHEV."""
- dmi.read_dmi_data = _dmi_data('RHEV Hypervisor')
+ dmi.read_dmi_data = _dmi_data("RHEV Hypervisor")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_vsphere_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing VSPHERE.'''
+ """Test No cloud info file module get_data() forcing VSPHERE."""
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_failure_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing unrecognized.'''
+ """Test No cloud info file module get_data() forcing unrecognized."""
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
class TestUserDataRhevm(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_rhevm()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = self.tmp_dir()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
- 'm_modprobe_floppy', return_value=None)
+ "cloudinit.sources.DataSourceAltCloud.modprobe_floppy",
+ "m_modprobe_floppy",
+ return_value=None,
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
- 'm_udevadm_settle', return_value=('', ''))
+ "cloudinit.sources.DataSourceAltCloud.util.udevadm_settle",
+ "m_udevadm_settle",
+ return_value=("", ""),
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
- 'm_mount_cb')
+ "cloudinit.sources.DataSourceAltCloud.util.mount_cb", "m_mount_cb"
+ )
def test_mount_cb_fails(self):
- '''Test user_data_rhevm() where mount_cb fails.'''
+ """Test user_data_rhevm() where mount_cb fails."""
self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_modprobe_fails(self):
- '''Test user_data_rhevm() where modprobe fails.'''
+ """Test user_data_rhevm() where modprobe fails."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "Failed modprobe")
+ "Failed modprobe"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_modprobe_cmd(self):
- '''Test user_data_rhevm() with no modprobe command.'''
+ """Test user_data_rhevm() with no modprobe command."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "No such file or dir")
+ "No such file or dir"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_udevadm_fails(self):
- '''Test user_data_rhevm() where udevadm fails.'''
+ """Test user_data_rhevm() where udevadm fails."""
self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
- "Failed settle.")
+ "Failed settle."
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_udevadm_cmd(self):
- '''Test user_data_rhevm() with no udevadm command.'''
+ """Test user_data_rhevm() with no udevadm command."""
self.m_udevadm_settle.side_effect = OSError("No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -318,16 +322,17 @@ class TestUserDataRhevm(CiTestCase):
class TestUserDataVsphere(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_vsphere()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -340,13 +345,12 @@ class TestUserDataVsphere(CiTestCase):
except OSError:
pass
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_mount_cb.return_value = []
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -356,7 +360,7 @@ class TestUserDataVsphere(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
m_mount_cb.side_effect = util.MountFailedError("Unable To mount")
@@ -370,28 +374,30 @@ class TestUserDataVsphere(CiTestCase):
def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with):
"""Test user_data_vsphere() where successful."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
- m_mount_cb.return_value = 'raw userdata from cdrom'
+ m_mount_cb.return_value = "raw userdata from cdrom"
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, 'VSPHERE')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, "VSPHERE")
self.assertEqual(True, dsrc.user_data_vsphere())
- m_find_devs_with.assert_called_once_with('LABEL=CDROM')
+ m_find_devs_with.assert_called_once_with("LABEL=CDROM")
m_mount_cb.assert_called_once_with(
- '/dev/mock/cdrom', dsac.read_user_data_callback)
- with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'):
- self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform)
+ "/dev/mock/cdrom", dsac.read_user_data_callback
+ )
+ with mock.patch.object(dsrc, "get_cloud_type", return_value="VSPHERE"):
+ self.assertEqual("vsphere (/dev/mock/cdrom)", dsrc.subplatform)
class TestReadUserDataCallback(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.read_user_data_callback()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -405,46 +411,49 @@ class TestReadUserDataCallback(CiTestCase):
pass
def test_callback_both(self):
- '''Test read_user_data_callback() with both files.'''
+ """Test read_user_data_callback() with both files."""
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_dc(self):
- '''Test read_user_data_callback() with only DC file.'''
+ """Test read_user_data_callback() with only DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=False,
- non_dc_file=True)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=False, non_dc_file=True
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_non_dc(self):
- '''Test read_user_data_callback() with only non-DC file.'''
+ """Test read_user_data_callback() with only non-DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=True,
- non_dc_file=False)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=True, non_dc_file=False
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_none(self):
- '''Test read_user_data_callback() no files are found.'''
+ """Test read_user_data_callback() no files are found."""
_remove_user_data_files(self.mount_dir)
self.assertIsNone(dsac.read_user_data_callback(self.mount_dir))
def force_arch(arch=None):
-
def _os_uname():
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch)
+ return ("LINUX", "NODENAME", "RELEASE", "VERSION", arch)
if arch:
- setattr(os, 'uname', _os_uname)
+ setattr(os, "uname", _os_uname)
elif arch is None:
- setattr(os, 'uname', OS_UNAME_ORIG)
+ setattr(os, "uname", OS_UNAME_ORIG)
+
# vi: ts=4 expandtab
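
The AltCloud tests above exercise two inputs: the optional /etc/sysconfig/cloud-info override and the DMI system product name. A simplified model of that decision order (an approximation for illustration, not get_cloud_type() itself) is:

def classify(cloud_info_contents=None, dmi_product=""):
    # cloud-info file wins when present: stripped, upper-cased contents
    if cloud_info_contents is not None:
        return cloud_info_contents.strip().upper()
    # otherwise fall back to matching the DMI system product name
    if dmi_product.startswith("RHEV"):
        return "RHEV"
    if dmi_product == "VMware Virtual Platform":
        return "VSPHERE"
    return "UNKNOWN"


assert classify(" OverRiDdeN CloudType ") == "OVERRIDDEN CLOUDTYPE"
assert classify(dmi_product="RHEV Hypervisor") == "RHEV"
assert classify(dmi_product="VMware Virtual Platform") == "VSPHERE"
assert classify(dmi_product="Unrecognized Platform") == "UNKNOWN"
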
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
new file mode 100644
index 00000000..5f956a63
--- /dev/null
+++ b/tests/unittests/sources/test_azure.py
@@ -0,0 +1,4306 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import crypt
+import json
+import os
+import stat
+import xml.etree.ElementTree as ET
+
+import httpretty
+import pytest
+import requests
+import yaml
+
+from cloudinit import distros, helpers, url_helper
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceAzure as dsaz
+from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.helpers import netlink
+from cloudinit.util import (
+ MountFailedError,
+ b64e,
+ decode_binary,
+ json_dumps,
+ load_file,
+ load_json,
+ write_file,
+)
+from cloudinit.version import version_string as vs
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ resourceLocation,
+ wrap_and_call,
+)
+
+MOCKPATH = "cloudinit.sources.DataSourceAzure."
+
+
+@pytest.fixture
+def azure_ds(paths):
+ """Provide DataSourceAzure instance with mocks for minimal test case."""
+ with mock.patch(MOCKPATH + "_is_platform_viable", return_value=True):
+ yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
+
+
+@pytest.fixture
+def mock_azure_helper_readurl():
+ with mock.patch(
+ "cloudinit.sources.helpers.azure.url_helper.readurl", autospec=True
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_get_metadata_from_fabric():
+ with mock.patch(
+ MOCKPATH + "get_metadata_from_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_report_failure_to_fabric():
+ with mock.patch(
+ MOCKPATH + "report_failure_to_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_dmi_read_dmi_data():
+ def fake_read(key: str) -> str:
+ if key == "system-uuid":
+ return "fake-system-uuid"
+ raise RuntimeError()
+
+ with mock.patch(
+ MOCKPATH + "dmi.read_dmi_data",
+ side_effect=fake_read,
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_maybe_perform_dhcp_discovery():
+ with mock.patch(
+ "cloudinit.net.dhcp.maybe_perform_dhcp_discovery",
+ return_value=[
+ {
+ "unknown-245": "aa:bb:cc:dd",
+ "interface": "ethBoot0",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ],
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_EphemeralIPv4Network():
+ with mock.patch(
+ "cloudinit.net.dhcp.EphemeralIPv4Network",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interfaces():
+ with mock.patch(MOCKPATH + "net.get_interfaces", return_value=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interface_mac():
+ with mock.patch(
+ MOCKPATH + "net.get_interface_mac",
+ return_value="001122334455",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_netlink():
+ with mock.patch(
+ MOCKPATH + "netlink",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_os_path_isfile():
+ with mock.patch(MOCKPATH + "os.path.isfile", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_readurl():
+ with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_subp_subp():
+ with mock.patch(MOCKPATH + "subp.subp", side_effect=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_ensure_dir():
+ with mock.patch(
+ MOCKPATH + "util.ensure_dir",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_find_devs_with():
+ with mock.patch(MOCKPATH + "util.find_devs_with", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_load_file():
+ with mock.patch(
+ MOCKPATH + "util.load_file",
+ autospec=True,
+ return_value=b"",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_mount_cb():
+ with mock.patch(
+ MOCKPATH + "util.mount_cb",
+ autospec=True,
+ return_value=({}, "", {}, {}),
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_write_file():
+ with mock.patch(
+ MOCKPATH + "util.write_file",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+def construct_valid_ovf_env(
+ data=None, pubkeys=None, userdata=None, platform_settings=None
+):
+ if data is None:
+ data = {"HostName": "FOOHOST"}
+ if pubkeys is None:
+ pubkeys = {}
+
+ content = """<?xml version="1.0" encoding="utf-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:wa="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ """
+ for key, dval in data.items():
+ if isinstance(dval, dict):
+ val = dict(dval).get("text")
+ attrs = " " + " ".join(
+ [
+ "%s='%s'" % (k, v)
+ for k, v in dict(dval).items()
+ if k != "text"
+ ]
+ )
+ else:
+ val = dval
+ attrs = ""
+ content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
+
+ if userdata:
+ content += "<UserData>%s</UserData>\n" % (b64e(userdata))
+
+ if pubkeys:
+ content += "<SSH><PublicKeys>\n"
+ for fp, path, value in pubkeys:
+ content += " <PublicKey>"
+ if fp and path:
+ content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % (
+ fp,
+ path,
+ )
+ if value:
+ content += "<Value>%s</Value>" % value
+ content += "</PublicKey>\n"
+ content += "</PublicKeys></SSH>"
+ content += """
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+ <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
+ <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
+ <ProvisionGuestAgent>false</ProvisionGuestAgent>
+ <GuestAgentPackageName i:nil="true" />"""
+ if platform_settings:
+ for k, v in platform_settings.items():
+ content += "<%s>%s</%s>\n" % (k, v, k)
+ if "PreprovisionedVMType" not in platform_settings:
+ content += """<PreprovisionedVMType i:nil="true" />"""
+ content += """</PlatformSettings></wa:PlatformSettingsSection>
+</Environment>"""
+
+ return content
+
+
+NETWORK_METADATA = {
+ "compute": {
+ "location": "eastus2",
+ "name": "my-hostname",
+ "offer": "UbuntuServer",
+ "osType": "Linux",
+ "placementGroupId": "",
+ "platformFaultDomain": "0",
+ "platformUpdateDomain": "0",
+ "publisher": "Canonical",
+ "resourceGroupName": "srugroup1",
+ "sku": "19.04-DAILY",
+ "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
+ "tags": "",
+ "version": "19.04.201906190",
+ "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
+ "vmScaleSetName": "",
+ "vmSize": "Standard_DS1_v2",
+ "zone": "",
+ "publicKeys": [{"keyData": "ssh-rsa key1", "path": "path1"}],
+ },
+ "network": {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+}
+
+SECONDARY_INTERFACE = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.1.5",
+ }
+ ],
+ },
+}
+
+SECONDARY_INTERFACE_NO_IP = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [],
+ },
+}
+
+IMDS_NETWORK_METADATA = {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+}
+
+EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
+
+
+class TestParseNetworkConfig(CiTestCase):
+
+ maxDiff = None
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_single_ipv4_nic_configuration(self, m_driver):
+ """parse_network_config emits dhcp on single nic with ipv4"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_increases_route_metric_for_non_primary_nics(self, m_driver):
+ """parse_network_config increases route-metric for each nic"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
+ """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp6": False,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ third_intf["ipv6"] = {
+ "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
+ """parse_network_config emits primary ipv4 as dhcp others are static"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
+ """parse_network_config emits primary ipv6 as dhcp others are static"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ # Secondary ipv6 addresses currently ignored/unconfigured
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver",
+ return_value="hv_netvsc",
+ )
+ def test_match_driver_for_netvsc(self, m_driver):
+ """parse_network_config emits driver when using netvsc."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:0d:3a:04:75:98",
+ "driver": "hv_netvsc",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network metadata is not present.
+ """
+ imds_metadata_missing_network_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_network_metadata["network"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(imds_metadata_missing_network_metadata),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network interface metadata is not present.
+ """
+ imds_metadata_missing_interface_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_interface_metadata["network"]["interface"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(
+ imds_metadata_missing_interface_metadata
+ ),
+ )
+
+
+class TestGetMetadataFromIMDS(HttprettyTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestGetMetadataFromIMDS, self).setUp()
+ self.network_md_url = "{}/instance?api-version=2019-06-01".format(
+ dsaz.IMDS_URL
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_instance_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3, md_type=dsaz.MetadataType.ALL)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_network_metadata_uses_network_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3, md_type=dsaz.MetadataType.NETWORK
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance/network?api-version="
+ "2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_get_default_metadata_uses_instance_url(self, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_extended_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2021-08-01&extended=true",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
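
The URLs asserted in the tests above follow one pattern: the base metadata endpoint, an optional /network path, an api-version query, and extended=true for the newer API version. A small sketch of that composition follows; the constant and parameter names are illustrative, not dsaz internals.

IMDS_BASE = "http://169.254.169.254/metadata"  # assumed base endpoint


def imds_url(api_version="2019-06-01", network_only=False, extended=False):
    path = "/instance/network" if network_only else "/instance"
    url = "{}{}?api-version={}".format(IMDS_BASE, path, api_version)
    if extended:
        url += "&extended=true"
    return url


assert imds_url() == (
    "http://169.254.169.254/metadata/instance?api-version=2019-06-01"
)
assert imds_url(network_only=True).endswith(
    "/instance/network?api-version=2019-06-01"
)
assert imds_url(api_version="2021-08-01", extended=True).endswith(
    "?api-version=2021-08-01&extended=true"
)
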
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_performs_dhcp_when_network_is_down(self, m_readurl):
+ """Perform DHCP setup when nic is not up."""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(NETWORK_METADATA).encode("utf-8")
+ )
+
+ self.assertEqual(
+ NETWORK_METADATA, dsaz.get_metadata_from_imds(retries=2)
+ )
+
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ m_readurl.assert_called_with(
+ self.network_md_url,
+ exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
+ retries=2,
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ infinite=False,
+ )
+
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_empty_when_no_imds_present(self, m_sleep):
+ """Return empty dict when IMDS network metadata is absent."""
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "/instance?api-version=2017-12-01",
+ body={},
+ status=404,
+ )
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=2))
+
+ self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("requests.Session.request")
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_retries_on_timeout(
+ self, m_sleep, m_request
+ ):
+ """Retry IMDS network metadata on timeout errors."""
+
+ self.attempt = 0
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
+
+ def retry_callback(request, uri, headers):
+ self.attempt += 1
+ raise requests.Timeout("Fake connection timeout")
+
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "instance?api-version=2017-12-01",
+ body=retry_callback,
+ )
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=3))
+
+ self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+
+class TestAzureDataSource(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestAzureDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ # patch cloud_dir, so our 'seed_dir' is guaranteed empty
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.waagent_d = os.path.join(self.tmp, "var", "lib", "waagent")
+
+ self.patches = ExitStack()
+ self.addCleanup(self.patches.close)
+
+ self.patches.enter_context(
+ mock.patch.object(dsaz, "_get_random_seed", return_value="wild")
+ )
+
+ self.m_dhcp = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "EphemeralDHCPv4",
+ autospec=True,
+ )
+ )
+ self.m_dhcp.return_value.lease = {}
+ self.m_dhcp.return_value.iface = "eth4"
+
+ self.m_get_metadata_from_imds = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "get_metadata_from_imds",
+ mock.MagicMock(return_value=NETWORK_METADATA),
+ )
+ )
+ self.m_fallback_nic = self.patches.enter_context(
+ mock.patch(
+ "cloudinit.sources.net.find_fallback_nic", return_value="eth9"
+ )
+ )
+ self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "maybe_remove_ubuntu_network_config_scripts",
+ mock.MagicMock(),
+ )
+ )
+ super(TestAzureDataSource, self).setUp()
+
+ def apply_patches(self, patches):
+ for module, name, new in patches:
+ self.patches.enter_context(mock.patch.object(module, name, new))
+
+ def _get_mockds(self):
+ sysctl_out = (
+ "dev.storvsc.3.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.2.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.1.%pnpinfo: "
+ "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "
+ "deviceid=00000000-0001-8899-0000-000000000000\n"
+ )
+ camctl_devbus = """
+scbus0 on ata0 bus 0
+scbus1 on ata1 bus 0
+scbus2 on blkvsc0 bus 0
+scbus3 on blkvsc1 bus 0
+scbus4 on storvsc2 bus 0
+scbus5 on storvsc3 bus 0
+scbus-1 on xpt0 bus 0
+ """
+ camctl_dev = """
+<Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0)
+<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
+<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
+ """
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "get_dev_storvsc_sysctl",
+ mock.MagicMock(return_value=sysctl_out),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev_bus",
+ mock.MagicMock(return_value=camctl_devbus),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev",
+ mock.MagicMock(return_value=camctl_dev),
+ ),
+ ]
+ )
+ return dsaz
+
+ def _get_ds(
+ self,
+ data,
+ distro="ubuntu",
+ apply_network=None,
+ instance_id=None,
+ write_ovf_to_data_dir: bool = False,
+ write_ovf_to_seed_dir: bool = True,
+ ):
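+        """Build a DataSourceAzure wired to fake seed data and mocks."""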
+ def _wait_for_files(flist, _maxwait=None, _naplen=None):
+ data["waited"] = flist
+ return []
+
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ yield from data.get("dsdevs", [])
+ if cache_dir:
+ yield cache_dir
+
+ seed_dir = os.path.join(self.paths.seed_dir, "azure")
+ if write_ovf_to_seed_dir and data.get("ovfcontent") is not None:
+ populate_dir(seed_dir, {"ovf-env.xml": data["ovfcontent"]})
+
+ if write_ovf_to_data_dir and data.get("ovfcontent") is not None:
+ populate_dir(self.waagent_d, {"ovf-env.xml": data["ovfcontent"]})
+
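+        # Point the datasource's builtin data_dir at the temp waagent dir.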
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ self.m_is_platform_viable = mock.MagicMock(autospec=True)
+ self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[])
+ self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
+ self.m_get_interfaces = mock.MagicMock(
+ return_value=[
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("eth0", "00:15:5d:69:63:ba", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ )
+ self.m_list_possible_azure_ds = mock.MagicMock(
+ side_effect=_load_possible_azure_ds
+ )
+
+ if instance_id:
+ self.instance_id = instance_id
+ else:
+ self.instance_id = EXAMPLE_UUID
+
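+        # DMI reads return the test instance-id and Azure's chassis asset tag.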
+ def _dmi_mocks(key):
+ if key == "system-uuid":
+ return self.instance_id
+ elif key == "chassis-asset-tag":
+ return "7783-7084-3265-9085-8269-3286-77"
+
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "list_possible_azure_ds",
+ self.m_list_possible_azure_ds,
+ ),
+ (dsaz, "_is_platform_viable", self.m_is_platform_viable),
+ (
+ dsaz,
+ "get_metadata_from_fabric",
+ self.m_get_metadata_from_fabric,
+ ),
+ (
+ dsaz,
+ "report_failure_to_fabric",
+ self.m_report_failure_to_fabric,
+ ),
+ (dsaz, "get_boot_telemetry", mock.MagicMock()),
+ (dsaz, "get_system_info", mock.MagicMock()),
+ (
+ dsaz.net,
+ "get_interface_mac",
+ mock.MagicMock(return_value="00:15:5d:69:63:ba"),
+ ),
+ (
+ dsaz.net,
+ "get_interfaces",
+ self.m_get_interfaces,
+ ),
+ (dsaz.subp, "which", lambda x: True),
+ (
+ dsaz.dmi,
+ "read_dmi_data",
+ mock.MagicMock(side_effect=_dmi_mocks),
+ ),
+ (
+ dsaz.util,
+ "wait_for_files",
+ mock.MagicMock(side_effect=_wait_for_files),
+ ),
+ ]
+ )
+
+ if isinstance(distro, str):
+ distro_cls = distros.fetch(distro)
+ distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths)
+ dsrc = dsaz.DataSourceAzure(
+ data.get("sys_cfg", {}), distro=distro, paths=self.paths
+ )
+ if apply_network is not None:
+ dsrc.ds_cfg["apply_network_config"] = apply_network
+
+ return dsrc
+
+ def _get_and_setup(self, dsrc):
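+        """Run get_data and, when it succeeds, the datasource setup stage."""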
+ ret = dsrc.get_data()
+ if ret:
+ dsrc.setup(True)
+ return ret
+
+ def xml_equals(self, oxml, nxml):
+        """Compare two XML documents to make sure they are equal."""
+
+ def create_tag_index(xml):
+ et = ET.fromstring(xml)
+ ret = {}
+ for x in et.iter():
+ ret[x.tag] = x
+ return ret
+
+ def tags_exists(x, y):
+ for tag in x.keys():
+ assert tag in y
+ for tag in y.keys():
+ assert tag in x
+
+ def tags_equal(x, y):
+ for x_val in x.values():
+ y_val = y.get(x_val.tag)
+ assert x_val.text == y_val.text
+
+ old_cnt = create_tag_index(oxml)
+ new_cnt = create_tag_index(nxml)
+ tags_exists(old_cnt, new_cnt)
+ tags_equal(old_cnt, new_cnt)
+
+ def xml_notequals(self, oxml, nxml):
+ try:
+ self.xml_equals(oxml, nxml)
+ except AssertionError:
+ return
+ raise AssertionError("XML is the same")
+
+ def test_get_resource_disk(self):
+ ds = self._get_mockds()
+ dev = ds.get_resource_disk_on_freebsd(1)
+ self.assertEqual("da1", dev)
+
+ def test_not_is_platform_viable_seed_should_return_no_datasource(self):
+        """get_data returns no datasource when _is_platform_viable is False."""
+        # Simulate a non-Azure platform (non-matching asset tag).
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = False
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ # Assert that for non viable platforms,
+ # there is no communication with the Azure datasource.
+ self.assertEqual(0, m_crawl_metadata.call_count)
+ self.assertEqual(0, m_report_failure.call_count)
+
+ def test_platform_viable_but_no_devs_should_return_no_datasource(self):
+        """When the platform is viable (matching asset tag) but no candidate
+        devices (sources to crawl for the Azure datasource) are found at all,
+        that is unexpected; report failure to Azure as a fatal error.
+        """
+ data = {}
+ dsrc = self._get_ds(data)
+ with mock.patch.object(dsrc, "_report_failure") as m_report_failure:
+ self.m_is_platform_viable.return_value = True
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ self.assertEqual(1, m_report_failure.call_count)
+
+ def test_crawl_metadata_exception_returns_no_datasource(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertFalse(ret)
+
+ def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ m_report_failure.assert_called_once_with(
+ description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+
+ def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertIn(
+ "Could not crawl Azure metadata", self.logs.getvalue()
+ )
+
+ def test_basic_seed_dir(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual(
+ "seed-dir (%s/seed/azure)" % self.tmp, dsrc.subplatform
+ )
+
+ def test_data_dir_without_imds_data(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(
+ data, write_ovf_to_data_dir=True, write_ovf_to_seed_dir=False
+ )
+
+ self.m_get_metadata_from_imds.return_value = {}
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("seed-dir (%s)" % self.waagent_d, dsrc.subplatform)
+
+ def test_basic_dev_file(self):
+ """When a device path is used, present that in subplatform."""
+ data = {"sys_cfg": {}, "dsdevs": ["/dev/cd0"]}
+ dsrc = self._get_ds(data)
+        # DSAzure tries DEFAULT_PROVISIONING_ISO_DEV (/dev/sr0) before
+        # /dev/cd0; the first mount_cb call is made to fail, so only the
+        # /dev/cd0 attempt succeeds.
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertEqual(dsrc.metadata["local-hostname"], "me")
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("config-disk (/dev/cd0)", dsrc.subplatform)
+
+ def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+ """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ dsrc = self._get_ds(data, distro="debian")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+ """get_data will remove ubuntu net scripts on Ubuntu distro."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+        """When apply_network_config is false, do not remove scripts on Ubuntu."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+ """Return all structured metadata and cache no class attributes."""
+ yaml_cfg = ""
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": "FOOBAR", "encoding": "plain"},
+ "dscfg": {"text": yaml_cfg, "encoding": "plain"},
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ expected_cfg = {
+ "PreprovisionedVMType": None,
+ "PreprovisionedVm": False,
+ "datasource": {"Azure": {}},
+ "system_info": {"default_user": {"name": "myuser"}},
+ }
+ expected_metadata = {
+ "azure_data": {
+ "configurationsettype": "LinuxProvisioningConfiguration"
+ },
+ "imds": NETWORK_METADATA,
+ "instance-id": EXAMPLE_UUID,
+ "local-hostname": "myhost",
+ "random_seed": "wild",
+ }
+
+ crawled_metadata = dsrc.crawl_metadata()
+
+ self.assertCountEqual(
+ crawled_metadata.keys(),
+ ["cfg", "files", "metadata", "userdata_raw"],
+ )
+ self.assertEqual(crawled_metadata["cfg"], expected_cfg)
+ self.assertEqual(
+ list(crawled_metadata["files"].keys()), ["ovf-env.xml"]
+ )
+ self.assertIn(
+ b"<HostName>myhost</HostName>",
+ crawled_metadata["files"]["ovf-env.xml"],
+ )
+ self.assertEqual(crawled_metadata["metadata"], expected_metadata)
+ self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR")
+ self.assertEqual(dsrc.userdata_raw, None)
+ self.assertEqual(dsrc.metadata, {})
+ self.assertEqual(dsrc._metadata_imds, UNSET)
+ self.assertFalse(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+
+ def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+ """crawl_metadata raises an exception on invalid ovf-env.xml."""
+ data = {"ovfcontent": "BOGUS", "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ error_msg = (
+ "BrokenAzureDataSource: Invalid ovf-env.xml:"
+ " syntax error: line 1, column 0"
+ )
+ with self.assertRaises(InvalidMetaDataException) as cm:
+ dsrc.crawl_metadata()
+ self.assertEqual(str(cm.exception), error_msg)
+
+ def test_crawl_metadata_call_imds_once_no_reprovision(self):
+        """When not reprovisioning, fetch IMDS metadata only once."""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ dsrc.crawl_metadata()
+ self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_call_imds_twice_with_reprovision(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, imds metadata will be fetched twice"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_on_reprovision_reports_ready(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, m_report_ready.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure."
+ "_wait_for_all_nics_ready"
+ )
+ def test_crawl_metadata_waits_for_nic_on_savable_vms(
+ self, detect_nics, poll_imds_func, report_ready_func, m_write
+ ):
+        """On Savable preprovisioned VMs, wait for nics and report ready."""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, report_ready_func.call_count)
+ self.assertEqual(1, detect_nics.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready",
+ return_value=True,
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.readurl")
+ def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
+ self, m_readurl, m_report_ready, m_media_switch, m_write
+ ):
+ """If reprovisioning, report ready using the obtained lease"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+
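+        # DHCP option 245 ("unknown-245") carries the Azure wireserver endpoint.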
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = lease
+ m_media_switch.return_value = None
+
+ reprovision_ovfenv = construct_valid_ovf_env()
+ m_readurl.return_value = url_helper.StringResponse(
+ reprovision_ovfenv.encode("utf-8")
+ )
+
+ dsrc.crawl_metadata()
+
+ assert m_report_ready.mock_calls == [
+ mock.call(),
+ mock.call(pubkey_info=None),
+ ]
+
+ def test_waagent_d_has_0700_perms(self):
+ # we expect /var/lib/waagent to be created 0700
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(os.path.isdir(self.waagent_d))
+ self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds(self, m_driver):
+ """Datasource.network_config returns IMDS network data."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_route_metric_for_secondary_nic(
+ self, m_driver
+ ):
+ """Datasource.network_config adds route-metric to secondary nics."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+ self, m_driver
+ ):
+        """If an IP address is empty then there should be no config for it."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP)
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ def test_availability_zone_set_from_imds(self):
+        """Datasource.availability_zone returns IMDS platformFaultDomain."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("0", dsrc.availability_zone)
+
+ def test_region_set_from_imds(self):
+ """Datasource.region returns IMDS region location."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("eastus2", dsrc.region)
+
+ def test_sys_cfg_set_never_destroy_ntfs(self):
+ sys_cfg = {
+ "datasource": {
+ "Azure": {"never_destroy_ntfs": "user-supplied-value"}
+ }
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data={}),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+ "user-supplied-value",
+ )
+
+ def test_username_used(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "myuser"
+ )
+
+ def test_password_given(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+        # default user should be set to the supplied username and not locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertFalse(defuser["lock_passwd"])
+        # passwd is a crypt-formatted string: $id$salt$encrypted.
+        # Re-encrypting the plaintext with everything up to the final '$'
+        # as the salt should reproduce the stored value.
+ pos = defuser["passwd"].rfind("$") + 1
+ self.assertEqual(
+ defuser["passwd"],
+ crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]),
+ )
+
+ # the same hashed value should also be present in cfg['password']
+ self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
+
+ def test_user_not_locked_if_password_redacted(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": dsaz.DEF_PASSWD_REDACTION,
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+        # default user should be set to the supplied username and not locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertIn("lock_passwd", defuser)
+ self.assertFalse(defuser["lock_passwd"])
+
+ def test_userdata_plain(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": mydata, "encoding": "plain"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
+ def test_userdata_found(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, mydata.encode("utf-8"))
+
+ def test_default_ephemeral_configs_ephemeral_exists(self):
+ # make sure the ephemeral configs are correct if disk present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEqual(
+ dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH,
+ )
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
+
+ def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+ # make sure the ephemeral configs are correct if disk not present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ assert "disk_setup" not in cfg
+ assert "fs_setup" not in cfg
+
+ def test_provide_disk_aliases(self):
+ # Make sure that user can affect disk aliases
+ dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"},
+ }
+ usercfg = {
+ "disk_setup": {
+ "/dev/sdc": {"something": "..."},
+ "ephemeral0": False,
+ }
+ }
+        userdata = "#cloud-config\n" + yaml.dump(usercfg) + "\n"
+
+ ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
+ data = {"ovfcontent": ovfcontent, "sys_cfg": {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+ self.assertTrue(cfg)
+
+ def test_userdata_arrives(self):
+ userdata = "This is my user-data"
+ xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ data = {"ovfcontent": xml}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw)
+
+ def test_password_redacted_in_ovf(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+
+        # The XML should not be the same since the user password is redacted
+ on_disk_ovf = load_file(ovf_env_path)
+ self.xml_notequals(data["ovfcontent"], on_disk_ovf)
+
+ # Make sure that the redacted password on disk is not used by CI
+ self.assertNotEqual(
+ dsrc.cfg.get("password"), dsaz.DEF_PASSWD_REDACTION
+ )
+
+ # Make sure that the password was really encrypted
+ et = ET.fromstring(on_disk_ovf)
+ for elem in et.iter():
+ if "UserPassword" in elem.tag:
+ self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
+
+ def test_ovf_env_arrives_in_waagent_dir(self):
+ xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
+ # we expect that the ovf-env.xml file is copied there.
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+ self.assertTrue(os.path.exists(ovf_env_path))
+ self.xml_equals(xml, load_file(ovf_env_path))
+
+ def test_ovf_can_include_unicode(self):
+ xml = construct_valid_ovf_env(data={})
+ xml = "\ufeff{0}".format(xml)
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ def test_dsaz_report_ready_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ assert dsrc._report_ready() == []
+
+ @mock.patch(MOCKPATH + "report_diagnostic_event")
+ def test_dsaz_report_ready_failure_reports_telemetry(self, m_report_diag):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception("foo")
+
+ with pytest.raises(Exception):
+ dsrc._report_ready()
+
+ assert m_report_diag.mock_calls == [
+ mock.call(
+ "Error communicating with Azure fabric; "
+ "You may experience connectivity issues: foo",
+ logger_func=dsaz.LOG.warning,
+ )
+ ]
+
+ def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+ self.assertEqual(1, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
+ self,
+ ):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_ephemeral_dhcp_ctx"
+ ) as m_ephemeral_dhcp_ctx, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # setup mocks to allow using cached ephemeral dhcp lease
+ m_dsrc_distro_networking_is_up.return_value = True
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
+ m_ephemeral_dhcp_ctx.lease = test_lease
+
+ # We expect 2 calls to report_failure_to_fabric,
+ # because we try 2 different methods of calling report failure.
+ # The different methods are attempted in the following order:
+ # 1. Using cached ephemeral dhcp context to report failure to Azure
+ # 2. Using new ephemeral dhcp to report failure to Azure
+ self.m_report_failure_to_fabric.side_effect = Exception
+ self.assertFalse(dsrc._report_failure())
+ self.assertEqual(2, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_msg = "Test report failure description message"
+ self.assertTrue(dsrc._report_failure(description=test_msg))
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=test_msg
+ )
+
+ def test_dsaz_report_failure_no_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure()) # no description msg
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=None
+ )
+
+ def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_wireserver_endpoint", return_value="test-ep"
+ ) as m_wireserver_endpoint:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with cached ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=m_wireserver_endpoint
+ )
+
+ def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {
+ "unknown-245": test_lease_dhcp_option_245,
+ "interface": "eth0",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = test_lease
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with the newly discovered
+ # ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ )
+
+ def test_exception_fetching_fabric_data_doesnt_propagate(self):
+ """Errors communicating with fabric should warn, but return True."""
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+
+ def test_fabric_data_included_in_metadata(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.return_value = ["ssh-key-value"]
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(["ssh-key-value"], dsrc.metadata["public-keys"])
+
+ def test_instance_id_case_insensitive(self):
+ """Return the previous iid when current is a case-insensitive match."""
+ lower_iid = EXAMPLE_UUID.lower()
+ upper_iid = EXAMPLE_UUID.upper()
+ # lowercase current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid
+ )
+ # UPPERCASE previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ upper_iid,
+ )
+ ds.get_data()
+ self.assertEqual(upper_iid, ds.metadata["instance-id"])
+
+ # UPPERCASE current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid
+ )
+ # lowercase previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ lower_iid,
+ )
+ ds.get_data()
+ self.assertEqual(lower_iid, ds.metadata["instance-id"])
+
+ def test_instance_id_endianness(self):
+ """Return the previous iid when dmi uuid is the byteswapped iid."""
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ # byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", ds.metadata["instance-id"]
+ )
+ # not byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used_for_builtin(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ @mock.patch(MOCKPATH + "util.is_FreeBSD")
+ @mock.patch(MOCKPATH + "_check_freebsd_cdrom")
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, m_is_FreeBSD):
+ """On FreeBSD, possible devs should show /dev/cd0."""
+ m_is_FreeBSD.return_value = True
+ m_check_fbsd_cdrom.return_value = True
+ possible_ds = []
+ for src in dsaz.list_possible_azure_ds("seed_dir", "cache_dir"):
+ possible_ds.append(src)
+ self.assertEqual(
+ possible_ds,
+ [
+ "seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir",
+ ],
+ )
+ self.assertEqual(
+ [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_config(self, mock_fallback, m_driver):
+ """Network config is generated from IMDS network data when present."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ expected_cfg = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+
+ self.assertEqual(expected_cfg, dsrc.network_config)
+ mock_fallback.assert_not_called()
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_ignored_when_apply_network_config_false(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """When apply_network_config is False, use fallback instead of IMDS."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.network_config, fallback_config)
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config", autospec=True)
+ def test_fallback_network_config(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """On absent IMDS network data, generate network fallback config."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ # Represent empty response from network imds
+ self.m_get_metadata_from_imds.return_value = {}
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ netconfig = dsrc.network_config
+ self.assertEqual(netconfig, fallback_config)
+ mock_fallback.assert_called_with(
+ blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True
+ )
+
+ @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
+ def test_blacklist_through_distro(self, m_net_get_interfaces):
+ """Verify Azure DS updates blacklist drivers in the distro's
+ networking object."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsrc = self._get_ds(data, distro=distro)
+ dsrc.get_data()
+ self.assertEqual(
+ distro.networking.blacklist_drivers, dsaz.BLACKLIST_DRIVERS
+ )
+
+ distro.networking.get_interfaces_by_mac()
+ self.m_get_interfaces.assert_called_with(
+ blacklist_drivers=dsaz.BLACKLIST_DRIVERS
+ )
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["ssh-rsa key1"])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ def test_key_without_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_with_crlf_invalid(self):
+ test_key = "ssh-rsa someran\r\ndomkeystuff some comment"
+ assert False is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_endswith_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment\r\n"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_with_no_openssh_format(
+ self, m_get_metadata_from_imds, m_parse_certificates
+ ):
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
+ m_get_metadata_from_imds.return_value = imds_data
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, [])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
+ m_get_metadata_from_imds.return_value = dict()
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsaz.get_metadata_from_fabric.return_value = ["key2"]
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["key2"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_imds_api_version_wanted_nonexistent(
+ self, m_get_metadata_from_imds
+ ):
+ def get_metadata_from_imds_side_eff(*args, **kwargs):
+ if kwargs["api_version"] == dsaz.IMDS_VER_WANT:
+ raise url_helper.UrlError("No IMDS version", code=400)
+ return NETWORK_METADATA
+
+ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ ),
+ mock.call(
+ retries=10,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2019-06-01",
+ exc_cb=mock.ANY,
+ infinite=False,
+ ),
+ ]
+
+ @mock.patch(
+ MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA
+ )
+ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ )
+ ]
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_hostname_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_username_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "username1"
+ )
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertTrue(dsrc.metadata["disable_password"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ userdata = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdata)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8"))
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds_with_customdata_from_OVF(
+ self, m_get_metadata_from_imds
+ ):
+ userdataOVF = "userdataOVF"
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": b64e(userdataOVF), "encoding": "base64"},
+ }
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ userdataImds = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdataImds)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode("utf-8"))
+
+
+class TestLoadAzureDsDir(CiTestCase):
+ """Tests for load_azure_ds_dir."""
+
+ def setUp(self):
+ self.source_dir = self.tmp_dir()
+ super(TestLoadAzureDsDir, self).setUp()
+
+ def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+        """load_azure_ds_dir raises an error when ovf-env.xml doesn't exist."""
+ with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "No ovf-env file found", str(context_manager.exception)
+ )
+
+ def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+ """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+ ovf_path = os.path.join(self.source_dir, "ovf-env.xml")
+ with open(ovf_path, "wb") as stream:
+ stream.write(b"invalid xml")
+ with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "Invalid ovf-env.xml: syntax error: line 1, column 0",
+ str(context_manager.exception),
+ )
+
+
+class TestReadAzureOvf(CiTestCase):
+ def test_invalid_xml_raises_non_azure_ds(self):
+ invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+ self.assertRaises(
+ dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml
+ )
+
+ def test_load_with_pubkeys(self):
+ mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
+ pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist]
+ content = construct_valid_ovf_env(pubkeys=pubkeys)
+ (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
+ for mypk in mypklist:
+ self.assertIn(mypk, cfg["_pubkeys"])
+
+
+class TestCanDevBeReformatted(CiTestCase):
+ warning_file = "dataloss_warning_readme.txt"
+
+ def _domock(self, mockpath, sattr=None):
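+        """Patch 'mockpath' and store the started mock on self as 'sattr'."""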
+ patcher = mock.patch(mockpath)
+ setattr(self, sattr, patcher.start())
+ self.addCleanup(patcher.stop)
+
+ def patchup(self, devs):
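+        """Patch partition/mount helpers to answer from the fake 'devs' map."""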
+ bypath = {}
+ for path, data in devs.items():
+ bypath[path] = data
+ if "realpath" in data:
+ bypath[data["realpath"]] = data
+ for ppath, pdata in data.get("partitions", {}).items():
+ bypath[ppath] = pdata
+ if "realpath" in data:
+ bypath[pdata["realpath"]] = pdata
+
+ def realpath(d):
+ return bypath[d].get("realpath", d)
+
+ def partitions_on_device(devpath):
+ parts = bypath.get(devpath, {}).get("partitions", {})
+ ret = []
+ for path, data in parts.items():
+ ret.append((data.get("num"), realpath(path)))
+ # return sorted by partition number
+ return sorted(ret, key=lambda d: d[0])
+
+ def mount_cb(device, callback, mtype, update_env_for_mount):
+ self.assertEqual("ntfs", mtype)
+ self.assertEqual("C", update_env_for_mount.get("LANG"))
+ p = self.tmp_dir()
+ for f in bypath.get(device).get("files", []):
+ write_file(os.path.join(p, f), content=f)
+ return callback(p)
+
+ def has_ntfs_fs(device):
+ return bypath.get(device, {}).get("fs") == "ntfs"
+
+ p = MOCKPATH
+ self._domock(p + "_partitions_on_device", "m_partitions_on_device")
+ self._domock(p + "_has_ntfs_filesystem", "m_has_ntfs_filesystem")
+ self._domock(p + "util.mount_cb", "m_mount_cb")
+ self._domock(p + "os.path.realpath", "m_realpath")
+ self._domock(p + "os.path.exists", "m_exists")
+ self._domock(p + "util.SeLinuxGuard", "m_selguard")
+
+ self.m_exists.side_effect = lambda p: p in bypath
+ self.m_realpath.side_effect = realpath
+ self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
+ self.m_mount_cb.side_effect = mount_cb
+ self.m_partitions_on_device.side_effect = partitions_on_device
+ self.m_selguard.__enter__ = mock.Mock(return_value=False)
+ self.m_selguard.__exit__ = mock.Mock()
+
+ def test_three_partitions_is_false(self):
+ """A disk with 3 partitions can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2},
+ "/dev/sda3": {"num": 3},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_no_partitions_is_false(self):
+ """A disk with no partitions can not be formatted."""
+ self.patchup({"/dev/sda": {}})
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not partitioned", msg.lower())
+
+ def test_two_partitions_not_ntfs_false(self):
+ """2 partitions and 2nd not ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ext4", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_two_partitions_ntfs_populated_false(self):
+ """2 partitions and populated ntfs fs on 2nd can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {
+ "num": 2,
+ "fs": "ntfs",
+ "files": ["secret.txt"],
+ },
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_two_partitions_ntfs_empty_is_true(self):
+ """2 partitions and empty ntfs fs on 2nd can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ntfs", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_not_ntfs_false(self):
+        """1 partition with fs other than ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "zfs"},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_one_partition_ntfs_populated_false(self):
+ """1 mountable ntfs partition with many files can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["file1.txt", "file2.exe"],
+ },
+ }
+ }
+ }
+ )
+ with mock.patch.object(dsaz.LOG, "warning") as warning:
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ wmsg = warning.call_args[0][0]
+ self.assertIn(
+ "looks like you're using NTFS on the ephemeral disk", wmsg
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_one_partition_ntfs_empty_is_true(self):
+ """1 mountable ntfs partition and no files can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
+        """1 ntfs partition containing only the warning file can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_through_realpath_is_true(self):
+ """A symlink to a device with 1 ntfs partition can be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ }
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_three_partition_through_realpath_is_false(self):
+ """A symlink to a device with 3 partitions can not be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ },
+ epath
+ + "-part2": {
+ "num": 2,
+ "fs": "ext3",
+ "realpath": "/dev/sdb2",
+ },
+ epath
+ + "-part3": {
+ "num": 3,
+ "fs": "ext",
+ "realpath": "/dev/sdb3",
+ },
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_ntfs_mount_errors_true(self):
+ """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+
+ error_msgs = [
+ "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
+ "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'", # SLES
+ ]
+
+ for err_msg in error_msgs:
+ self.m_mount_cb.side_effect = MountFailedError(
+ "Failed mounting %s to %s due to: \nUnexpected.\n%s"
+ % ("/dev/sda", "/fake-tmp/dir", err_msg)
+ )
+
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("cannot mount NTFS, assuming", msg)
+
+ def test_never_destroy_ntfs_config_false(self):
+ """Normally formattable situation with never_destroy_ntfs set."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=True
+ )
+ self.assertFalse(value)
+ self.assertIn(
+ "config says to never destroy NTFS "
+ "(datasource.Azure.never_destroy_ntfs)",
+ msg,
+ )
+
+
+class TestClearCachedData(CiTestCase):
+ def test_clear_cached_attrs_clears_imds(self):
+ """All class attributes are reset to defaults, including imds data."""
+ tmp = self.tmp_dir()
+ paths = helpers.Paths({"cloud_dir": tmp, "run_dir": tmp})
+ dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
+ clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
+ dsrc.metadata = "md"
+ dsrc.userdata = "ud"
+ dsrc._metadata_imds = "imds"
+ dsrc._dirty_cache = True
+ dsrc.clear_cached_attrs()
+ self.assertEqual(
+ [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], clean_values
+ )
+
+
+class TestAzureNetExists(CiTestCase):
+ def test_azure_net_must_exist_for_legacy_objpkl(self):
+ """DataSourceAzureNet must exist for old obj.pkl files
+ that reference it."""
+ self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
+
+
+class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
+ def test_read_azure_ovf_with_true_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag if the proper setting is present."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_with_false_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag to false if the proper setting is false."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_without_flag(self):
+ """The read_azure_ovf method should not set the
+ PreprovisionedVM cfg flag."""
+ content = construct_valid_ovf_env()
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+ self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_running_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Running."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Running",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Running", cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_savable_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Savable."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Savable", cfg["PreprovisionedVMType"])
+
+
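+ # Scenarios cover PPS type detection from the OVF cfg (PreprovisionedVm /
+ # PreprovisionedVMType), from IMDS extended.compute.ppsType, and from both
+ # sources together.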
+@pytest.mark.parametrize(
+ "ovf_cfg,imds_md,pps_type",
+ [
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ ],
+)
+class TestDeterminePPSTypeScenarios:
+ @mock.patch("os.path.isfile", return_value=False)
+ def test_determine_pps_without_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert azure_ds._determine_pps_type(ovf_cfg, imds_md) == pps_type
+
+ @mock.patch("os.path.isfile", return_value=True)
+ def test_determine_pps_with_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert (
+ azure_ds._determine_pps_type(ovf_cfg, imds_md)
+ == dsaz.PPSType.UNKNOWN
+ )
+ assert is_file.mock_calls == [mock.call(dsaz.REPROVISION_MARKER_FILE)]
+
+
+@mock.patch("os.path.isfile", return_value=False)
+class TestReprovision(CiTestCase):
+ def setUp(self):
+ super(TestReprovision, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch(MOCKPATH + "DataSourceAzure._poll_imds")
+ def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
+ """_reprovision will poll IMDS."""
+ isfile.return_value = False
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._reprovision()
+ _poll_imds.assert_called_with()
+
+
+class TestPreprovisioningHotAttachNics(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningHotAttachNics, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_nic_detach_event",
+ autospec=True,
+ )
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ def test_nic_detach_writes_marker(self, m_writefile, m_detach):
+ """When we detect that a nic gets detached, we write a marker for it"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ nl_sock = mock.MagicMock()
+ dsa._wait_for_nic_detach(nl_sock)
+ m_detach.assert_called_with(nl_sock)
+ self.assertEqual(1, m_detach.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_reports_ready_and_waits_for_detach(
+ self, m_detach, m_report_ready, m_fallback_if, m_writefile
+ ):
+ """Report ready first and then wait for nic detach"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(1, m_report_ready.call_count)
+ self.assertEqual(1, m_detach.call_count)
+ self.assertEqual(1, m_writefile.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_report_ready_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip reporting ready if we already have a marker file."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ def isfile(key):
+ return key == dsaz.REPORTED_READY_MARKER_FILE
+
+ m_isfile.side_effect = isfile
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(1, m_detach.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_nic_detach_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip wait for nic detach if it already happened."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ m_isfile.return_value = True
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_detach.call_count)
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up", autospec=True)
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_if_no_fallback_interface(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
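+ # A fake DHCP lease; "unknown-245" is DHCP option 245, which Azure uses
+ # to advertise the wireserver endpoint.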
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ m_isfile.return_value = True
+ m_attach.return_value = "eth0"
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.return_value = IMDS_NETWORK_METADATA
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(1, m_attach.call_count)
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(1, m_link_up.call_count)
+ m_link_up.assert_called_with(mock.ANY, "eth0")
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_multinic_attach(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_isfile.return_value = True
+ m_attach.side_effect = [
+ "eth0",
+ "eth1",
+ ]
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.side_effect = [md]
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(2, m_attach.call_count)
+ # DHCP and network metadata calls will only happen on the primary NIC.
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(2, m_link_up.call_count)
+
+ @mock.patch("cloudinit.url_helper.time.sleep", autospec=True)
+ @mock.patch("requests.Session.request", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_check_if_nic_is_primary_retries_on_failures(
+ self, m_dhcpv4, m_request, m_sleep
+ ):
+ """Retry polling for network metadata on all failures except timeout
+ and network unreachable errors"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_req = mock.Mock(content=json.dumps(md))
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout"),
+ requests.ConnectionError("Fake Network Unreachable"),
+ m_req,
+ ]
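+ # The first two failures (timeout, connection error) are retried; the
+ # third request succeeds.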
+ m_dhcpv4.return_value.lease = lease
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+ self.assertEqual(True, is_primary)
+ self.assertEqual(2, expected_nic_count)
+ assert len(m_request.mock_calls) == 3
+
+ # Re-run tests to verify max retries.
+ m_request.reset_mock()
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout")
+ ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
+
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+ self.assertEqual(False, is_primary)
+ assert len(m_request.mock_calls) == 11
+
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_returns_if_already_up(self, m_is_link_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_is_link_up.return_value = True
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(1, m_is_link_up.call_count)
+
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_checks_link_after_sleep(
+ self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up
+ ):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_try_set_link_up.return_value = False
+
+ callcount = 0
+
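+ # Simulate net.is_up reporting the link down on the first check and up
+ # on the second.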
+ def is_up_mock(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_up.side_effect = is_up_mock
+
+ with mock.patch("cloudinit.sources.DataSourceAzure.sleep"):
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_try_set_link_up.call_count)
+ self.assertEqual(2, m_is_up.call_count)
+
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_writes_to_device_file(
+ self, m_is_link_up, m_read_sys_net, m_writefile
+ ):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ callcount = 0
+
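+ # Simulate try_set_link_up reporting failure on the first attempt and
+ # success on the second.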
+ def linkup(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_link_up.side_effect = linkup
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_is_link_up.call_count)
+ self.assertEqual(1, m_read_sys_net.call_count)
+ self.assertEqual(2, m_writefile.call_count)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
+ )
+ def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+ """Waiting for all nics should raise exception if netlink socket
+ creation fails."""
+
+ m_socket.side_effect = netlink.NetlinkCreateSocketError
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ self.assertRaises(
+ netlink.NetlinkCreateSocketError, dsa._wait_for_all_nics_ready
+ )
+
+
+@mock.patch("cloudinit.net.find_fallback_nic", return_value="eth9")
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", return_value=True)
+class TestPreprovisioningPollIMDS(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningPollIMDS, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ def test_poll_imds_re_dhcp_on_timeout(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds will retry DHCP on IMDS timeout."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ m_dhcp.return_value = [lease]
+ m_media_switch.return_value = None
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+
+ self.tries = 0
+
+ def fake_timeout_once(**kwargs):
+ self.tries += 1
+ if self.tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ elif self.tries in (2, 3):
+ response = requests.Response()
+ response.status_code = 404 if self.tries == 2 else 410
+ raise requests.exceptions.HTTPError(
+ "fake {}".format(response.status_code), response=response
+ )
+ # Fourth try should succeed and stop retries or re-dhcp
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+
+ assert m_report_ready.mock_calls == [mock.call()]
+
+ self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")
+
+ @mock.patch("os.path.isfile")
+ def test_poll_imds_skips_dhcp_if_ctx_present(
+ self,
+ m_isfile,
+ report_ready_func,
+ fake_resp,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(lease={})
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
+ self,
+ m_ephemeral_dhcpv4,
+ m_isfile,
+ report_ready_func,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+
+ tries = 0
+
+ def fake_timeout_once(**kwargs):
+ nonlocal tries
+ tries += 1
+ if tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ ), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
+ m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+ dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+ dsa._poll_imds()
+ self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+ self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+ self.assertEqual(2, m_request.call_count)
+
+ def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should not call report ready when the reported ready
+ marker file exists"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ write_file(report_file, content="dont run report_ready :)")
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 0)
+
+ def test_poll_imds_report_ready_success_writes_marker_file(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertTrue(os.path.exists(report_file))
+
+ def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ m_report_ready.side_effect = [Exception("fail")]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertFalse(os.path.exists(report_file))
+
+
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", mock.MagicMock())
+@mock.patch(MOCKPATH + "subp.subp", mock.MagicMock())
+@mock.patch(MOCKPATH + "util.write_file", mock.MagicMock())
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("requests.Session.request")
+class TestAzureDataSourcePreprovisioning(CiTestCase):
+ def setUp(self):
+ super(TestAzureDataSourcePreprovisioning, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ def test_poll_imds_returns_ovf_env(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _poll_imds method should return the ovf_env.xml."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text="ovf", content="ovf"
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ self.assertTrue(len(dsa._poll_imds()) > 0)
+ self.assertEqual(
+ m_request.call_args_list,
+ [
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ )
+ ],
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+ def test__reprovision_calls__poll_imds(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _reprovision method should call poll IMDS."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ content = construct_valid_ovf_env(data=odata)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text=content, content=content
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ md, _ud, cfg, _d = dsa._reprovision()
+ self.assertEqual(md["local-hostname"], hostname)
+ self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
+ self.assertIn(
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ ),
+ m_request.call_args_list,
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+
+class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_remove_network_scripts_removes_both_files_and_directories(self):
+ """Any files or directories in paths are removed when present."""
+ file1 = self.tmp_path("file1", dir=self.tmp)
+ subdir = self.tmp_path("sub1", dir=self.tmp)
+ subfile = self.tmp_path("leaf1", dir=subdir)
+ write_file(file1, "file1content")
+ write_file(subfile, "leafcontent")
+ dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
+
+ for path in (file1, subdir, subfile):
+ self.assertFalse(
+ os.path.exists(path), "Found unremoved: %s" % path
+ )
+
+ expected_logs = [
+ "INFO: Removing Ubuntu extended network scripts because cloud-init"
+ " updates Azure network configuration on the following events:"
+ " ['boot', 'boot-legacy']",
+ "Recursively deleting %s" % subdir,
+ "Attempting to remove %s" % file1,
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.logs.getvalue())
+
+ def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
+ """Any files or directories absent are skipped without error."""
+ dsaz.maybe_remove_ubuntu_network_config_scripts(
+ paths=[
+ self.tmp_path("nodirhere/", dir=self.tmp),
+ self.tmp_path("notfilehere", dir=self.tmp),
+ ]
+ )
+ self.assertNotIn("/not/a", self.logs.getvalue()) # No delete logs
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ def test_remove_network_scripts_default_removes_stock_scripts(
+ self, m_exists
+ ):
+ """Azure's stock ubuntu image scripts and artifacts are removed."""
+ # Report path absent on all to avoid delete operation
+ m_exists.return_value = False
+ dsaz.maybe_remove_ubuntu_network_config_scripts()
+ calls = m_exists.call_args_list
+ for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
+ self.assertIn(mock.call(path), calls)
+
+
+class TestWBIsPlatformViable(CiTestCase):
+ """White box tests for _is_platform_viable."""
+
+ with_logs = True
+
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+ def test_true_on_non_azure_chassis(self, m_read_dmi_data):
+ """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
+ self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+ def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
+ """Return True if ovf-env.xml exists in known seed dirs."""
+ # Non-matching Azure chassis-asset-tag
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+
+ m_exist.return_value = True
+ self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
+ m_exist.called_once_with("/other/seed/dir")
+
+ def test_false_on_no_matching_azure_criteria(self):
+ """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
+
+ Return False when the asset tag doesn't match Azure's static
+ AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
+ and no devices have a label starting with prefix 'rd_rdfe_'.
+ """
+ self.assertFalse(
+ wrap_and_call(
+ MOCKPATH,
+ {
+ "os.path.exists": False,
+ # Non-matching Azure chassis-asset-tag
+ "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
+ "subp.which": None,
+ },
+ dsaz._is_platform_viable,
+ "doesnotmatter",
+ )
+ )
+ self.assertIn(
+ "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
+ dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+ ),
+ self.logs.getvalue(),
+ )
+
+
+class TestRandomSeed(CiTestCase):
+ """Test proper handling of random_seed"""
+
+ def test_non_ascii_seed_is_serializable(self):
+ """Pass if a random string from the Azure infrastructure which
+ contains at least one non-Unicode character can be converted to/from
+ JSON without alteration and without throwing an exception.
+ """
+ path = resourceLocation("azure/non_unicode_random_string")
+ result = dsaz._get_random_seed(path)
+
+ obj = {"seed": result}
+ try:
+ serialized = json_dumps(obj)
+ deserialized = load_json(serialized)
+ except UnicodeDecodeError:
+ self.fail("Non-serializable random seed returned")
+
+ self.assertEqual(deserialized["seed"], result)
+
+
+class TestProvisioning:
+ @pytest.fixture(autouse=True)
+ def provisioning_setup(
+ self,
+ azure_ds,
+ mock_azure_get_metadata_from_fabric,
+ mock_azure_report_failure_to_fabric,
+ mock_net_dhcp_maybe_perform_dhcp_discovery,
+ mock_net_dhcp_EphemeralIPv4Network,
+ mock_dmi_read_dmi_data,
+ mock_get_interfaces,
+ mock_get_interface_mac,
+ mock_netlink,
+ mock_os_path_isfile,
+ mock_readurl,
+ mock_subp_subp,
+ mock_util_ensure_dir,
+ mock_util_find_devs_with,
+ mock_util_load_file,
+ mock_util_mount_cb,
+ mock_util_write_file,
+ ):
+ self.azure_ds = azure_ds
+ self.mock_azure_get_metadata_from_fabric = (
+ mock_azure_get_metadata_from_fabric
+ )
+ self.mock_azure_report_failure_to_fabric = (
+ mock_azure_report_failure_to_fabric
+ )
+ self.mock_net_dhcp_maybe_perform_dhcp_discovery = (
+ mock_net_dhcp_maybe_perform_dhcp_discovery
+ )
+ self.mock_net_dhcp_EphemeralIPv4Network = (
+ mock_net_dhcp_EphemeralIPv4Network
+ )
+ self.mock_dmi_read_dmi_data = mock_dmi_read_dmi_data
+ self.mock_get_interfaces = mock_get_interfaces
+ self.mock_get_interface_mac = mock_get_interface_mac
+ self.mock_netlink = mock_netlink
+ self.mock_os_path_isfile = mock_os_path_isfile
+ self.mock_readurl = mock_readurl
+ self.mock_subp_subp = mock_subp_subp
+ self.mock_util_ensure_dir = mock_util_ensure_dir
+ self.mock_util_find_devs_with = mock_util_find_devs_with
+ self.mock_util_load_file = mock_util_load_file
+ self.mock_util_mount_cb = mock_util_mount_cb
+ self.mock_util_write_file = mock_util_write_file
+
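+ # Minimal IMDS instance metadata: no PPS and a single NIC at 10.0.0.22.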
+ self.imds_md = {
+ "extended": {"compute": {"ppsType": "None"}},
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ },
+ }
+
+ def test_no_pps(self):
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [False, False, False]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup once.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb)
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready once.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ )
+ ]
+
+ # Verify netlink.
+ assert self.mock_netlink.mock_calls == []
+
+ def test_running_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Running"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [False, False, False, False]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call(None, dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Running PPS.
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_media_disconnect_connect(mock.ANY, "ethBoot0"),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+ def test_savable_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Savable"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_netlink.wait_for_nic_detach_event.return_value = "eth9"
+ self.mock_netlink.wait_for_nic_attach_event.return_value = (
+ "ethAttached1"
+ )
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=json.dumps(self.imds_md["network"]).encode()
+ ),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [
+ False, # /var/lib/cloud/data/poll_imds
+ False, # seed/azure/ovf-env.xml
+ False, # /var/lib/cloud/data/poll_imds
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/nic_detached
+ True, # /var/lib/cloud/data/reported_ready
+ ]
+ self.azure_ds._fallback_interface = False
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/nic_detached"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance/network?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=mock.ANY,
+ infinite=True,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is setup twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call("ethAttached1", dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Savable PPS.
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_nic_detach_event(nl_sock),
+ mock.call.wait_for_nic_attach_event(nl_sock, ["ethAttached1"]),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+
+class TestValidateIMDSMetadata:
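+ # normalize_mac_address lowercases MACs and inserts colons into bare
+ # 12-hex-digit strings, passing through anything it cannot parse unchanged.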
+ @pytest.mark.parametrize(
+ "mac,expected",
+ [
+ ("001122aabbcc", "00:11:22:aa:bb:cc"),
+ ("001122AABBCC", "00:11:22:aa:bb:cc"),
+ ("00:11:22:aa:bb:cc", "00:11:22:aa:bb:cc"),
+ ("00:11:22:AA:BB:CC", "00:11:22:aa:bb:cc"),
+ ("pass-through-the-unexpected", "pass-through-the-unexpected"),
+ ("", ""),
+ ],
+ )
+ def test_normalize_scenarios(self, mac, expected):
+ normalized = dsaz.normalize_mac_address(mac)
+ assert normalized == expected
+
+ def test_empty(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+ imds_md = {}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata has incomplete configuration: None",
+ ) in caplog.record_tuples
+
+ def test_validates_one_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ }
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_validates_multiple_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_missing_all(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {"network": {"interface": []}}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ "['00:11:22:33:44:55', '01:11:22:33:44:55']: "
+ f"{imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_primary(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ f"['00:11:22:33:44:55']: {imds_md['network']!r}",
+ ) in caplog.record_tuples
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing primary NIC "
+ f"'00:11:22:33:44:55': {imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_secondary(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index b8899807..98143bc3 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -9,10 +9,9 @@ from xml.etree import ElementTree
from xml.sax.saxutils import escape, unescape
from cloudinit.sources.helpers import azure as azure_helper
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-from cloudinit.util import load_file
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+from cloudinit.util import load_file
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
@@ -52,7 +51,7 @@ GOAL_STATE_TEMPLATE = """\
</GoalState>
"""
-HEALTH_REPORT_XML_TEMPLATE = '''\
+HEALTH_REPORT_XML_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -70,14 +69,16 @@ HEALTH_REPORT_XML_TEMPLATE = '''\
</RoleInstanceList>
</Container>
</Health>
-'''
+"""
-HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+)
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
@@ -87,24 +88,27 @@ class SentinelException(Exception):
class TestFindEndpoint(CiTestCase):
-
def setUp(self):
super(TestFindEndpoint, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, 'load_file'))
+ mock.patch.object(azure_helper.util, "load_file")
+ )
self.dhcp_options = patches.enter_context(
- mock.patch.object(wa_shim, '_load_dhclient_json'))
+ mock.patch.object(wa_shim, "_load_dhclient_json")
+ )
self.networkd_leases = patches.enter_context(
- mock.patch.object(wa_shim, '_networkd_get_value_from_leases'))
+ mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
+ )
self.networkd_leases.return_value = None
def test_missing_file(self):
- """wa_shim find_endpoint uses default endpoint if leasefile not found
+ """wa_shim find_endpoint uses default endpoint if
+ leasefile not found
"""
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@@ -112,82 +116,93 @@ class TestFindEndpoint(CiTestCase):
"""wa_shim find_endpoint uses default endpoint if leasefile is found
but does not contain DHCP Option 245 (whose value is the endpoint)
"""
- self.load_file.return_value = ''
- self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+ self.load_file.return_value = ""
+ self.dhcp_options.return_value = {"eth0": {"key": "value"}}
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@staticmethod
def _build_lease_content(encoded_address):
endpoint = azure_helper._get_dhcp_endpoint_option_name()
- return '\n'.join([
- 'lease {',
- ' interface "eth0";',
- ' option {0} {1};'.format(endpoint, encoded_address),
- '}'])
+ return "\n".join(
+ [
+ "lease {",
+ ' interface "eth0";',
+ " option {0} {1};".format(endpoint, encoded_address),
+ "}",
+ ]
+ )
def test_from_dhcp_client(self):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
- self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
-
- @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD')
- def test_latest_lease_used(self, m_is_freebsd):
- m_is_freebsd.return_value = False # To avoid hitting load_file
- encoded_addresses = ['5:4:3:2', '4:3:2:1']
- file_content = '\n'.join([self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses])
+ self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))
+
+ def test_latest_lease_used(self):
+ encoded_addresses = ["5:4:3:2", "4:3:2:1"]
+ file_content = "\n".join(
+ [
+ self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses
+ ]
+ )
self.load_file.return_value = file_content
- self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- wa_shim.find_endpoint("foobar"))
+ self.assertEqual(
+ encoded_addresses[-1].replace(":", "."),
+ wa_shim.find_endpoint("foobar"),
+ )
class TestExtractIpAddressFromLeaseValue(CiTestCase):
-
def test_hex_string(self):
- ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+ ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+ ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string(self):
- ip_address, encoded_address = '98.76.54.32', 'bL6 '
+ ip_address, encoded_address = "98.76.54.32", "bL6 "
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+ ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = '100.72.58.108', 'dH:l'
+ ip_address, encoded_address = "100.72.58.108", "dH:l"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
class TestGoalStateParsing(CiTestCase):
default_parameters = {
- 'incarnation': 1,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId',
- 'certificates_url': 'MyCertificatesUrl',
+ "incarnation": 1,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
+ "certificates_url": "MyCertificatesUrl",
}
def _get_formatted_goal_state_xml_string(self, **kwargs):
parameters = self.default_parameters.copy()
parameters.update(kwargs)
xml = GOAL_STATE_TEMPLATE.format(**parameters)
- if parameters['certificates_url'] is None:
+ if parameters["certificates_url"] is None:
new_xml_lines = []
for line in xml.splitlines():
- if 'Certificates' in line:
+ if "Certificates" in line:
continue
new_xml_lines.append(line)
- xml = '\n'.join(new_xml_lines)
+ xml = "\n".join(new_xml_lines)
return xml
def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
@@ -197,17 +212,17 @@ class TestGoalStateParsing(CiTestCase):
return azure_helper.GoalState(xml, m_azure_endpoint_client)
def test_incarnation_parsed_correctly(self):
- incarnation = '123'
+ incarnation = "123"
goal_state = self._get_goal_state(incarnation=incarnation)
self.assertEqual(incarnation, goal_state.incarnation)
def test_container_id_parsed_correctly(self):
- container_id = 'TestContainerId'
+ container_id = "TestContainerId"
goal_state = self._get_goal_state(container_id=container_id)
self.assertEqual(container_id, goal_state.container_id)
def test_instance_id_parsed_correctly(self):
- instance_id = 'TestInstanceId'
+ instance_id = "TestInstanceId"
goal_state = self._get_goal_state(instance_id=instance_id)
self.assertEqual(instance_id, goal_state.instance_id)
@@ -216,67 +231,72 @@ class TestGoalStateParsing(CiTestCase):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
self.assertTrue(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_same_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_diff_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_certificates_xml_parsed_and_fetched_correctly(self):
m_azure_endpoint_client = mock.MagicMock()
- certificates_url = 'TestCertificatesUrl'
+ certificates_url = "TestCertificatesUrl"
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=certificates_url)
+ certificates_url=certificates_url,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(1, m_azure_endpoint_client.get.call_count)
self.assertEqual(
- certificates_url,
- m_azure_endpoint_client.get.call_args[0][0])
+ certificates_url, m_azure_endpoint_client.get.call_args[0][0]
+ )
self.assertTrue(
- m_azure_endpoint_client.get.call_args[1].get(
- 'secure', False))
+ m_azure_endpoint_client.get.call_args[1].get("secure", False)
+ )
self.assertEqual(
- m_azure_endpoint_client.get.return_value.contents,
- certificates_xml)
+ m_azure_endpoint_client.get.return_value.contents, certificates_xml
+ )
def test_missing_certificates_skips_http_get(self):
m_azure_endpoint_client = mock.MagicMock()
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=None)
+ certificates_url=None,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(0, m_azure_endpoint_client.get.call_count)
self.assertIsNone(certificates_xml)
def test_invalid_goal_state_xml_raises_parse_error(self):
- xml = 'random non-xml data'
+ xml = "random non-xml data"
with self.assertRaises(ElementTree.ParseError):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_container_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+ xml = re.sub("<ContainerId>.*</ContainerId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+ xml = re.sub("<InstanceId>.*</InstanceId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+ xml = re.sub("<Incarnation>.*</Incarnation>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
@@ -284,8 +304,8 @@ class TestGoalStateParsing(CiTestCase):
class TestAzureEndpointHttpClient(CiTestCase):
regular_headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def setUp(self):
@@ -293,43 +313,48 @@ class TestAzureEndpointHttpClient(CiTestCase):
patches = ExitStack()
self.addCleanup(patches.close)
self.m_http_with_retries = patches.enter_context(
- mock.patch.object(azure_helper, 'http_with_retries'))
+ mock.patch.object(azure_helper, "http_with_retries")
+ )
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
response = client.get(url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_non_secure_get_raises_exception(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_secure_get(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
m_certificate = mock.MagicMock()
expected_headers = self.regular_headers.copy()
- expected_headers.update({
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": m_certificate,
- })
+ expected_headers.update(
+ {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
+ }
+ )
client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_secure_get_raises_exception(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=True)
@@ -337,44 +362,50 @@ class TestAzureEndpointHttpClient(CiTestCase):
def test_post(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
response = client.post(url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, data=m_data, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.post, url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_post_with_extra_headers(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- extra_headers = {'test': 'header'}
+ extra_headers = {"test": "header"}
client.post(url, extra_headers=extra_headers)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(
mock.call(url, data=mock.ANY, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_with_sleep_with_extra_headers_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
- extra_headers = {'test': 'header'}
+ url = "MyTestUrl"
+ extra_headers = {"test": "header"}
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(
- SentinelException, client.post,
- url, data=m_data, extra_headers=extra_headers)
+ SentinelException,
+ client.post,
+ url,
+ data=m_data,
+ extra_headers=extra_headers,
+ )
self.assertEqual(1, self.m_http_with_retries.call_count)
@@ -384,6 +415,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
def setUp(self):
@@ -393,122 +425,139 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl = patches.enter_context(
mock.patch.object(
- azure_helper.url_helper, 'readurl', mock.MagicMock()))
- patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ azure_helper.url_helper, "readurl", mock.MagicMock()
+ )
+ )
+ self.m_sleep = patches.enter_context(
+ mock.patch.object(azure_helper.time, "sleep", autospec=True)
+ )
def test_http_with_retries(self):
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.return_value = "TestResp"
self.assertEqual(
- azure_helper.http_with_retries('testurl'),
- self.m_readurl.return_value)
+ azure_helper.http_with_retries("testurl"),
+ self.m_readurl.return_value,
+ )
self.assertEqual(self.m_readurl.call_count, 1)
- def test_http_with_retries_propagates_readurl_exc_and_logs_exc(
- self):
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
self.m_readurl.side_effect = SentinelException
self.assertRaises(
- SentinelException, azure_helper.http_with_retries, 'testurl')
+ SentinelException, azure_helper.http_with_retries, "testurl"
+ )
self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- response = azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ response = azure_helper.http_with_retries("testurl")
+ self.assertEqual(response, self.m_readurl.return_value)
self.assertEqual(
- response,
- self.m_readurl.return_value)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
+
+ # Ensure that cloud-init did sleep between each failed request
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_sleep.call_count, self.periodic_logging_attempts
+ )
+ self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
- azure_helper.http_with_retries('testurl')
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * \
- (self.periodic_logging_attempts - 1) + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [SentinelException] * (
+ self.periodic_logging_attempts - 1
+ ) + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts)
+ self.m_readurl.call_count, self.periodic_logging_attempts
+ )
self.assertIsNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
# timeout kwarg should not be modified or deleted if present
- 'timeout': mock.MagicMock()
+ "timeout": mock.MagicMock(),
}
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **kwargs)
def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
testurl = mock.MagicMock()
- kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock()
- }
+ kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs['timeout'] = self.default_readurl_timeout
+ expected_kwargs["timeout"] = self.default_readurl_timeout
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
- def test_http_with_retries_deletes_retries_kwargs_passed_in(
- self):
+ def test_http_with_retries_deletes_retries_kwargs_passed_in(self):
"""http_with_retries already implements retry logic,
so url_helper.readurl should not have retries.
http_with_retries should delete kwargs that
@@ -516,44 +565,44 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
"""
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
- 'timeout': mock.MagicMock(),
- 'retries': mock.MagicMock(),
- 'infinite': mock.MagicMock()
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
+ "timeout": mock.MagicMock(),
+ "retries": mock.MagicMock(),
+ "infinite": mock.MagicMock(),
}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs.pop('retries', None)
- expected_kwargs.pop('infinite', None)
+ expected_kwargs.pop("retries", None)
+ expected_kwargs.pop("infinite", None)
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
self.assertIn(
- 'retries kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "retries kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
self.assertIn(
- 'infinite kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "infinite kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
class TestOpenSSLManager(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManager, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.subp = patches.enter_context(
- mock.patch.object(azure_helper.subp, 'subp'))
+ mock.patch.object(azure_helper.subp, "subp")
+ )
try:
- self.open = patches.enter_context(
- mock.patch('__builtin__.open'))
+ self.open = patches.enter_context(mock.patch("__builtin__.open"))
except ImportError:
- self.open = patches.enter_context(
- mock.patch('builtins.open'))
+ self.open = patches.enter_context(mock.patch("builtins.open"))
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp")
def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
manager = azure_helper.OpenSSLManager()
self.assertEqual(mkdtemp.return_value, manager.tmpdir)
@@ -562,16 +611,16 @@ class TestOpenSSLManager(CiTestCase):
subp_directory = {}
def capture_directory(*args, **kwargs):
- subp_directory['path'] = os.getcwd()
+ subp_directory["path"] = os.getcwd()
self.subp.side_effect = capture_directory
manager = azure_helper.OpenSSLManager()
- self.assertEqual(manager.tmpdir, subp_directory['path'])
+ self.assertEqual(manager.tmpdir, subp_directory["path"])
manager.clean_up()
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock())
- @mock.patch.object(azure_helper.util, 'del_dir')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp", mock.MagicMock())
+ @mock.patch.object(azure_helper.util, "del_dir")
def test_clean_up(self, del_dir):
manager = azure_helper.OpenSSLManager()
manager.clean_up()
@@ -579,43 +628,42 @@ class TestOpenSSLManager(CiTestCase):
class TestOpenSSLManagerActions(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManagerActions, self).setUp()
self.allowed_subp = True
def _data_file(self, name):
- path = 'tests/data/azure'
+ path = "tests/data/azure"
return os.path.join(path, name)
@unittest.skip("todo move to cloud_test")
def test_pubkey_extract(self):
- cert = load_file(self._data_file('pubkey_extract_cert'))
- good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
+ cert = load_file(self._data_file("pubkey_extract_cert"))
+ good_key = load_file(self._data_file("pubkey_extract_ssh_key"))
sslmgr = azure_helper.OpenSSLManager()
key = sslmgr._get_ssh_key_from_cert(cert)
self.assertEqual(good_key, key)
- good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
+ good_fingerprint = "073E19D14D1C799224C6A0FD8DDAB6A8BF27D473"
fingerprint = sslmgr._get_fingerprint_from_cert(cert)
self.assertEqual(good_fingerprint, fingerprint)
@unittest.skip("todo move to cloud_test")
- @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
+ @mock.patch.object(azure_helper.OpenSSLManager, "_decrypt_certs_from_xml")
def test_parse_certificates(self, mock_decrypt_certs):
"""Azure control plane puts private keys as well as certificates
- into the Certificates XML object. Make sure only the public keys
- from certs are extracted and that fingerprints are converted to
- the form specified in the ovf-env.xml file.
+ into the Certificates XML object. Make sure only the public keys
+ from certs are extracted and that fingerprints are converted to
+ the form specified in the ovf-env.xml file.
"""
- cert_contents = load_file(self._data_file('parse_certificates_pem'))
- fingerprints = load_file(self._data_file(
- 'parse_certificates_fingerprints')
+ cert_contents = load_file(self._data_file("parse_certificates_pem"))
+ fingerprints = load_file(
+ self._data_file("parse_certificates_fingerprints")
).splitlines()
mock_decrypt_certs.return_value = cert_contents
sslmgr = azure_helper.OpenSSLManager()
- keys_by_fp = sslmgr.parse_certificates('')
+ keys_by_fp = sslmgr.parse_certificates("")
for fp in keys_by_fp.keys():
self.assertIn(fp, fingerprints)
for fp in fingerprints:
@@ -627,21 +675,23 @@ class TestGoalStateHealthReporter(CiTestCase):
maxDiff = None
default_parameters = {
- 'incarnation': 1634,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId'
+ "incarnation": 1634,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
}
- test_azure_endpoint = 'TestEndpoint'
- test_health_report_url = 'http://{0}/machine?comp=health'.format(
- test_azure_endpoint)
- test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'}
+ test_azure_endpoint = "TestEndpoint"
+ test_health_report_url = "http://{0}/machine?comp=health".format(
+ test_azure_endpoint
+ )
+ test_default_headers = {"Content-Type": "text/xml; charset=utf-8"}
- provisioning_success_status = 'Ready'
- provisioning_not_ready_status = 'NotReady'
- provisioning_failure_substatus = 'ProvisioningFailed'
+ provisioning_success_status = "Ready"
+ provisioning_not_ready_status = "NotReady"
+ provisioning_failure_substatus = "ProvisioningFailed"
provisioning_failure_err_description = (
- 'Test error message containing provisioning failure details')
+ "Test error message containing provisioning failure details"
+ )
def setUp(self):
super(TestGoalStateHealthReporter, self).setUp()
@@ -649,22 +699,28 @@ class TestGoalStateHealthReporter(CiTestCase):
self.addCleanup(patches.close)
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
+ mock.patch.object(azure_helper.url_helper, "read_file_or_url")
+ )
self.post = patches.enter_context(
- mock.patch.object(azure_helper.AzureEndpointHttpClient,
- 'post'))
+ mock.patch.object(azure_helper.AzureEndpointHttpClient, "post")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
- self.GoalState.return_value.container_id = \
- self.default_parameters['container_id']
- self.GoalState.return_value.instance_id = \
- self.default_parameters['instance_id']
- self.GoalState.return_value.incarnation = \
- self.default_parameters['incarnation']
+ mock.patch.object(azure_helper, "GoalState")
+ )
+ self.GoalState.return_value.container_id = self.default_parameters[
+ "container_id"
+ ]
+ self.GoalState.return_value.instance_id = self.default_parameters[
+ "instance_id"
+ ]
+ self.GoalState.return_value.incarnation = self.default_parameters[
+ "incarnation"
+ ]
def _text_from_xpath_in_xroot(self, xroot, xpath):
element = xroot.find(xpath)
@@ -680,34 +736,41 @@ class TestGoalStateHealthReporter(CiTestCase):
def _get_report_ready_health_document(self):
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_success_status),
- health_detail_subsection='')
+ health_detail_subsection="",
+ )
def _get_report_failure_health_document(self):
- health_detail_subsection = \
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(self.provisioning_failure_substatus),
health_description=escape(
- self.provisioning_failure_err_description))
+ self.provisioning_failure_err_description
+ ),
+ )
+ )
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_not_ready_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
def test_send_ready_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, self.post.call_count)
@@ -715,73 +778,94 @@ class TestGoalStateHealthReporter(CiTestCase):
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_send_failure_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, self.post.call_count)
self.assertEqual(
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_build_report_for_ready_signal_health_document(self):
health_document = self._get_report_ready_health_document()
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- str(self.default_parameters['container_id']))
+ generated_xroot, "./Container/ContainerId"
+ ),
+ str(self.default_parameters["container_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- str(self.default_parameters['instance_id']))
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ str(self.default_parameters["instance_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_success_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_success_status),
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details'))
+ "./Container/RoleInstanceList/Role/Health/Details",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/SubStatus'))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
)
def test_build_report_for_failure_signal_health_document(self):
@@ -789,120 +873,143 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- self.default_parameters['container_id'])
+ generated_xroot, "./Container/ContainerId"
+ ),
+ self.default_parameters["container_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- self.default_parameters['instance_id'])
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ self.default_parameters["instance_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_not_ready_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_not_ready_status),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'SubStatus'),
- escape(self.provisioning_failure_substatus))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ ),
+ escape(self.provisioning_failure_substatus),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'Description'),
- escape(self.provisioning_failure_err_description))
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ ),
+ escape(self.provisioning_failure_err_description),
+ )
def test_send_ready_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status),
- m_build_report.call_args)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ ),
+ m_build_report.call_args,
+ )
def test_send_failure_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description),
- m_build_report.call_args)
+ description=self.provisioning_failure_err_description,
+ ),
+ m_build_report.call_args,
+ )
def test_build_report_escapes_chars(self):
- incarnation = 'jd8\'9*&^<\'A><A[p&o+\"SD()*&&&LKAJSD23'
- container_id = '&&<\"><><ds8\'9+7&d9a86!@($09asdl;<>'
- instance_id = 'Opo>>>jas\'&d;[p&fp\"a<<!!@&&'
- health_status = '&<897\"6&>&aa\'sd!@&!)((*<&>'
- health_substatus = '&as\"d<<a&s>d<\'^@!5&6<7'
- health_description = '&&&>!#$\"&&<as\'1!@$d&>><>&\"sd<67<]>>'
-
- health_detail_subsection = \
+ incarnation = "jd8'9*&^<'A><A[p&o+\"SD()*&&&LKAJSD23"
+ container_id = "&&<\"><><ds8'9+7&d9a86!@($09asdl;<>"
+ instance_id = "Opo>>>jas'&d;[p&fp\"a<<!!@&&"
+ health_status = "&<897\"6&>&aa'sd!@&!)((*<&>"
+ health_substatus = "&as\"d<<a&s>d<'^@!5&6<7"
+ health_description = '&&&>!#$"&&<as\'1!@$d&>><>&"sd<67<]>>'
+
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(health_substatus),
- health_description=escape(health_description))
+ health_description=escape(health_description),
+ )
+ )
health_document = self._get_formatted_health_report_xml_string(
incarnation=escape(incarnation),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(health_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
incarnation=incarnation,
container_id=container_id,
instance_id=instance_id,
status=health_status,
substatus=health_substatus,
- description=health_description)
+ description=health_description,
+ )
self.assertEqual(health_document, generated_health_document)
@@ -910,26 +1017,31 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "a9&ea8>>>e as1< d\"q2*&(^%'a=5<" * 100
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
self.assertEqual(
len(unescape(generated_health_report_description)),
- HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+ HEALTH_REPORT_DESCRIPTION_TRIM_LEN,
+ )
def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
- self):
+ self,
+ ):
"""When unescaped characters are XML-escaped, the length increases.
Char Escape String
< &lt;
@@ -958,150 +1070,176 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = '\'\"' * 10000
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "'\"" * 10000
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
# The escaped description string should be less than
# the Azure platform limit for the escaped description string.
self.assertLessEqual(len(generated_health_report_description), 4096)
class TestWALinuxAgentShim(CiTestCase):
-
def setUp(self):
super(TestWALinuxAgentShim, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.AzureEndpointHttpClient = patches.enter_context(
- mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
+ mock.patch.object(azure_helper, "AzureEndpointHttpClient")
+ )
self.find_endpoint = patches.enter_context(
- mock.patch.object(wa_shim, 'find_endpoint'))
+ mock.patch.object(wa_shim, "find_endpoint")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
+ mock.patch.object(azure_helper, "GoalState")
+ )
self.OpenSSLManager = patches.enter_context(
- mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True))
+ mock.patch.object(azure_helper, "OpenSSLManager", autospec=True)
+ )
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
- self.test_incarnation = 'TestIncarnation'
- self.test_container_id = 'TestContainerId'
- self.test_instance_id = 'TestInstanceId'
+ self.test_incarnation = "TestIncarnation"
+ self.test_container_id = "TestContainerId"
+ self.test_instance_id = "TestInstanceId"
self.GoalState.return_value.incarnation = self.test_incarnation
self.GoalState.return_value.container_id = self.test_container_id
self.GoalState.return_value.instance_id = self.test_instance_id
+ def test_eject_iso_is_called(self):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, "eject_iso", autospec=True
+ ) as m_eject_iso:
+ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
+ m_eject_iso.assert_called_once_with("/dev/sr0")
+
def test_http_client_does_not_use_certificate_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_http_client_does_not_use_certificate_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_correct_url_used_for_goalstate_during_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_correct_url_used_for_goalstate_during_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_certificates_used_to_determine_public_keys(self):
# if register_with_azure_and_fetch_data() isn't passed some info about
# the user's public keys, there's no point in even trying to parse the
# certificates
shim = wa_shim()
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'},
- {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}]
- certs = {'fp1': 'expected-key',
- 'fp2': 'should-not-be-found',
- 'fp3': 'expected-no-value-key',
- }
+ mypk = [
+ {"fingerprint": "fp1", "path": "path1"},
+ {"fingerprint": "fp3", "path": "path3", "value": ""},
+ ]
+ certs = {
+ "fp1": "expected-key",
+ "fp2": "should-not-be-found",
+ "fp3": "expected-no-value-key",
+ }
sslmgr = self.OpenSSLManager.return_value
sslmgr.parse_certificates.return_value = certs
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
self.assertEqual(
[mock.call(self.GoalState.return_value.certificates_xml)],
- sslmgr.parse_certificates.call_args_list)
- self.assertIn('expected-key', data['public-keys'])
- self.assertIn('expected-no-value-key', data['public-keys'])
- self.assertNotIn('should-not-be-found', data['public-keys'])
+ sslmgr.parse_certificates.call_args_list,
+ )
+ self.assertIn("expected-key", data)
+ self.assertIn("expected-no-value-key", data)
+ self.assertNotIn("should-not-be-found", data)
def test_absent_certificates_produces_empty_public_keys(self):
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'}]
+ mypk = [{"fingerprint": "fp1", "path": "path1"}]
self.GoalState.return_value.certificates_xml = None
shim = wa_shim()
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
- self.assertEqual([], data['public-keys'])
+ self.assertEqual([], data)
def test_correct_url_used_for_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- expected_url = 'http://test_endpoint/machine?comp=health'
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_correct_url_used_for_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- expected_url = 'http://test_endpoint/machine?comp=health'
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_goal_state_values_used_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1109,10 +1247,9 @@ class TestWALinuxAgentShim(CiTestCase):
def test_goal_state_values_used_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1125,57 +1262,66 @@ class TestWALinuxAgentShim(CiTestCase):
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('Ready'),
- health_detail_subsection='')
+ health_status=escape("Ready"),
+ health_detail_subsection="",
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
def test_xml_elems_in_report_failure_post(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('NotReady'),
- health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE
- .format(
- health_substatus=escape('ProvisioningFailed'),
- health_description=escape('TestDesc')))
+ health_status=escape("NotReady"),
+ health_detail_subsection=(
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=escape("ProvisioningFailed"),
+ health_description=escape("TestDesc"),
+ )
+ ),
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
1,
- m_goal_state_health_reporter.return_value.send_ready_signal
- .call_count)
+ m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501
+ )
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_report_failure_calls_send_failure_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- m_goal_state_health_reporter.return_value.send_failure_signal \
- .assert_called_once_with(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_register_with_azure_and_report_failure_does_not_need_certificates(
- self):
+ self,
+ ):
shim = wa_shim()
with mock.patch.object(
- shim, '_fetch_goal_state_from_azure', autospec=True
+ shim, "_fetch_goal_state_from_azure", autospec=True
) as m_fetch_goal_state_from_azure:
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_fetch_goal_state_from_azure.assert_called_once_with(
- need_certificate=False)
+ need_certificate=False
+ )
def test_clean_up_can_be_called_at_any_time(self):
shim = wa_shim()
@@ -1184,7 +1330,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_openssl_manager_not_instantiated_by_shim_report_status(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.assert_not_called()
@@ -1196,177 +1342,204 @@ class TestWALinuxAgentShim(CiTestCase):
def test_clean_up_after_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.return_value.clean_up.assert_not_called()
def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
- self):
+ self,
+ ):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
-
def setUp(self):
super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_data_from_shim_returned(self):
ret = azure_helper.get_metadata_from_fabric()
self.assertEqual(
- self.m_shim.return_value.register_with_azure_and_fetch_data
- .return_value,
- ret)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501
+ ret,
+ )
def test_success_calls_clean_up(self):
azure_helper.get_metadata_from_fabric()
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- def test_failure_in_registration_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_fetch_data \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.get_metadata_from_fabric)
+ def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
+ self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.get_metadata_from_fabric
+ )
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
- azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
+ azure_helper.get_metadata_from_fabric(
+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0"
+ )
self.assertEqual(
1,
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_count)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501
+ )
self.assertEqual(
- mock.call(pubkey_info=m_pubkey_info),
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_args)
+ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.assertEqual(1, self.m_shim.call_count)
self.assertEqual(
mock.call(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options),
- self.m_shim.call_args)
+ dhcp_options=m_dhcp_options,
+ ),
+ self.m_shim.call_args,
+ )
class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
-
def setUp(self):
super(
- TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp()
+ TestGetMetadataGoalStateXMLAndReportFailureToFabric, self
+ ).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_success_calls_clean_up(self):
azure_helper.report_failure_to_fabric()
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.report_failure_to_fabric)
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self,
+ ):
+ self.m_shim.return_value.register_with_azure_and_report_failure.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.report_failure_to_fabric
+ )
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='TestDesc')
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(description='TestDesc')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="TestDesc")
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
- self):
+ self,
+ ):
azure_helper.report_failure_to_fabric()
# default err message description should be shown to the user
# if no description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="")
# default err message description should be shown to the user
# if an empty description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.report_failure_to_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.m_shim.assert_called_once_with(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options)
+ dhcp_options=m_dhcp_options,
+ )
class TestExtractIpAddressFromNetworkd(CiTestCase):
- azure_lease = dedent("""\
+ azure_lease = dedent(
+ """\
# This is private data. Do not parse.
ADDRESS=10.132.0.5
NETMASK=255.255.255.255
@@ -1385,7 +1558,8 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
CLIENTID=ff405663a200020000ab11332859494d7a8b4c
OPTION_245=624c3620
- """)
+ """
+ )
def setUp(self):
super(TestExtractIpAddressFromNetworkd, self).setUp()
@@ -1394,21 +1568,25 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
def test_no_valid_leases_is_none(self):
"""No valid leases should return None."""
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_is_found_in_single(self):
"""A single valid lease with 245 option should return it."""
- populate_dir(self.lease_d, {'9': self.azure_lease})
+ populate_dir(self.lease_d, {"9": self.azure_lease})
self.assertEqual(
- '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d))
+ "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_not_found_returns_None(self):
"""A valid lease, but no option 245 should return None."""
populate_dir(
self.lease_d,
- {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")})
+ {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")},
+ )
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_multiple_returns_first(self):
"""Somewhat arbitrarily return the first address when multiple.
@@ -1418,10 +1596,14 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
myval = "624c3601"
populate_dir(
self.lease_d,
- {'9': self.azure_lease,
- '2': self.azure_lease.replace("624c3620", myval)})
+ {
+ "9": self.azure_lease,
+ "2": self.azure_lease.replace("624c3620", myval),
+ },
+ )
self.assertEqual(
- myval, wa_shim._networkd_get_value_from_leases(self.lease_d))
+ myval, wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index 7aa3b1d1..a2f26245 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -2,13 +2,10 @@
import copy
+from cloudinit import distros, helpers, sources
from cloudinit.cs_utils import Cepko
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import sources
from cloudinit.sources import DataSourceCloudSigma
-
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
@@ -28,10 +25,10 @@ SERVER_CONTEXT = {
"vendor_data": {
"location": "zrh",
"cloudinit": "#cloud-config\n\n...",
- }
+ },
}
-DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma'
+DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma"
class CepkoMock(Cepko):
@@ -45,41 +42,48 @@ class CepkoMock(Cepko):
class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def setUp(self):
super(DataSourceCloudSigmaTest, self).setUp()
- self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
- self.add_patch(DS_PATH + '.is_running_in_cloudsigma',
- "m_is_container", return_value=True)
+ self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.add_patch(
+ DS_PATH + ".is_running_in_cloudsigma",
+ "m_is_container",
+ return_value=True,
+ )
distro_cls = distros.fetch("ubuntu")
distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
- sys_cfg={}, distro=distro, paths=self.paths)
+ sys_cfg={}, distro=distro, paths=self.paths
+ )
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
self.datasource.get_data()
self.assertEqual("test_server", self.datasource.get_hostname())
- self.datasource.metadata['name'] = ''
+ self.datasource.metadata["name"] = ""
self.assertEqual("65b2fb23", self.datasource.get_hostname())
- utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8')
- self.datasource.metadata['name'] = utf8_hostname
+ utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
+ self.datasource.metadata["name"] = utf8_hostname
self.assertEqual("65b2fb23", self.datasource.get_hostname())
def test_get_public_ssh_keys(self):
self.datasource.get_data()
- self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']],
- self.datasource.get_public_ssh_keys())
+ self.assertEqual(
+ [SERVER_CONTEXT["meta"]["ssh_public_key"]],
+ self.datasource.get_public_ssh_keys(),
+ )
def test_get_instance_id(self):
self.datasource.get_data()
- self.assertEqual(SERVER_CONTEXT['uuid'],
- self.datasource.get_instance_id())
+ self.assertEqual(
+ SERVER_CONTEXT["uuid"], self.datasource.get_instance_id()
+ )
def test_platform(self):
"""All platform-related attributes are set."""
self.datasource.get_data()
- self.assertEqual(self.datasource.cloud_name, 'cloudsigma')
- self.assertEqual(self.datasource.platform_type, 'cloudsigma')
- self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)')
+ self.assertEqual(self.datasource.cloud_name, "cloudsigma")
+ self.assertEqual(self.datasource.platform_type, "cloudsigma")
+ self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)")
def test_metadata(self):
self.datasource.get_data()
@@ -87,22 +91,26 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def test_user_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw,
- SERVER_CONTEXT['meta']['cloudinit-user-data'])
+ self.assertEqual(
+ self.datasource.userdata_raw,
+ SERVER_CONTEXT["meta"]["cloudinit-user-data"],
+ )
def test_encoded_user_data(self):
encoded_context = copy.deepcopy(SERVER_CONTEXT)
- encoded_context['meta']['base64_fields'] = 'cloudinit-user-data'
- encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK'
+ encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
+ encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
self.datasource.cepko = CepkoMock(encoded_context)
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw, b'hi world\n')
+ self.assertEqual(self.datasource.userdata_raw, b"hi world\n")
def test_vendor_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.vendordata_raw,
- SERVER_CONTEXT['vendor_data']['cloudinit'])
+ self.assertEqual(
+ self.datasource.vendordata_raw,
+ SERVER_CONTEXT["vendor_data"]["cloudinit"],
+ )
def test_lack_of_vendor_data(self):
stripped_context = copy.deepcopy(SERVER_CONTEXT)
@@ -125,13 +133,13 @@ class DsLoads(test_helpers.TestCase):
def test_get_datasource_list_returns_in_local(self):
deps = (sources.DEP_FILESYSTEM,)
ds_list = DataSourceCloudSigma.get_datasource_list(deps)
- self.assertEqual(ds_list,
- [DataSourceCloudSigma.DataSourceCloudSigma])
+ self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma])
def test_list_sources_finds_ds(self):
found = sources.list_sources(
- ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources'])
- self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma],
- found)
+ ["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
+ )
+ self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py
index e68168f2..f7c69f91 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/sources/test_cloudstack.py
@@ -1,80 +1,90 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit import util
-from cloudinit.sources.DataSourceCloudStack import (
- DataSourceCloudStack, get_latest_lease)
-
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock
-
import os
import time
-MOD_PATH = 'cloudinit.sources.DataSourceCloudStack'
-DS_PATH = MOD_PATH + '.DataSourceCloudStack'
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceCloudStack import (
+ DataSourceCloudStack,
+ get_latest_lease,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+MOD_PATH = "cloudinit.sources.DataSourceCloudStack"
+DS_PATH = MOD_PATH + ".DataSourceCloudStack"
-class TestCloudStackPasswordFetching(CiTestCase):
+class TestCloudStackPasswordFetching(CiTestCase):
def setUp(self):
super(TestCloudStackPasswordFetching, self).setUp()
self.patches = ExitStack()
self.addCleanup(self.patches.close)
mod_name = MOD_PATH
- self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name)))
- self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name)))
default_gw = "192.201.20.0"
get_latest_lease = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_latest_lease', get_latest_lease))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_latest_lease", get_latest_lease)
+ )
get_default_gw = mock.MagicMock(return_value=default_gw)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_default_gateway', get_default_gw))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_default_gateway", get_default_gw)
+ )
get_networkd_server_address = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.dhcp.networkd_get_option_from_leases',
- get_networkd_server_address))
+ self.patches.enter_context(
+ mock.patch(
+ mod_name + ".dhcp.networkd_get_option_from_leases",
+ get_networkd_server_address,
+ )
+ )
self.tmp = self.tmp_dir()
def _set_password_server_response(self, response_string):
- subp = mock.MagicMock(return_value=(response_string, ''))
+ subp = mock.MagicMock(return_value=(response_string, ""))
self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp',
- subp))
+ mock.patch(
+ "cloudinit.sources.DataSourceCloudStack.subp.subp", subp
+ )
+ )
return subp
def test_empty_password_doesnt_create_config(self):
- self._set_password_server_response('')
+ self._set_password_server_response("")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
def test_saved_password_doesnt_create_config(self):
- self._set_password_server_response('saved_password')
+ self._set_password_server_response("saved_password")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_password_sets_password(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertEqual(password, ds.get_config_obj()['password'])
+ self.assertEqual(password, ds.get_config_obj()["password"])
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_bad_request_doesnt_stop_ds_from_working(self, m_wait):
m_wait.return_value = True
- self._set_password_server_response('bad_request')
+ self._set_password_server_response("bad_request")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
self.assertTrue(ds.get_data())
def assertRequestTypesSent(self, subp, expected_request_types):
@@ -82,42 +92,44 @@ class TestCloudStackPasswordFetching(CiTestCase):
for call in subp.call_args_list:
args = call[0][0]
for arg in args:
- if arg.startswith('DomU_Request'):
+ if arg.startswith("DomU_Request"):
request_types.append(arg.split()[1])
self.assertEqual(expected_request_types, request_types)
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_valid_response_means_password_marked_as_saved(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
subp = self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertRequestTypesSent(subp,
- ['send_my_password', 'saved_password'])
+ self.assertRequestTypesSent(
+ subp, ["send_my_password", "saved_password"]
+ )
def _check_password_not_saved_for(self, response_string):
subp = self._set_password_server_response(response_string)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
- with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait:
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait:
m_wait.return_value = True
ds.get_data()
- self.assertRequestTypesSent(subp, ['send_my_password'])
+ self.assertRequestTypesSent(subp, ["send_my_password"])
def test_password_not_saved_if_empty(self):
- self._check_password_not_saved_for('')
+ self._check_password_not_saved_for("")
def test_password_not_saved_if_already_saved(self):
- self._check_password_not_saved_for('saved_password')
+ self._check_password_not_saved_for("saved_password")
def test_password_not_saved_if_bad_request(self):
- self._check_password_not_saved_for('bad_request')
+ self._check_password_not_saved_for("bad_request")
class TestGetLatestLease(CiTestCase):
-
def _populate_dir_list(self, bdir, files):
"""populate_dir_list([(name, data), (name, data)])
@@ -133,8 +145,9 @@ class TestGetLatestLease(CiTestCase):
def _pop_and_test(self, files, expected):
lease_d = self.tmp_dir()
self._populate_dir_list(lease_d, files)
- self.assertEqual(self.tmp_path(expected, lease_d),
- get_latest_lease(lease_d))
+ self.assertEqual(
+ self.tmp_path(expected, lease_d), get_latest_lease(lease_d)
+ )
def test_skips_dhcpv6_files(self):
"""files started with dhclient6 should be skipped."""
@@ -161,9 +174,15 @@ class TestGetLatestLease(CiTestCase):
def test_ignores_by_extension(self):
"""only .lease or .leases file should be considered."""
- self._pop_and_test(["dhclient.lease", "dhclient.lease.bk",
- "dhclient.lease-old", "dhclient.leaselease"],
- "dhclient.lease")
+ self._pop_and_test(
+ [
+ "dhclient.lease",
+ "dhclient.lease.bk",
+ "dhclient.lease-old",
+ "dhclient.leaselease",
+ ],
+ "dhclient.lease",
+ )
def test_selects_newest_matching(self):
"""If multiple files match, the newest written should be used."""
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
new file mode 100644
index 00000000..a5bdb629
--- /dev/null
+++ b/tests/unittests/sources/test_common.py
@@ -0,0 +1,123 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import settings, sources, type_utils
+from cloudinit.sources import DataSource
+from cloudinit.sources import DataSourceAliYun as AliYun
+from cloudinit.sources import DataSourceAltCloud as AltCloud
+from cloudinit.sources import DataSourceAzure as Azure
+from cloudinit.sources import DataSourceBigstep as Bigstep
+from cloudinit.sources import DataSourceCloudSigma as CloudSigma
+from cloudinit.sources import DataSourceCloudStack as CloudStack
+from cloudinit.sources import DataSourceConfigDrive as ConfigDrive
+from cloudinit.sources import DataSourceDigitalOcean as DigitalOcean
+from cloudinit.sources import DataSourceEc2 as Ec2
+from cloudinit.sources import DataSourceExoscale as Exoscale
+from cloudinit.sources import DataSourceGCE as GCE
+from cloudinit.sources import DataSourceHetzner as Hetzner
+from cloudinit.sources import DataSourceIBMCloud as IBMCloud
+from cloudinit.sources import DataSourceLXD as LXD
+from cloudinit.sources import DataSourceMAAS as MAAS
+from cloudinit.sources import DataSourceNoCloud as NoCloud
+from cloudinit.sources import DataSourceNone as DSNone
+from cloudinit.sources import DataSourceOpenNebula as OpenNebula
+from cloudinit.sources import DataSourceOpenStack as OpenStack
+from cloudinit.sources import DataSourceOracle as Oracle
+from cloudinit.sources import DataSourceOVF as OVF
+from cloudinit.sources import DataSourceRbxCloud as RbxCloud
+from cloudinit.sources import DataSourceScaleway as Scaleway
+from cloudinit.sources import DataSourceSmartOS as SmartOS
+from cloudinit.sources import DataSourceUpCloud as UpCloud
+from cloudinit.sources import DataSourceVMware as VMware
+from cloudinit.sources import DataSourceVultr as Vultr
+from tests.unittests import helpers as test_helpers
+
+DEFAULT_LOCAL = [
+ Azure.DataSourceAzure,
+ CloudSigma.DataSourceCloudSigma,
+ ConfigDrive.DataSourceConfigDrive,
+ DigitalOcean.DataSourceDigitalOcean,
+ GCE.DataSourceGCELocal,
+ Hetzner.DataSourceHetzner,
+ IBMCloud.DataSourceIBMCloud,
+ LXD.DataSourceLXD,
+ NoCloud.DataSourceNoCloud,
+ OpenNebula.DataSourceOpenNebula,
+ Oracle.DataSourceOracle,
+ OVF.DataSourceOVF,
+ SmartOS.DataSourceSmartOS,
+ Vultr.DataSourceVultr,
+ Ec2.DataSourceEc2Local,
+ OpenStack.DataSourceOpenStackLocal,
+ RbxCloud.DataSourceRbxCloud,
+ Scaleway.DataSourceScaleway,
+ UpCloud.DataSourceUpCloudLocal,
+ VMware.DataSourceVMware,
+]
+
+DEFAULT_NETWORK = [
+ AliYun.DataSourceAliYun,
+ AltCloud.DataSourceAltCloud,
+ Bigstep.DataSourceBigstep,
+ CloudStack.DataSourceCloudStack,
+ DSNone.DataSourceNone,
+ Ec2.DataSourceEc2,
+ Exoscale.DataSourceExoscale,
+ GCE.DataSourceGCE,
+ MAAS.DataSourceMAAS,
+ NoCloud.DataSourceNoCloudNet,
+ OpenStack.DataSourceOpenStack,
+ OVF.DataSourceOVFNet,
+ UpCloud.DataSourceUpCloud,
+ VMware.DataSourceVMware,
+]
+
+
+class ExpectedDataSources(test_helpers.TestCase):
+ builtin_list = settings.CFG_BUILTIN["datasource_list"]
+ deps_local = [sources.DEP_FILESYSTEM]
+ deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+ pkg_list = [type_utils.obj_name(sources)]
+
+ def test_expected_default_local_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_local, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_LOCAL), set(found))
+
+ def test_expected_default_network_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_NETWORK), set(found))
+
+ def test_expected_nondefault_network_sources_found(self):
+ found = sources.list_sources(
+ ["AliYun"], self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
+
+
+class TestDataSourceInvariants(test_helpers.TestCase):
+ def test_data_sources_have_valid_network_config_sources(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ for cfg_src in ds.network_config_sources:
+ fail_msg = (
+ "{} has an invalid network_config_sources entry:"
+ " {}".format(str(ds), cfg_src)
+ )
+ self.assertTrue(
+ hasattr(sources.NetworkConfigSource, cfg_src), fail_msg
+ )
+
+ def test_expected_dsname_defined(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ fail_msg = (
+ "{} has an invalid / missing dsname property: {}".format(
+ str(ds), str(ds.dsname)
+ )
+ )
+ self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
+ self.assertIsNotNone(ds.dsname)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py
new file mode 100644
index 00000000..1fc40a0e
--- /dev/null
+++ b/tests/unittests/sources/test_configdrive.py
@@ -0,0 +1,1068 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import os
+from copy import copy, deepcopy
+
+from cloudinit import helpers, settings, util
+from cloudinit.net import eni, network_state
+from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit.sources.helpers import openstack
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": 0,
+ "ami-manifest-path": "FIXME",
+ "block-device-mapping": {
+ "ami": "sda1",
+ "ephemeral0": "sda2",
+ "root": "/dev/sda1",
+ "swap": "sda3",
+ },
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": None,
+ "placement": {"availability-zone": "nova"},
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "",
+ "public-keys": {"0": {"openssh-key": PUBKEY}},
+ "reservation-id": "r-iru5qm4m",
+ "security-groups": ["default"],
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+NETWORK_DATA = {
+ "services": [
+ {"type": "dns", "address": "199.204.44.24"},
+ {"type": "dns", "address": "199.204.47.54"},
+ ],
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ {
+ "vif_id": "1a5382f8-04c5-4d75-ab98-d666c1ef52cc",
+ "ethernet_mac_address": "fa:16:3e:05:30:fe",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap1a5382f8-04",
+ "name": "nic0",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv4_dhcp",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv4_dhcp",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ {
+ "link": "tap1a5382f8-04",
+ "type": "ipv4_dhcp",
+ "network_id": "dab2ba57-cae2-4311-a5ed-010b263891f5",
+ "id": "network2",
+ },
+ ],
+}
+
+NETWORK_DATA_2 = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a",
+ "type": "ipv4",
+ "netmask": "255.255.255.248",
+ "link": "eth0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ "ip_address": "2.2.2.10",
+ "id": "network0-ipv4",
+ },
+ {
+ "network_id": "ca447c83-6409-499b-aaef-6ad1ae995348",
+ "type": "ipv4",
+ "netmask": "255.255.255.224",
+ "link": "eth1",
+ "routes": [],
+ "ip_address": "3.3.3.24",
+ "id": "network1-ipv4",
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth0",
+ "vif_id": "vif-foo1",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth1",
+ "vif_id": "vif-foo2",
+ },
+ ],
+}
+
+# This network data has a 'tap' or null type for a link.
+NETWORK_DATA_3 = {
+ "services": [
+ {"type": "dns", "address": "172.16.36.11"},
+ {"type": "dns", "address": "172.16.36.12"},
+ ],
+ "networks": [
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "172.17.48.18",
+ "id": "network0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.17.48.1",
+ }
+ ],
+ },
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv6",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+ "id": "network1",
+ "routes": [
+ {
+ "netmask": "::",
+ "network": "::",
+ "gateway": "fdb8:52d0:9d14::1",
+ }
+ ],
+ },
+ {
+ "network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap7d6b7bec-93",
+ "ip_address": "172.16.48.13",
+ "id": "network2",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.16.48.1",
+ },
+ {
+ "netmask": "255.255.0.0",
+ "network": "172.16.0.0",
+ "gateway": "172.16.48.1",
+ },
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": None,
+ "type": "tap",
+ "id": "tap77a0dc5b-72",
+ "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": None,
+ "type": None,
+ "id": "tap7d6b7bec-93",
+ "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5",
+ },
+ ],
+}
+
+BOND_MAC = "fa:16:3e:b3:72:36"
+NETWORK_DATA_BOND = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "id": "network2-ipv4",
+ "ip_address": "2.2.2.13",
+ "link": "vlan2",
+ "netmask": "255.255.255.248",
+ "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ },
+ {
+ "id": "network3-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan3",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3d",
+ "id": "eth1",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "bond_links": ["eth0", "eth1"],
+ "bond_miimon": 100,
+ "bond_mode": "4",
+ "bond_xmit_hash_policy": "layer3+4",
+ "ethernet_mac_address": BOND_MAC,
+ "id": "bond0",
+ "type": "bond",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan2",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:66:ab:a6",
+ "id": "vlan3",
+ "type": "vlan",
+ "vlan_id": 612,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:66:ab:a6",
+ },
+ ],
+}
+
+NETWORK_DATA_VLAN = {
+ "services": [{"type": "dns", "address": "1.1.1.191"}],
+ "networks": [
+ {
+ "id": "network1-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan1",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan1",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "eth0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ ],
+}
+
+KNOWN_MACS = {
+ "fa:16:3e:69:b0:58": "enp0s1",
+ "fa:16:3e:d4:57:ad": "enp0s2",
+ "fa:16:3e:dd:50:9a": "foo1",
+ "fa:16:3e:a8:14:69": "foo2",
+ "fa:16:3e:ed:9a:59": "foo3",
+ "0c:c4:7a:34:6e:3d": "oeth1",
+ "0c:c4:7a:34:6e:3c": "oeth0",
+}
+
+CFG_DRIVE_FILES_V2 = {
+ "ec2/2009-04-04/meta-data.json": json.dumps(EC2_META),
+ "ec2/2009-04-04/user-data": USER_DATA,
+ "ec2/latest/meta-data.json": json.dumps(EC2_META),
+ "ec2/latest/user-data": USER_DATA,
+ "openstack/2012-08-10/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2012-08-10/user_data": USER_DATA,
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/network_data.json": json.dumps(NETWORK_DATA),
+ "openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2015-10-15/user_data": USER_DATA,
+ "openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA),
+}
+
+M_PATH = "cloudinit.sources.DataSourceConfigDrive."
+
+
+class TestConfigDriveDataSource(CiTestCase):
+ def setUp(self):
+ super(TestConfigDriveDataSource, self).setUp()
+ self.add_patch(
+ M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]
+ )
+ self.tmp = self.tmp_dir()
+
+ def test_ec2_metadata(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ found = ds.read_config_drive(self.tmp)
+ self.assertTrue("ec2-metadata" in found)
+ ec2_md = found["ec2-metadata"]
+ self.assertEqual(EC2_META, ec2_md)
+
+ def test_dev_os_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ cfg_ds.metadata = found["metadata"]
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ provided_name = dev_name[len("/dev/") :]
+ provided_name = "s" + provided_name[1:]
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[provided_name]
+ )
+ )
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+
+ def exists_side_effect():
+ yield False
+ yield True
+
+ exists_mock = mocks.enter_context(
+ mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ )
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ self.assertEqual(exists_mock.call_count, 2)
+
+ def test_dev_os_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ os_md = found["metadata"]
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[dev_name]
+ )
+ )
+ exists_mock = mocks.enter_context(
+ mock.patch.object(os.path, "exists", return_value=True)
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ exists_mock.assert_called_once_with(mock.ANY)
+
+ def test_dev_ec2_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+ def exists_side_effect():
+ yield False
+ yield True
+
+ with mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ ):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+            # We don't assert the call count for os.path.exists() because
+            # not all of the entries in name_tests result in two calls to
+            # that function. Specifically, 'root2k' doesn't seem to call
+            # it at all.
+
+ def test_dev_ec2_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1",
+ "ephemeral0": "/dev/sda2",
+ "swap": "/dev/sda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ with mock.patch.object(os.path, "exists", return_value=True):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ def test_dir_valid(self):
+ """Verify a dir is read as such."""
+
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(USER_DATA, found["userdata"])
+ self.assertEqual(expected_md, found["metadata"])
+ self.assertEqual(NETWORK_DATA, found["networkdata"])
+ self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0)
+ self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect datasource validity."""
+
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+
+ populate_dir(self.tmp, data)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(expected_md, found["metadata"])
+
+ def test_seed_dir_bad_json_metadata(self):
+ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
+ data = copy(CFG_DRIVE_FILES_V2)
+
+ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
+ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
+
+ populate_dir(self.tmp, data)
+
+ self.assertRaises(
+ openstack.BrokenMetadata, ds.read_config_drive, self.tmp
+ )
+
+ def test_seed_dir_no_configdrive(self):
+ """Verify that no metadata raises NonConfigDriveDir."""
+
+ my_d = os.path.join(self.tmp, "non-configdrive")
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+ data["content/foo"] = "foocontent"
+
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises NonConfigDriveDir."""
+ my_d = os.path.join(self.tmp, "nonexistantdirectory")
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_find_candidates(self):
+ devs_with_answers = {}
+
+ def my_devs_with(*args, **kwargs):
+ criteria = args[0] if len(args) else kwargs.pop("criteria", None)
+ return devs_with_answers.get(criteria, [])
+
+ def my_is_partition(dev):
+ return dev[-1] in "0123456789" and not dev.startswith("sr")
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+
+ orig_is_partition = util.is_partition
+ util.is_partition = my_is_partition
+
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ # add a vfat item
+ # zdd reverse sorts after vdb, but config-2 label is preferred
+ devs_with_answers["TYPE=vfat"] = ["/dev/zdd"]
+ self.assertEqual(
+ ["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()
+ )
+
+            # verify that partitions that have the correct label are considered.
+ devs_with_answers = {
+ "TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [],
+ "LABEL=config-2": ["/dev/vdb3"],
+ }
+ self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs())
+
+ # Verify that uppercase labels are also found.
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=CONFIG-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ finally:
+ util.find_devs_with = orig_find_devs_with
+ util.is_partition = orig_is_partition
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_pubkeys_v2(self, on_first_boot):
+ """Verify that public-keys work in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertEqual(
+ myds.get_public_ssh_keys(), [OSTACK_META["public_keys"]["mykey"]]
+ )
+ self.assertEqual("configdrive", myds.cloud_name)
+ self.assertEqual("openstack", myds.platform)
+ self.assertEqual("seed-dir (%s/seed)" % self.tmp, myds.subplatform)
+
+ def test_subplatform_config_drive_when_starts_with_dev(self):
+ """subplatform reports config-drive when source starts with /dev/."""
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs:
+ with mock.patch(M_PATH + "util.mount_cb"):
+ with mock.patch(M_PATH + "on_first_boot"):
+ m_find_devs.return_value = ["/dev/anything"]
+ self.assertEqual(True, cfg_ds.get_data())
+ self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetJson(CiTestCase):
+ def setUp(self):
+ super(TestNetJson, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.maxDiff = None
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_data_is_found(self, on_first_boot):
+ """Verify that network_data is present in ds in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertIsNotNone(myds.network_json)
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_config_is_converted(self, on_first_boot):
+ """Verify that network_data is converted and present on ds object."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ network_config = openstack.convert_net_json(
+ NETWORK_DATA, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(myds.network_config, network_config)
+
+ def test_network_config_conversion_dhcp6(self):
+ """Test some ipv6 input network json and check the expected
+ conversions."""
+ in_data = {
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv6_dhcpv6-stateless",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv6_dhcpv6-stateful",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ ],
+ }
+ out_data = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:69:b0:58",
+ "mtu": None,
+ "name": "enp0s1",
+ "subnets": [{"type": "ipv6_dhcpv6-stateless"}],
+ "type": "physical",
+ },
+ {
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "mtu": None,
+ "name": "enp0s2",
+ "subnets": [{"type": "ipv6_dhcpv6-stateful"}],
+ "type": "physical",
+ "accept-ra": True,
+ },
+ ],
+ }
+ conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
+ self.assertEqual(out_data, conv_data)
+
+ def test_network_config_conversions(self):
+ """Tests a bunch of input network json and checks the
+ expected conversions."""
+ in_datas = [
+ NETWORK_DATA,
+ {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
+ "links": [
+ {
+ "type": "bridge",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "id": "tap1a81968a-79",
+ "mtu": None,
+ }
+ ],
+ },
+ ]
+ out_datas = [
+ {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:69:b0:58",
+ "name": "enp0s1",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "name": "enp0s2",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:05:30:fe",
+ "name": "nic0",
+ "mtu": None,
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.44.24",
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.47.54",
+ },
+ ],
+ },
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "foo3",
+ "mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "172.19.1.34",
+ "netmask": "255.255.252.0",
+ "type": "static",
+ "ipv4": True,
+ "routes": [
+ {
+ "gateway": "172.19.3.254",
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ }
+ ],
+ }
+ ],
+ },
+ {
+ "type": "nameserver",
+ "address": "172.19.0.12",
+ },
+ ],
+ },
+ ]
+ for in_data, out_data in zip(in_datas, out_datas):
+ conv_data = openstack.convert_net_json(
+ in_data, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(out_data, conv_data)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetworkData(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestConvertNetworkData, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _getnames_in_config(self, ncfg):
+ return set(
+ [n["name"] for n in ncfg["config"] if n["type"] == "physical"]
+ )
+
+ def test_conversion_fills_names(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
+ expected = set(["nic0", "enp0s1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+ macs = KNOWN_MACS.copy()
+ macs.update(
+ {"fa:16:3e:05:30:fe": "foonic1", "fa:16:3e:69:b0:58": "ens1"}
+ )
+ get_interfaces_by_mac.return_value = macs
+
+ ncfg = openstack.convert_net_json(NETWORK_DATA)
+ expected = set(["nic0", "ens1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ def test_convert_raises_value_error_on_missing_name(self):
+ macs = {"aa:aa:aa:aa:aa:00": "ens1"}
+ self.assertRaises(
+ ValueError,
+ openstack.convert_net_json,
+ NETWORK_DATA,
+ known_macs=macs,
+ )
+
+ def test_conversion_with_route(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_2, known_macs=KNOWN_MACS
+ )
+ # not the best test, but see that we get a route in the
+ # network config and that it gets rendered to an ENI file
+ routes = []
+ for n in ncfg["config"]:
+ for s in n.get("subnets", []):
+ routes.extend(s.get("routes", []))
+ self.assertIn(
+ {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "2.2.2.9"},
+ routes,
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+ self.assertIn("route add default gw 2.2.2.9", eni_rendering)
+
+ def test_conversion_with_tap(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_3, known_macs=KNOWN_MACS
+ )
+ physicals = set()
+ for i in ncfg["config"]:
+ if i.get("type") == "physical":
+ physicals.add(i["name"])
+ self.assertEqual(physicals, set(("foo1", "foo2")))
+
+ def test_bond_conversion(self):
+ # light testing of bond conversion and eni rendering of bond
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_BOND, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ # Verify there are expected interfaces in the net config.
+ interfaces = sorted(
+ [
+ i["name"]
+ for i in ncfg["config"]
+ if i["type"] in ("vlan", "bond", "physical")
+ ]
+ )
+ self.assertEqual(
+ sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
+ interfaces,
+ )
+
+ words = eni_rendering.split()
+        # 'eth0' and 'eth1' are the ids. Because their mac addresses
+        # map to other names, we should not see them in the ENI
+ self.assertNotIn("eth0", words)
+ self.assertNotIn("eth1", words)
+
+ # oeth0 and oeth1 are the interface names for eni.
+ # bond0 will be generated for the bond. Each should be auto.
+ self.assertIn("auto oeth0", eni_rendering)
+ self.assertIn("auto oeth1", eni_rendering)
+ self.assertIn("auto bond0", eni_rendering)
+ # The bond should have the given mac address
+ pos = eni_rendering.find("auto bond0")
+ self.assertIn(BOND_MAC, eni_rendering[pos:])
+
+ def test_vlan(self):
+ # light testing of vlan config conversion and eni rendering
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_VLAN, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ self.assertIn("iface enp0s1", eni_rendering)
+ self.assertIn("address 10.0.1.5", eni_rendering)
+ self.assertIn("auto enp0s1.602", eni_rendering)
+
+ def test_mac_addrs_can_be_upper_case(self):
+ # input mac addresses on rackspace may be upper case
+ my_netdata = deepcopy(NETWORK_DATA)
+ for link in my_netdata["links"]:
+ link["ethernet_mac_address"] = link["ethernet_mac_address"].upper()
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ def test_unknown_device_types_accepted(self):
+ # If we don't recognise a link, we should treat it as physical for a
+ # best-effort boot
+ my_netdata = deepcopy(NETWORK_DATA)
+ my_netdata["links"][0]["type"] = "my-special-link-type"
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ # We should, however, warn the user that we don't recognise the type
+ self.assertIn(
+ "Unknown network_data link type (my-special-link-type)",
+ self.logs.getvalue(),
+ )
+
+
+def cfg_ds_from_dir(base_d, files=None):
+ run = os.path.join(base_d, "run")
+ os.mkdir(run)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": run})
+ )
+ cfg_ds.seed_dir = os.path.join(base_d, "seed")
+ if files:
+ populate_dir(cfg_ds.seed_dir, files)
+ cfg_ds.known_macs = KNOWN_MACS.copy()
+ if not cfg_ds.get_data():
+ raise RuntimeError(
+ "Data source did not extract itself from seed directory %s"
+ % cfg_ds.seed_dir
+ )
+ return cfg_ds
+
+
+def populate_ds_from_read_config(cfg_ds, source, results):
+ """Patch the DataSourceConfigDrive from the results of
+ read_config_drive_dir hopefully in line with what it would have
+ if cfg_ds.get_data had been successfully called"""
+ cfg_ds.source = source
+ cfg_ds.metadata = results.get("metadata")
+ cfg_ds.ec2_metadata = results.get("ec2-metadata")
+ cfg_ds.userdata_raw = results.get("userdata")
+ cfg_ds.version = results.get("version")
+ cfg_ds.network_json = results.get("networkdata")
+ cfg_ds._network_config = openstack.convert_net_json(
+ cfg_ds.network_json, known_macs=KNOWN_MACS
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py
new file mode 100644
index 00000000..f3e6224e
--- /dev/null
+++ b/tests/unittests/sources/test_digitalocean.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2014 Neal Shrader
+#
+# Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
+# Author: Scott Moser <smoser@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
+from tests.unittests.helpers import CiTestCase, mock
+
+DO_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co",
+]
+DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
+
+# the following JSON was taken from a droplet (that's why it's a string)
+DO_META = json.loads(
+ """
+{
+ "droplet_id": "22532410",
+ "hostname": "utl-96268",
+ "vendor_data": "vendordata goes here",
+ "user_data": "userdata goes here",
+ "public_keys": "",
+ "auth_key": "authorization_key",
+ "region": "nyc3",
+ "interfaces": {
+ "private": [
+ {
+ "ipv4": {
+ "ip_address": "10.132.6.205",
+ "netmask": "255.255.0.0",
+ "gateway": "10.132.0.1"
+ },
+ "mac": "04:01:57:d1:9e:02",
+ "type": "private"
+ }
+ ],
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "192.0.0.20",
+ "netmask": "255.255.255.0",
+ "gateway": "104.236.0.1"
+ },
+ "ipv6": {
+ "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+ "cidr": 64,
+ "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.0.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.0.0.1"
+ },
+ "mac": "04:01:57:d1:9e:01",
+ "type": "public"
+ }
+ ]
+ },
+ "floating_ip": {
+ "ipv4": {
+ "active": false
+ }
+ },
+ "dns": {
+ "nameservers": [
+ "2001:4860:4860::8844",
+ "2001:4860:4860::8888",
+ "8.8.8.8"
+ ]
+ }
+}
+"""
+)
+
+# This has no private interface
+DO_META_2 = {
+ "droplet_id": 27223699,
+ "hostname": "smtest1",
+ "vendor_data": "\n".join(
+ [
+ '"Content-Type: multipart/mixed; '
+ 'boundary="===============8645434374073493512=="',
+ "MIME-Version: 1.0",
+ "",
+ "--===============8645434374073493512==",
+ "MIME-Version: 1.0"
+ 'Content-Type: text/cloud-config; charset="us-ascii"'
+ "Content-Transfer-Encoding: 7bit"
+ 'Content-Disposition: attachment; filename="cloud-config"'
+ "",
+ "#cloud-config",
+ "disable_root: false",
+ "manage_etc_hosts: true",
+ "",
+ "",
+ "--===============8645434374073493512==",
+ ]
+ ),
+ "public_keys": ["ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"],
+ "auth_key": "88888888888888888888888888888888",
+ "region": "nyc3",
+ "interfaces": {
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "45.55.249.133",
+ "netmask": "255.255.192.0",
+ "gateway": "45.55.192.1",
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.17.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.17.0.1",
+ },
+ "mac": "ae:cc:08:7c:88:00",
+ "type": "public",
+ }
+ ]
+ },
+ "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "tags": None,
+}
+
+DO_META["public_keys"] = DO_SINGLE_KEY
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
+def _mock_dmi():
+ return (True, DO_META.get("id"))
+
+
+class TestDataSourceDigitalOcean(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestDataSourceDigitalOcean, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.use_ip4LL = False
+ if get_sysinfo is not None:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_sysinfo")
+ def test_returns_false_not_on_docean(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = DO_META.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(DO_META.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw())
+ self.assertEqual(DO_META.get("region"), ds.availability_zone)
+ self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id())
+ self.assertEqual(DO_META.get("hostname"), ds.get_hostname())
+
+ # Single key
+ self.assertEqual(
+ [DO_META.get("public_keys")], ds.get_public_ssh_keys()
+ )
+
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_multiple_ssh_keys(self, mock_readmd):
+ metadata = DO_META.copy()
+ metadata["public_keys"] = DO_MULTIPLE_KEYS
+ mock_readmd.return_value = metadata.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ # Multiple keys
+ self.assertEqual(metadata["public_keys"], ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestNetworkConvert(CiTestCase):
+ def _get_networking(self):
+ self.m_get_by_mac.return_value = {
+ "04:01:57:d1:9e:01": "ens1",
+ "04:01:57:d1:9e:02": "ens2",
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META["interfaces"], DO_META["dns"]["nameservers"]
+ )
+ self.assertIn("config", netcfg)
+ return netcfg
+
+ def setUp(self):
+ super(TestNetworkConvert, self).setUp()
+ self.add_patch("cloudinit.net.get_interfaces_by_mac", "m_get_by_mac")
+
+ def test_networking_defined(self):
+ netcfg = self._get_networking()
+ self.assertIsNotNone(netcfg)
+ dns_defined = False
+
+ for part in netcfg.get("config"):
+ n_type = part.get("type")
+ print("testing part ", n_type, "\n", json.dumps(part, indent=3))
+
+ if n_type == "nameserver":
+ n_address = part.get("address")
+ self.assertIsNotNone(n_address)
+ self.assertEqual(len(n_address), 3)
+
+ dns_resolvers = DO_META["dns"]["nameservers"]
+ for x in n_address:
+ self.assertIn(x, dns_resolvers)
+ dns_defined = True
+
+ else:
+ n_subnets = part.get("type")
+ n_name = part.get("name")
+ n_mac = part.get("mac_address")
+
+ self.assertIsNotNone(n_type)
+ self.assertIsNotNone(n_subnets)
+ self.assertIsNotNone(n_name)
+ self.assertIsNotNone(n_mac)
+
+ self.assertTrue(dns_defined)
+
+ def _get_nic_definition(self, int_type, expected_name):
+ """helper function to return if_type (i.e. public) and the expected
+ name used by cloud-init (i.e eth0)"""
+ netcfg = self._get_networking()
+ meta_def = (DO_META.get("interfaces")).get(int_type)[0]
+
+ self.assertEqual(int_type, meta_def.get("type"))
+
+ for nic_def in netcfg.get("config"):
+ print(nic_def)
+ if nic_def.get("name") == expected_name:
+ return nic_def, meta_def
+
+ def _get_match_subn(self, subnets, ip_addr):
+ """get the matching subnet definition based on ip address"""
+ for subn in subnets:
+ address = subn.get("address")
+ self.assertIsNotNone(address)
+
+            # equals won't work because ipv6 addresses are in
+            # cidr notation, e.g. fe00::1/64
+ if ip_addr in address:
+ print(json.dumps(subn, indent=3))
+ return subn
+
+ def test_correct_gateways_defined(self):
+ """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
+ netcfg = self._get_networking()
+ gateways = []
+ for nic_def in netcfg.get("config"):
+ if nic_def.get("type") != "physical":
+ continue
+ for subn in nic_def.get("subnets"):
+ if "gateway" in subn:
+ gateways.append(subn.get("gateway"))
+
+        # we should have two gateways, one ipv4 and one ipv6
+ self.assertEqual(len(gateways), 2)
+
+        # make sure the ipv4 gateway is there
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIn(ipv4_def.get("gateway"), gateways)
+
+        # make sure the ipv6 gateway is there
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIn(ipv6_def.get("gateway"), gateways)
+
+ def test_public_interface_defined(self):
+ """test that the public interface is defined as eth0"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ self.assertEqual("eth0", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_private_interface_defined(self):
+ """test that the private interface is defined as eth1"""
+ (nic_def, meta_def) = self._get_nic_definition("private", "eth1")
+ self.assertEqual("eth1", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_public_interface_ipv6(self):
+ """test public ipv6 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIsNotNone(ipv6_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv6_def.get("ip_address")
+ )
+
+ cidr_notated_address = "{0}/{1}".format(
+ ipv6_def.get("ip_address"), ipv6_def.get("cidr")
+ )
+
+ self.assertEqual(cidr_notated_address, subn_def.get("address"))
+ self.assertEqual(ipv6_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertEqual(ipv4_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_anchor_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("anchor_ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertNotIn("gateway", subn_def)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_without_private(self, m_get_by_mac):
+ m_get_by_mac.return_value = {
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META_2["interfaces"], DO_META_2["dns"]["nameservers"]
+ )
+
+ # print(netcfg)
+ byname = {}
+ for i in netcfg["config"]:
+ if "name" in i:
+ if i["name"] in byname:
+ raise ValueError(
+ "name '%s' in config twice: %s" % (i["name"], netcfg)
+ )
+ byname[i["name"]] = i
+ self.assertTrue("eth0" in byname)
+ self.assertTrue("subnets" in byname["eth0"])
+ eth0 = byname["eth0"]
+ self.assertEqual(
+ sorted(["45.55.249.133", "10.17.0.5"]),
+ sorted([i["address"] for i in eth0["subnets"]]),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/sources/test_ec2.py
index a93f2195..b376660d 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -1,35 +1,37 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-import httpretty
import json
-import requests
from unittest import mock
+import httpretty
+import requests
+
from cloudinit import helpers
from cloudinit.sources import DataSourceEc2 as ec2
-from cloudinit.tests import helpers as test_helpers
-
+from tests.unittests import helpers as test_helpers
DYNAMIC_METADATA = {
"instance-identity": {
- "document": json.dumps({
- "devpayProductCodes": None,
- "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
- "availabilityZone": "us-west-2b",
- "privateIp": "10.158.112.84",
- "version": "2017-09-30",
- "instanceId": "my-identity-id",
- "billingProducts": None,
- "instanceType": "t2.micro",
- "accountId": "123456789012",
- "imageId": "ami-5fb8c835",
- "pendingTime": "2016-11-19T16:32:11Z",
- "architecture": "x86_64",
- "kernelId": None,
- "ramdiskId": None,
- "region": "us-west-2"
- })
+ "document": json.dumps(
+ {
+ "devpayProductCodes": None,
+ "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
+ "availabilityZone": "us-west-2b",
+ "privateIp": "10.158.112.84",
+ "version": "2017-09-30",
+ "instanceId": "my-identity-id",
+ "billingProducts": None,
+ "instanceType": "t2.micro",
+ "accountId": "123456789012",
+ "imageId": "ami-5fb8c835",
+ "pendingTime": "2016-11-19T16:32:11Z",
+ "architecture": "x86_64",
+ "kernelId": None,
+ "ramdiskId": None,
+ "region": "us-west-2",
+ }
+ )
}
}
@@ -52,7 +54,7 @@ DEFAULT_METADATA = {
"local-hostname": "ip-172-3-3-15.us-east-2.compute.internal",
"local-ipv4": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
- "metrics": {"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"},
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -61,13 +63,15 @@ DEFAULT_METADATA = {
"interface-id": "eni-e44ef49e",
"ipv4-associations": {"13.59.77.202": "172.3.3.15"},
"ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc",
- "local-hostname": ("ip-172-3-3-15.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-15.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
"owner-id": "950047163771",
- "public-hostname": ("ec2-13-59-77-202.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-13-59-77-202.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "13.59.77.202",
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -77,20 +81,22 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56"
+ "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56",
},
"06:17:04:d7:26:08": {
- "device-number": "1", # Only IPv4 local config
+ "device-number": "1", # Only IPv4 local config
"interface-id": "eni-e44ef49f",
"ipv4-associations": {"": "172.3.3.16"},
"ipv6s": "", # No IPv6 config
- "local-hostname": ("ip-172-3-3-16.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-16.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.16",
"mac": "06:17:04:d7:26:08",
"owner-id": "950047163771",
- "public-hostname": ("ec2-172-3-3-16.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-172-3-3-16.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "", # No public ipv4 config
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -100,8 +106,8 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": ""
- }
+ "vpc-ipv6-cidr-blocks": "",
+ },
}
}
},
@@ -123,24 +129,17 @@ DEFAULT_METADATA = {
NIC1_MD_IPV4_IPV6_MULTI_IP = {
"device-number": "0",
"interface-id": "eni-0d6335689899ce9cc",
- "ipv4-associations": {
- "18.218.219.181": "172.31.44.13"
- },
+ "ipv4-associations": {"18.218.219.181": "172.31.44.13"},
"ipv6s": [
"2600:1f16:292:100:c187:593c:4349:136",
"2600:1f16:292:100:f153:12a3:c37c:11f9",
- "2600:1f16:292:100:f152:2222:3333:4444"
- ],
- "local-hostname": ("ip-172-31-44-13.us-east-2."
- "compute.internal"),
- "local-ipv4s": [
- "172.31.44.13",
- "172.31.45.70"
+ "2600:1f16:292:100:f152:2222:3333:4444",
],
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4s": ["172.31.44.13", "172.31.45.70"],
"mac": "0a:07:84:3d:6e:38",
"owner-id": "329910648901",
- "public-hostname": ("ec2-18-218-219-181.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4s": "18.218.219.181",
"security-group-ids": "sg-0c387755222ba8d2e",
"security-groups": "launch-wizard-4",
@@ -150,7 +149,7 @@ NIC1_MD_IPV4_IPV6_MULTI_IP = {
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56",
}
NIC2_MD = {
@@ -166,30 +165,22 @@ NIC2_MD = {
"subnet-ipv4-cidr-block": "172.31.32.0/20",
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
- "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
}
SECONDARY_IP_METADATA_2018_09_24 = {
"ami-id": "ami-0986c2ac728528ac2",
"ami-launch-index": "0",
"ami-manifest-path": "(unknown)",
- "block-device-mapping": {
- "ami": "/dev/sda1",
- "root": "/dev/sda1"
- },
- "events": {
- "maintenance": {
- "history": "[]",
- "scheduled": "[]"
- }
- },
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "events": {"maintenance": {"history": "[]", "scheduled": "[]"}},
"hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"identity-credentials": {
"ec2": {
"info": {
"AccountId": "329910648901",
"Code": "Success",
- "LastUpdated": "2019-07-06T14:22:56Z"
+ "LastUpdated": "2019-07-06T14:22:56Z",
}
}
},
@@ -199,9 +190,7 @@ SECONDARY_IP_METADATA_2018_09_24 = {
"local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"local-ipv4": "172.31.44.13",
"mac": "0a:07:84:3d:6e:38",
- "metrics": {
- "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
- },
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -209,27 +198,17 @@ SECONDARY_IP_METADATA_2018_09_24 = {
}
}
},
- "placement": {
- "availability-zone": "us-east-2c"
- },
+ "placement": {"availability-zone": "us-east-2c"},
"profile": "default-hvm",
- "public-hostname": (
- "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4": "18.218.219.181",
- "public-keys": {
- "yourkeyname,e": [
- "ssh-rsa AAAAW...DZ yourkeyname"
- ]
- },
+ "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]},
"reservation-id": "r-09b4917135cdd33be",
"security-groups": "launch-wizard-4",
- "services": {
- "domain": "amazonaws.com",
- "partition": "aws"
- }
+ "services": {"domain": "amazonaws.com", "partition": "aws"},
}
-M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
def _register_ssh_keys(rfunc, base_url, keys_data):
@@ -250,9 +229,9 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
"""
base_url = base_url.rstrip("/")
- odd_index = '\n'.join(
- ["{0}={1}".format(n, name)
- for n, name in enumerate(sorted(keys_data))])
+ odd_index = "\n".join(
+ ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))]
+ )
rfunc(base_url, odd_index)
rfunc(base_url + "/", odd_index)
@@ -260,7 +239,7 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
for n, name in enumerate(sorted(keys_data)):
val = keys_data[name]
if isinstance(val, list):
- val = '\n'.join(val)
+ val = "\n".join(val)
burl = base_url + "/%s" % n
rfunc(burl, "openssh-key")
rfunc(burl + "/", "openssh-key")
@@ -281,6 +260,7 @@ def register_mock_metaserver(base_url, data):
base_url/mac with 00:16:3e:00:00:00
In the index, references to lists or dictionaries have a trailing /.
"""
+
def register_helper(register, base_url, body):
if not isinstance(base_url, str):
register(base_url, body)
@@ -289,25 +269,24 @@ def register_mock_metaserver(base_url, data):
if isinstance(body, str):
register(base_url, body)
elif isinstance(body, list):
- register(base_url, '\n'.join(body) + '\n')
- register(base_url + '/', '\n'.join(body) + '\n')
+ register(base_url, "\n".join(body) + "\n")
+ register(base_url + "/", "\n".join(body) + "\n")
elif isinstance(body, dict):
vals = []
for k, v in body.items():
- if k == 'public-keys':
- _register_ssh_keys(
- register, base_url + '/public-keys/', v)
+ if k == "public-keys":
+ _register_ssh_keys(register, base_url + "/public-keys/", v)
continue
suffix = k.rstrip("/")
if not isinstance(v, (str, list)):
suffix += "/"
vals.append(suffix)
- url = base_url + '/' + suffix
+ url = base_url + "/" + suffix
register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
- register(base_url + '/', '\n'.join(vals) + '\n')
+ register(base_url, "\n".join(vals) + "\n")
+ register(base_url + "/", "\n".join(vals) + "\n")
elif body is None:
- register(base_url, 'not found', status=404)
+ register(base_url, "not found", status=404)
def myreg(*argc, **kwargs):
url = argc[0]
@@ -322,9 +301,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
maxDiff = None
valid_platform_data = {
- 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
- 'uuid_source': 'dmi',
- 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
+ "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
+ "uuid_source": "dmi",
+ "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
}
def setUp(self):
@@ -333,9 +312,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.metadata_addr = self.datasource.metadata_urls[0]
self.tmp = self.tmp_dir()
- def data_url(self, version, data_item='meta-data'):
+ def data_url(self, version, data_item="meta-data"):
"""Return a metadata url based on the version provided."""
- return '/'.join([self.metadata_addr, version, data_item])
+ return "/".join([self.metadata_addr, version, data_item])
def _patch_add_cleanup(self, mpath, *args, **kwargs):
p = mock.patch(mpath, *args, **kwargs)
@@ -345,7 +324,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
def _setup_ds(self, sys_cfg, platform_data, md, md_version=None):
self.uris = []
distro = {}
- paths = helpers.Paths({'run_dir': self.tmp})
+ paths = helpers.Paths({"run_dir": self.tmp})
if sys_cfg is None:
sys_cfg = {}
ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths)
@@ -354,32 +333,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
if platform_data is not None:
self._patch_add_cleanup(
"cloudinit.sources.DataSourceEc2._collect_platform_data",
- return_value=platform_data)
+ return_value=platform_data,
+ )
if md:
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
- token_url = self.data_url('latest', data_item='api/token')
- register_mock_metaserver(token_url, 'API-TOKEN')
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
+ token_url = self.data_url("latest", data_item="api/token")
+ register_mock_metaserver(token_url, "API-TOKEN")
for version in all_versions:
- metadata_url = self.data_url(version) + '/'
+ metadata_url = self.data_url(version) + "/"
if version == md_version:
# Register all metadata for desired version
register_mock_metaserver(
- metadata_url, md.get('md', DEFAULT_METADATA))
+ metadata_url, md.get("md", DEFAULT_METADATA)
+ )
userdata_url = self.data_url(
- version, data_item='user-data')
- register_mock_metaserver(userdata_url, md.get('ud', ''))
+ version, data_item="user-data"
+ )
+ register_mock_metaserver(userdata_url, md.get("ud", ""))
identity_url = self.data_url(
- version, data_item='dynamic/instance-identity')
+ version, data_item="dynamic/instance-identity"
+ )
register_mock_metaserver(
- identity_url, md.get('id', DYNAMIC_METADATA))
+ identity_url, md.get("id", DYNAMIC_METADATA)
+ )
else:
- instance_id_url = metadata_url + 'instance-id'
+ instance_id_url = metadata_url + "instance-id"
if version == ds.min_metadata_version:
# Add min_metadata_version service availability check
register_mock_metaserver(
- instance_id_url, DEFAULT_METADATA['instance-id'])
+ instance_id_url, DEFAULT_METADATA["instance-id"]
+ )
else:
# Register 404s for all unrequested extended versions
register_mock_metaserver(instance_id_url, None)
@@ -389,24 +375,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": "06:17:04:d7:26:09"},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -418,24 +413,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1.lower()},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -447,27 +451,38 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': SECONDARY_IP_METADATA_2018_09_24})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": SECONDARY_IP_METADATA_2018_09_24},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'addresses': ['172.31.45.70/20',
- '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "addresses": [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -475,12 +490,13 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property is cached in DataSourceEc2."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- ds._network_config = {'cached': 'data'}
- self.assertEqual({'cached': 'data'}, ds.network_config)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ds._network_config = {"cached": "data"}
+ self.assertEqual({"cached": "data"}, ds.network_config)
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -488,28 +504,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
which lacked the newly required network key.
"""
old_metadata = copy.deepcopy(DEFAULT_METADATA)
- old_metadata.pop('network')
+ old_metadata.pop("network")
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': old_metadata})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": old_metadata},
+ )
self.assertTrue(ds.get_data())
# Provide new revision of metadata that contains network data
register_mock_metaserver(
- 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac'
- ds.fallback_nic = 'eth9'
+ "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
+ )
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac"
+ ds.fallback_nic = "eth9"
with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
nc = ds.network_config # Will re-crawl network metadata
self.assertIsNotNone(nc)
self.assertIn(
- 'Refreshing stale metadata from prior to upgrade',
- self.logs.getvalue())
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ "Refreshing stale metadata from prior to upgrade",
+ self.logs.getvalue(),
+ )
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(expected, ds.network_config)
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
@@ -522,40 +549,46 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
# Mock 404s on all versions except latest
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
for ver in all_versions[:-1]:
register_mock_metaserver(
- 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver),
- None)
- ds.metadata_address = 'http://169.254.169.254'
+ "http://169.254.169.254/{0}/meta-data/instance-id".format(ver),
+ None,
+ )
+ ds.metadata_address = "http://169.254.169.254"
register_mock_metaserver(
- '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]),
- DEFAULT_METADATA)
+ "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
+ DEFAULT_METADATA,
+ )
# Register dynamic/instance-identity document which we now read.
register_mock_metaserver(
- '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]),
- DYNAMIC_METADATA)
+ "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]),
+ DYNAMIC_METADATA,
+ )
ds._cloud_name = ec2.CloudNames.AWS
# Setup cached metadata on the Datasource
ds.metadata = DEFAULT_METADATA
- self.assertEqual('my-identity-id', ds.get_instance_id())
+ self.assertEqual("my-identity-id", ds.get_instance_id())
def test_classic_instance_true(self):
"""If no vpc-id in metadata, is_classic_instance must return true."""
md_copy = copy.deepcopy(DEFAULT_METADATA)
- ifaces_md = md_copy.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
- del mac_data['vpc-id']
+ ifaces_md = md_copy.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
+ del mac_data["vpc-id"]
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': md_copy})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": md_copy},
+ )
self.assertTrue(ds.get_data())
self.assertTrue(ds.is_classic_instance())
@@ -563,8 +596,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""If vpc-id in metadata, is_classic_instance must return false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
@@ -572,108 +606,117 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""Inaccessibility of http://169.254.169.254 are retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
conn_error = requests.exceptions.ConnectionError(
- '[Errno 113] no route to host'
+ "[Errno 113] no route to host"
)
- mock_success = mock.MagicMock(contents=b'fakesuccess')
+ mock_success = mock.MagicMock(contents=b"fakesuccess")
mock_success.ok.return_value = True
- with mock.patch('cloudinit.url_helper.readurl') as m_readurl:
+ with mock.patch("cloudinit.url_helper.readurl") as m_readurl:
m_readurl.side_effect = (conn_error, conn_error, mock_success)
- with mock.patch('cloudinit.url_helper.time.sleep'):
+ with mock.patch("cloudinit.url_helper.time.sleep"):
self.assertTrue(ds.wait_for_metadata_service())
# All three requests should have targeted /latest/api/token
self.assertEqual(3, len(m_readurl.call_args_list))
for readurl_call in m_readurl.call_args_list:
- self.assertIn('latest/api/token', readurl_call[0][0])
+ self.assertIn("latest/api/token", readurl_call[0][0])
def test_aws_token_403_fails_without_retries(self):
"""Verify that 403s fetching AWS tokens are not retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
- token_url = self.data_url('latest', data_item='api/token')
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
+ token_url = self.data_url("latest", data_item="api/token")
httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
self.assertFalse(ds.get_data())
# Just one /latest/api/token request
logs = self.logs.getvalue()
failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
expected_logs = [
- 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is'
- ' disabled. Aborting.',
+ "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is"
+ " disabled. Aborting.",
"WARNING: IMDS's HTTP endpoint is probably disabled",
- failed_put_log
+ failed_put_log,
]
for log in expected_logs:
self.assertIn(log, logs)
self.assertEqual(
1,
- len([line for line in logs.splitlines() if failed_put_log in line])
+ len(
+ [line for line in logs.splitlines() if failed_put_log in line]
+ ),
)
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
all_logs = self.logs.getvalue().splitlines()
REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
- logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
+ logs_with_token = [log for log in all_logs if "API-TOKEN" in log]
self.assertEqual(1, len(logs_with_redacted_ttl))
self.assertEqual(81, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual('aws', ds.cloud_name)
- self.assertEqual('ec2', ds.platform_type)
- self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform)
+ self.assertEqual("aws", ds.cloud_name)
+ self.assertEqual("ec2", ds.platform_type)
+ self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
@@ -682,24 +725,28 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
platform_attrs = [
- attr for attr in ec2.CloudNames.__dict__.keys()
- if not attr.startswith('__')]
+ attr
+ for attr in ec2.CloudNames.__dict__.keys()
+ if not attr.startswith("__")
+ ]
for attr_name in platform_attrs:
platform_name = getattr(ec2.CloudNames, attr_name)
- if platform_name != 'aws':
+ if platform_name != "aws":
ds._cloud_name = platform_name
ret = ds.get_data()
- self.assertEqual('ec2', ds.platform_type)
+ self.assertEqual("ec2", ds.platform_type)
self.assertFalse(ret)
message = (
"Local Ec2 mode only supported on ('aws',),"
- ' not {0}'.format(platform_name))
+ " not {0}".format(platform_name)
+ )
self.assertIn(message, self.logs.getvalue())
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -709,20 +756,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
self.assertIn(
"FreeBSD doesn't support running dhclient with -sf",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
- def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp,
- m_fallback_nic, m_net):
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ def test_ec2_local_performs_dhcp_on_non_bsd(
+ self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
+ ):
"""Ec2Local returns True for valid platform data on non-BSD with dhcp.
DataSourceEc2Local will setup initial IPv4 network via dhcp discovery.
@@ -730,31 +780,41 @@ class TestEc2(test_helpers.HttprettyTestCase):
When the platform data is valid, return True.
"""
- m_fallback_nic.return_value = 'eth9'
+ m_fallback_nic.return_value = "eth9"
m_is_bsd.return_value = False
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with('eth9', None)
+ m_dhcp.assert_called_once_with("eth9", None)
m_net.assert_called_once_with(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertIn('Crawl of metadata service took', self.logs.getvalue())
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertIn("Crawl of metadata service took", self.logs.getvalue())
class TestGetSecondaryAddresses(test_helpers.CiTestCase):
- mac = '06:17:04:d7:26:ff'
+ mac = "06:17:04:d7:26:ff"
with_logs = True
def test_md_with_no_secondary_addresses(self):
@@ -764,26 +824,34 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
def test_md_with_secondary_v4_and_v6_addresses(self):
"""All secondary addresses are returned from nic metadata"""
self.assertEqual(
- ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
+ [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac),
+ )
def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
"""Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
- invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
- invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
+ invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected"
+ invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is"
self.assertEqual(
- ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
+ [
+ "172.31.45.70/24",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac),
+ )
expected_logs = [
"WARNING: Could not parse subnet-ipv4-cidr-block"
" something-unexpected for mac 06:17:04:d7:26:ff."
" ipv4 network config prefix defaults to /24",
"WARNING: Could not parse subnet-ipv6-cidr-block"
" not/sure/what/this/is for mac 06:17:04:d7:26:ff."
- " ipv6 network config prefix defaults to /128"
+ " ipv6 network config prefix defaults to /128",
]
logs = self.logs.getvalue()
for log in expected_logs:
@@ -791,188 +859,267 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
-
def setUp(self):
super(TestConvertEc2MetadataNetworkConfig, self).setUp()
- self.mac1 = '06:17:04:d7:26:09'
+ self.mac1 = "06:17:04:d7:26:09"
interface_dict = copy.deepcopy(
- DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1])
+ DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1]
+ )
# These tests are written assuming the base interface doesn't have IPv6
- interface_dict.pop('ipv6s')
+ interface_dict.pop("ipv6s")
self.network_metadata = {
- 'interfaces': {'macs': {self.mac1: interface_dict}}}
+ "interfaces": {"macs": {self.mac1: interface_dict}}
+ }
def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
"""Any mac absent from metadata is skipped by network config."""
- macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'}
+ macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"}
# DE:AD:BE:EF:FF:FF represented by OS but not in metadata
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- self.network_metadata, macs_to_nics))
+ self.network_metadata, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self):
"""Config dhcp6 when ipv6s is in metadata for a mac."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
"""Config dhcp4 when there are no public addresses in public-ipv4s."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['local-ipv4s'] = '172.3.3.15'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["local-ipv4s"] = "172.3.3.15"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self):
"""Config dhcp4 on fallback_nic when there are no ipv4 addresses."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['public-ipv4s'] = ''
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["public-ipv4s"] = ""
# When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
+ network_metadata_ipv6, macs_to_nics, fallback_nic="eth9"
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
"""When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
"""DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
- mac2 = '06:17:04:d7:26:08'
- macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+ mac2 = "06:17:04:d7:26:08"
+ macs_to_nics = {self.mac1: "eth9", mac2: "eth10"}
network_metadata_both = copy.deepcopy(self.network_metadata)
# Add 2nd nic info
- network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {
- 'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
- 'eth10': {
- 'match': {'macaddress': mac2}, 'set-name': 'eth10',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
- 'dhcp6': False}}}
+ network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ },
+ "eth10": {
+ "match": {"macaddress": mac2},
+ "set-name": "eth10",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ "dhcp6": False,
+ },
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
"""Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
"""Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1},
- 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"}
self.assertEqual(
expected,
- ec2.convert_ec2_metadata_network_config(self.network_metadata))
+ ec2.convert_ec2_metadata_network_config(self.network_metadata),
+ )
class TestIdentifyPlatform(test_helpers.CiTestCase):
-
def collmock(self, **kwargs):
"""return non-special _collect_platform_data updated with changes."""
unspecial = {
- 'asset_tag': '3857-0037-2746-7462-1818-3997-77',
- 'serial': 'H23-C4J3JV-R6',
- 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
- 'uuid_source': 'dmi',
- 'vendor': 'tothecloud',
+ "asset_tag": "3857-0037-2746-7462-1818-3997-77",
+ "serial": "H23-C4J3JV-R6",
+ "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
+ "uuid_source": "dmi",
+ "vendor": "tothecloud",
}
unspecial.update(**kwargs)
return unspecial
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack(self, m_collect):
- """zstack should be identified if chassis-asset-tag ends in .zstack.io
+ """zstack should be identified if chassis-asset-tag
+ ends in .zstack.io
"""
- m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.zstack.io")
self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack_full_domain_only(self, m_collect):
- """zstack asset-tag matching should match only on full domain boundary.
+ """zstack asset-tag matching should match only on
+ full domain boundary.
"""
- m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloud')
+ m_collect.return_value = self.collmock(vendor="e24cloud")
self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud_negative(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloudyday')
+ m_collect.return_value = self.collmock(vendor="e24cloudyday")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
new file mode 100644
index 00000000..591256d8
--- /dev/null
+++ b/tests/unittests/sources/test_exoscale.py
@@ -0,0 +1,241 @@
+# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
+# Author: Christopher Glass <christopher.glass@exoscale.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
+import httpretty
+import requests
+
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceExoscale import (
+ API_VERSION,
+ METADATA_URL,
+ PASSWORD_SERVER_PORT,
+ DataSourceExoscale,
+ get_password,
+ read_metadata,
+)
+from tests.unittests.helpers import HttprettyTestCase, mock
+
+TEST_PASSWORD_URL = "{}:{}/{}/".format(
+ METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION
+)
+
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION)
+
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION)
+
+
+@httpretty.activate
+class TestDatasourceExoscale(HttprettyTestCase):
+ def setUp(self):
+ super(TestDatasourceExoscale, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.password_url = TEST_PASSWORD_URL
+ self.metadata_url = TEST_METADATA_URL
+ self.userdata_url = TEST_USERDATA_URL
+
+ def test_password_saved(self):
+ """The password is not set when it is not found
+ in the metadata service."""
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body="saved_password"
+ )
+ self.assertFalse(get_password())
+
+ def test_password_empty(self):
+ """No password is set if the metadata service returns
+ an empty string."""
+ httpretty.register_uri(httpretty.GET, self.password_url, body="")
+ self.assertFalse(get_password())
+
+ def test_password(self):
+ """The password is set to what is found in the metadata
+ service."""
+ expected_password = "p@ssw0rd"
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ password = get_password()
+ self.assertEqual(expected_password, password)
+
+ def test_activate_removes_set_passwords_semaphore(self):
+ """Allow set_passwords to run every boot by removing the semaphore."""
+ path = helpers.Paths({"cloud_dir": self.tmp})
+ sem_dir = self.tmp_path("instance/sem", dir=self.tmp)
+ util.ensure_dir(sem_dir)
+ sem_file = os.path.join(sem_dir, "config_set_passwords")
+ with open(sem_file, "w") as stream:
+ stream.write("")
+ ds = DataSourceExoscale({}, None, path)
+ ds.activate(None, None)
+ self.assertFalse(os.path.exists(sem_file))
+
+ def test_get_data(self):
+ """The datasource conforms to expected behavior when supplied
+ full test data."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_password = "p@ssw0rd"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(
+ ds.get_config_obj(),
+ {
+ "ssh_pwauth": True,
+ "password": expected_password,
+ "chpasswd": {
+ "expire": False,
+ },
+ },
+ )
+
+ def test_get_data_saved_password(self):
+ """The datasource conforms to expected behavior when saved_password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = "saved_password"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ def test_get_data_no_password(self):
+ """The datasource conforms to expected behavior when no password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = ""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ @mock.patch("cloudinit.sources.DataSourceExoscale.get_password")
+ def test_read_metadata_when_password_server_unreachable(self, m_password):
+ """The read_metadata function returns partial results in case the
+ password server (only) is unreachable."""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+
+ m_password.side_effect = requests.Timeout("Fake Connection Timeout")
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+
+ result = read_metadata()
+
+ self.assertIsNone(result.get("password"))
+ self.assertEqual(
+ result.get("user-data").decode("utf-8"), expected_userdata
+ )
+
+ def test_non_viable_platform(self):
+ """The datasource fails fast when the platform is not viable."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: False
+ self.assertFalse(ds._get_data())
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
new file mode 100644
index 00000000..e030931b
--- /dev/null
+++ b/tests/unittests/sources/test_gce.py
@@ -0,0 +1,416 @@
+# Copyright (C) 2014 Vaidas Jablonskis
+#
+# Author: Vaidas Jablonskis <jablonskis@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import datetime
+import json
+import re
+from base64 import b64decode, b64encode
+from unittest import mock
+from urllib.parse import urlparse
+
+import httpretty
+
+from cloudinit import distros, helpers, settings
+from cloudinit.sources import DataSourceGCE
+from tests.unittests import helpers as test_helpers
+
+GCE_META = {
+ "instance/id": "123",
+ "instance/zone": "foo/bar",
+ "instance/hostname": "server.project-foo.local",
+}
+
+GCE_META_PARTIAL = {
+ "instance/id": "1234",
+ "instance/hostname": "server.project-bar.local",
+ "instance/zone": "bar/baz",
+}
+
+GCE_META_ENCODING = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": b64encode(b"#!/bin/echo baz\n").decode("utf-8"),
+ "user-data-encoding": "base64",
+ },
+}
+
+GCE_USER_DATA_TEXT = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": "#!/bin/sh\necho hi mom\ntouch /run/up-now\n",
+ },
+}
+
+HEADERS = {"Metadata-Flavor": "Google"}
+MD_URL_RE = re.compile(
+ r"http://metadata.google.internal/computeMetadata/v1/.*"
+)
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes/hostkeys/"
+)
+
+
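+# Register one httpretty callback for the whole metadata URL space: plain
+# paths return the stored value, trailing-slash ("recursive") paths return
+# it JSON-encoded, and unknown paths answer 404.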
+def _set_mock_metadata(gce_meta=None):
+ if gce_meta is None:
+ gce_meta = GCE_META
+
+ def _request_callback(method, uri, headers):
+ url_path = urlparse(uri).path
+ if url_path.startswith("/computeMetadata/v1/"):
+ path = url_path.split("/computeMetadata/v1/")[1:][0]
+ recursive = path.endswith("/")
+ path = path.rstrip("/")
+ else:
+ path = None
+ if path in gce_meta:
+ response = gce_meta.get(path)
+ if recursive:
+ response = json.dumps(response)
+ return (200, headers, response)
+ else:
+ return (404, headers, "")
+
+ # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
+ httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
+
+
+@httpretty.activate
+class TestDataSourceGCE(test_helpers.HttprettyTestCase):
+ def _make_distro(self, dtype, def_user=None):
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
+ return distro
+
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+ ppatch = self.m_platform_reports_gce = mock.patch(
+ "cloudinit.sources.DataSourceGCE.platform_reports_gce"
+ )
+ self.m_platform_reports_gce = ppatch.start()
+ self.m_platform_reports_gce.return_value = True
+ self.addCleanup(ppatch.stop)
+ self.add_patch("time.sleep", "m_sleep") # just to speed up tests
+ super(TestDataSourceGCE, self).setUp()
+
+ def test_connection(self):
+ _set_mock_metadata()
+ success = self.ds.get_data()
+ self.assertTrue(success)
+
+ req_header = httpretty.last_request().headers
+ for header_name, expected_value in HEADERS.items():
+ self.assertEqual(expected_value, req_header.get(header_name))
+
+ def test_metadata(self):
+ # UnicodeDecodeError if set to ds.userdata instead of userdata_raw
+ meta = GCE_META.copy()
+ meta["instance/attributes/user-data"] = b"/bin/echo \xff\n"
+
+ _set_mock_metadata()
+ self.ds.get_data()
+
+ shostname = GCE_META.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ self.assertEqual(
+ GCE_META.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ self.assertEqual(
+ GCE_META.get("instance/attributes/user-data"),
+ self.ds.get_userdata_raw(),
+ )
+
+ # test partial metadata (missing user-data in particular)
+ def test_metadata_partial(self):
+ _set_mock_metadata(GCE_META_PARTIAL)
+ self.ds.get_data()
+
+ self.assertEqual(
+ GCE_META_PARTIAL.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ def test_userdata_no_encoding(self):
+ """check that user-data is read."""
+ _set_mock_metadata(GCE_USER_DATA_TEXT)
+ self.ds.get_data()
+ self.assertEqual(
+ GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(),
+ self.ds.get_userdata_raw(),
+ )
+
+ def test_metadata_encoding(self):
+ """user-data is base64 encoded if user-data-encoding is 'base64'."""
+ _set_mock_metadata(GCE_META_ENCODING)
+ self.ds.get_data()
+
+ instance_data = GCE_META_ENCODING.get("instance/attributes")
+ decoded = b64decode(instance_data.get("user-data"))
+ self.assertEqual(decoded, self.ds.get_userdata_raw())
+
+ def test_missing_required_keys_return_false(self):
+ for required_key in [
+ "instance/id",
+ "instance/zone",
+ "instance/hostname",
+ ]:
+ meta = GCE_META_PARTIAL.copy()
+ del meta[required_key]
+ _set_mock_metadata(meta)
+ self.assertEqual(False, self.ds.get_data())
+ httpretty.reset()
+
+ def test_no_ssh_keys_metadata(self):
+ _set_mock_metadata()
+ self.ds.get_data()
+ self.assertEqual([], self.ds.get_public_ssh_keys())
+
+ def test_cloudinit_ssh_keys(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
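+ # Only keys prefixed with the "cloudinit" user are accepted; entries for
+ # other users are ignored, whether they come from project or instance
+ # attributes.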
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
+ def test_default_user_ssh_keys(self, mock_ug_util):
+ mock_ug_util.normalize_users_groups.return_value = None, None
+ mock_ug_util.extract_default.return_value = "ubuntu", None
+ ubuntu_ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN,
+ self._make_distro("ubuntu"),
+ helpers.Paths({"run_dir": self.tmp_dir()}),
+ )
+
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ ubuntu_ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
+
+ def test_instance_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "sshKeys": "cloudinit:{0}".format(valid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(1)),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(2)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_block_project_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(0)),
+ "block-project-ssh-keys": "True",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(0)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_only_last_part_of_zone_used_for_availability_zone(self):
+ _set_mock_metadata()
+ r = self.ds.get_data()
+ self.assertEqual(True, r)
+ self.assertEqual("bar", self.ds.availability_zone)
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
+ def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
+ self.m_platform_reports_gce.return_value = False
+ ret = self.ds.get_data()
+ self.assertEqual(False, ret)
+ m_fetcher.assert_not_called()
+
+ def test_has_expired(self):
+ def _get_timestamp(days):
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
+ today = datetime.datetime.now()
+ timestamp = today + datetime.timedelta(days=days)
+ return timestamp.strftime(format_str)
+
+ past = _get_timestamp(-1)
+ future = _get_timestamp(1)
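+ # Only "google-ssh" keys carrying parseable JSON with a past "expireOn"
+ # timestamp count as expired; malformed or future entries are kept.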
+ ssh_keys = {
+ None: False,
+ "": False,
+ "Invalid": False,
+ "user:ssh-rsa key user@domain.com": False,
+ 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
+ "user:ssh-rsa key google-ssh": False,
+ "user:ssh-rsa key google-ssh {invalid:json}": False,
+ 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
+ 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
+ }
+
+ for key, expired in ssh_keys.items():
+ self.assertEqual(DataSourceGCE._has_expired(key), expired)
+
+ def test_parse_public_keys_non_ascii(self):
+ public_key_data = [
+ "cloudinit:rsa ssh-ke%s invalid" % chr(165),
+ "use%sname:rsa ssh-key" % chr(174),
+ "cloudinit:test 1",
+ "default:test 2",
+ "user:test 3",
+ ]
+ expected = ["test 1", "test 2"]
+ found = DataSourceGCE._parse_public_keys(
+ public_key_data, default_user="default"
+ )
+ self.assertEqual(sorted(found), sorted(expected))
+
+ @mock.patch("cloudinit.url_helper.readurl")
+ def test_publish_host_keys(self, m_readurl):
+ hostkeys = [("ssh-rsa", "asdfasdf"), ("ssh-ed25519", "qwerqwer")]
+ readurl_expected_calls = [
+ mock.call(
+ check_status=False,
+ data=b"asdfasdf",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-rsa"),
+ ),
+ mock.call(
+ check_status=False,
+ data=b"qwerqwer",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-ed25519"),
+ ),
+ ]
+ self.ds.publish_host_keys(hostkeys)
+ m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface"
+ )
+ def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCELocal(
+ sys_cfg={}, distro=None, paths=None
+ )
+ ds._get_data()
+ assert m_dhcp.call_count == 1
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
+ ds._get_data()
+ assert m_dhcp.call_count == 0
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index eadb92f1..f80ed45f 100644
--- a/tests/unittests/test_datasource/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -4,16 +4,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceHetzner
-import cloudinit.sources.helpers.hetzner as hc_helper
-from cloudinit import util, settings, helpers
-
-from cloudinit.tests.helpers import mock, CiTestCase
-
import base64
+
import pytest
-METADATA = util.load_yaml("""
+import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import helpers, settings, util
+from cloudinit.sources import DataSourceHetzner
+from tests.unittests.helpers import CiTestCase, mock
+
+METADATA = util.load_yaml(
+ """
hostname: cloudinit-test
instance-id: 123456
local-ipv4: ''
@@ -23,36 +24,31 @@ network-config:
name: eth0
subnets:
- dns_nameservers:
- - 213.133.99.99
- - 213.133.100.100
- - 213.133.98.98
+ - 185.12.64.1
+ - 185.12.64.2
ipv4: true
type: dhcp
- type: physical
- - name: eth0:0
- subnets:
- address: 2a01:4f8:beef:beef::1/64
+ dns_nameservers:
+ - 2a01:4ff:ff00::add:2
+ - 2a01:4ff:ff00::add:1
gateway: fe80::1
ipv6: true
- routes:
- - gateway: fe80::1%eth0
- netmask: 0
- network: '::'
- type: static
type: physical
version: 1
network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\
ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\
IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\
IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\
- DNS1=213.133.99.99\nDNS2=213.133.100.100\n"
-public-ipv4: 192.168.0.1
+ DNS1=185.12.64.1\nDNS2=185.12.64.2\n"
+public-ipv4: 192.168.0.2
public-keys:
- ssh-ed25519 \
AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \
test-key@workstation
vendor_data: "test"
-""")
+"""
+)
USERDATA = b"""#cloud-config
runcmd:
@@ -64,55 +60,78 @@ class TestDataSourceHetzner(CiTestCase):
"""
Test reading the meta-data
"""
+
def setUp(self):
super(TestDataSourceHetzner, self).setUp()
self.tmp = self.tmp_dir()
def get_ds(self):
ds = DataSourceHetzner.DataSourceHetzner(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
return ds
- @mock.patch('cloudinit.net.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd,
- m_fallback_nic, m_net):
- m_get_hcloud_data.return_value = (True,
- str(METADATA.get('instance-id')))
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.EphemeralDHCPv4")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_read_data(
+ self,
+ m_get_hcloud_data,
+ m_usermd,
+ m_readmd,
+ m_fallback_nic,
+ m_net,
+ m_dhcp,
+ ):
+ m_get_hcloud_data.return_value = (
+ True,
+ str(METADATA.get("instance-id")),
+ )
m_readmd.return_value = METADATA.copy()
m_usermd.return_value = USERDATA
- m_fallback_nic.return_value = 'eth0'
+ m_fallback_nic.return_value = "eth0"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth0",
+ "fixed-address": "192.168.0.2",
+ "routers": "192.168.0.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.0.255",
+ }
+ ]
ds = self.get_ds()
ret = ds.get_data()
self.assertTrue(ret)
m_net.assert_called_once_with(
- 'eth0', '169.254.0.1',
- 16, '169.254.255.255'
+ iface="eth0",
+ connectivity_url_data={
+ "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id"
+ },
)
self.assertTrue(m_readmd.called)
- self.assertEqual(METADATA.get('hostname'), ds.get_hostname())
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname())
- self.assertEqual(METADATA.get('public-keys'),
- ds.get_public_ssh_keys())
+ self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
self.assertEqual(ds.get_userdata_raw(), USERDATA)
- self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data'))
-
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_not_on_hetzner_returns_false(self, m_get_hcloud_data,
- m_find_fallback, m_read_md):
+ self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendor_data"))
+
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_not_on_hetzner_returns_false(
+ self, m_get_hcloud_data, m_find_fallback, m_read_md
+ ):
"""If helper 'get_hcloud_data' returns False,
- return False from get_data."""
+ return False from get_data."""
m_get_hcloud_data.return_value = (False, None)
ds = self.get_ds()
ret = ds.get_data()
@@ -132,11 +151,14 @@ class TestMaybeB64Decode:
with pytest.raises(TypeError):
hc_helper.maybe_b64decode(invalid_input)
- @pytest.mark.parametrize("in_data,expected", [
- # If data is not b64 encoded, then return value should be the same.
- (b"this is my data", b"this is my data"),
- # If data is b64 encoded, then return value should be decoded.
- (base64.b64encode(b"data"), b"data"),
- ])
+ @pytest.mark.parametrize(
+ "in_data,expected",
+ [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ],
+ )
def test_happy_path(self, in_data, expected):
assert expected == hc_helper.maybe_b64decode(in_data)
diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py
index 9013ae9f..17a8be64 100644
--- a/tests/unittests/test_datasource/test_ibmcloud.py
+++ b/tests/unittests/sources/test_ibmcloud.py
@@ -1,15 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceIBMCloud as ibm
-from cloudinit.tests import helpers as test_helpers
-from cloudinit import util
-
import base64
import copy
import json
from textwrap import dedent
+from cloudinit import util
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceIBMCloud as ibm
+from tests.unittests import helpers as test_helpers
+
mock = test_helpers.mock
D_PATH = "cloudinit.sources.DataSourceIBMCloud."
@@ -23,24 +23,36 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
blkid_base = {
"/dev/xvda1": {
- "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs",
- "TYPE": "ext3"},
+ "DEVNAME": "/dev/xvda1",
+ "LABEL": "cloudimg-bootfs",
+ "TYPE": "ext3",
+ },
"/dev/xvda2": {
- "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs",
- "TYPE": "ext4"},
+ "DEVNAME": "/dev/xvda2",
+ "LABEL": "cloudimg-rootfs",
+ "TYPE": "ext4",
+ },
}
blkid_metadata_disk = {
"/dev/xvdh1": {
- "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": "681B-8C5D",
- "PARTUUID": "3d631e09-01"},
+ "DEVNAME": "/dev/xvdh1",
+ "LABEL": "METADATA",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "681B-8C5D",
+ "PARTUUID": "3d631e09-01",
+ },
}
blkid_oscode_disk = {
"/dev/xvdh": {
- "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID}
+ "DEVNAME": "/dev/xvdh",
+ "LABEL": "config-2",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": ibm.IBM_CONFIG_UUID,
+ }
}
def setUp(self):
@@ -56,7 +68,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = False
self.assertEqual(
(ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_METADATA."""
@@ -64,7 +77,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_NODATA."""
@@ -72,14 +86,16 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_os_code(self, m_blkid, m_is_prov, _m_xen):
"""Identify OS_CODE."""
m_blkid.return_value = self.blkid_oscode
m_is_prov.return_value = False
- self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"),
- ibm.get_ibm_platform())
+ self.assertEqual(
+ (ibm.Platforms.OS_CODE, "/dev/xvdh"), ibm.get_ibm_platform()
+ )
def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen):
"""Test against false positive on openstack with non-ibm UUID."""
@@ -116,7 +132,8 @@ class TestReadMD(test_helpers.CiTestCase):
"public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"},
}
- content_interfaces = dedent("""\
+ content_interfaces = dedent(
+ """\
auto lo
iface lo inet loopback
@@ -125,71 +142,107 @@ class TestReadMD(test_helpers.CiTestCase):
iface eth0 inet static
address 10.82.43.5
netmask 255.255.255.192
- """)
+ """
+ )
userdata = b"#!/bin/sh\necho hi mom\n"
# meta.js file gets json encoded userdata as a list.
meta_js = '["#!/bin/sh\necho hi mom\n"]'
vendor_data = {
- "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"}
+ "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"
+ }
network_data = {
"links": [
- {"id": "interface_29402281", "name": "eth0", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"},
- {"id": "interface_29402279", "name": "eth1", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"}
+ {
+ "id": "interface_29402281",
+ "name": "eth0",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:00:f1:bd:da:25",
+ },
+ {
+ "id": "interface_29402279",
+ "name": "eth1",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:98:5e:d0:7f:86",
+ },
],
"networks": [
- {"id": "network_109887563", "link": "interface_29402281",
- "type": "ipv4", "ip_address": "10.82.43.2",
- "netmask": "255.255.255.192",
- "routes": [
- {"network": "10.0.0.0", "netmask": "255.0.0.0",
- "gateway": "10.82.43.1"},
- {"network": "161.26.0.0", "netmask": "255.255.0.0",
- "gateway": "10.82.43.1"}]},
- {"id": "network_109887551", "link": "interface_29402279",
- "type": "ipv4", "ip_address": "108.168.194.252",
- "netmask": "255.255.255.248",
- "routes": [
- {"network": "0.0.0.0", "netmask": "0.0.0.0",
- "gateway": "108.168.194.249"}]}
+ {
+ "id": "network_109887563",
+ "link": "interface_29402281",
+ "type": "ipv4",
+ "ip_address": "10.82.43.2",
+ "netmask": "255.255.255.192",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "10.82.43.1",
+ },
+ {
+ "network": "161.26.0.0",
+ "netmask": "255.255.0.0",
+ "gateway": "10.82.43.1",
+ },
+ ],
+ },
+ {
+ "id": "network_109887551",
+ "link": "interface_29402279",
+ "type": "ipv4",
+ "ip_address": "108.168.194.252",
+ "netmask": "255.255.255.248",
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "108.168.194.249",
+ }
+ ],
+ },
],
"services": [
{"type": "dns", "address": "10.0.80.11"},
- {"type": "dns", "address": "10.0.80.12"}
+ {"type": "dns", "address": "10.0.80.12"},
],
}
- sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f'
+ sysuuid = "7f79ebf5-d791-43c3-a723-854e8389d59f"
def _get_expected_metadata(self, os_md):
"""return expected 'metadata' for data loaded from meta_data.json."""
os_md = copy.deepcopy(os_md)
renames = (
- ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'),
- ('public_keys', 'public-keys'))
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ ("public_keys", "public-keys"),
+ )
ret = {}
for osname, mdname in renames:
if osname in os_md:
ret[mdname] = os_md[osname]
- if 'random_seed' in os_md:
- ret['random_seed'] = base64.b64decode(os_md['random_seed'])
+ if "random_seed" in os_md:
+ ret["random_seed"] = base64.b64decode(os_md["random_seed"])
return ret
def test_provisioning_md(self, m_platform, m_sysuuid):
"""Provisioning env with a metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh")
+ ibm.Platforms.TEMPLATE_PROVISIONING_METADATA,
+ "/dev/xvdh",
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_no_metadata(self, m_platform, m_sysuuid):
"""Provisioning env with no metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+ ibm.Platforms.TEMPLATE_PROVISIONING_NODATA,
+ None,
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_not_ibm(self, m_platform, m_sysuuid):
@@ -201,62 +254,83 @@ class TestReadMD(test_helpers.CiTestCase):
"""Template live environment should be identified."""
tmpdir = self.tmp_dir()
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir)
+ ibm.Platforms.TEMPLATE_LIVE_METADATA,
+ tmpdir,
+ )
m_sysuuid.return_value = self.sysuuid
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.template_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/content/interfaces': self.content_interfaces,
- 'meta.js': self.meta_js})
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(
+ self.template_md
+ ),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/content/interfaces": self.content_interfaces,
+ "meta.js": self.meta_js,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA,
- ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.template_md),
- ret['metadata'])
- self.assertEqual(self.sysuuid, ret['system-uuid'])
+ self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.template_md), ret["metadata"]
+ )
+ self.assertEqual(self.sysuuid, ret["system-uuid"])
def test_os_code_live(self, m_platform, m_sysuuid):
"""Verify an os_code metadata path."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
netdata = json.dumps(self.network_data)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- 'openstack/latest/network_data.json': netdata,
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ "openstack/latest/network_data.json": netdata,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
def test_os_code_live_no_userdata(self, m_platform, m_sysuuid):
"""Verify os_code without user-data."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertIsNone(ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertIsNone(ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""Test the _is_ibm_provisioning method."""
+
inst_log = "/root/swinstall.log"
prov_cfg = "/root/provisioningConfiguration.cfg"
boot_ref = "/proc/1/environ"
@@ -279,9 +353,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_old_log(self):
"""A config with a log from previous boot is not provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", -30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("from previous boot", self.logs.getvalue())
@@ -289,9 +365,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_new_log(self):
"""A config with a log from this boot is provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", 30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertTrue(self._call_with_root(rootd=rootd))
self.assertIn("from current boot", self.logs.getvalue())
@@ -300,44 +378,49 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""If the config and log existed, but no reference, assume not."""
rootd = self.tmp_dir()
test_helpers.populate_dir(
- rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"})
+ rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}
+ )
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("no reference file", self.logs.getvalue())
class TestDataSourceIBMCloud(test_helpers.CiTestCase):
-
def setUp(self):
super(TestDataSourceIBMCloud, self).setUp()
self.tmp = self.tmp_dir()
- self.cloud_dir = self.tmp_path('cloud', dir=self.tmp)
+ self.cloud_dir = self.tmp_path("cloud", dir=self.tmp)
util.ensure_dir(self.cloud_dir)
- paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir})
- self.ds = ibm.DataSourceIBMCloud(
- sys_cfg={}, distro=None, paths=paths)
+ paths = Paths({"run_dir": self.tmp, "cloud_dir": self.cloud_dir})
+ self.ds = ibm.DataSourceIBMCloud(sys_cfg={}, distro=None, paths=paths)
def test_get_data_false(self):
"""When read_md returns None, get_data returns False."""
- with mock.patch(D_PATH + 'read_md', return_value=None):
+ with mock.patch(D_PATH + "read_md", return_value=None):
self.assertFalse(self.ds.get_data())
def test_get_data_processes_read_md(self):
"""get_data processes and caches content returned by read_md."""
md = {
- 'metadata': {}, 'networkdata': 'net', 'platform': 'plat',
- 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud',
- 'vendordata': 'vd'}
- with mock.patch(D_PATH + 'read_md', return_value=md):
+ "metadata": {},
+ "networkdata": "net",
+ "platform": "plat",
+ "source": "src",
+ "system-uuid": "uuid",
+ "userdata": "ud",
+ "vendordata": "vd",
+ }
+ with mock.patch(D_PATH + "read_md", return_value=md):
self.assertTrue(self.ds.get_data())
- self.assertEqual('src', self.ds.source)
- self.assertEqual('plat', self.ds.platform)
+ self.assertEqual("src", self.ds.source)
+ self.assertEqual("plat", self.ds.platform)
self.assertEqual({}, self.ds.metadata)
- self.assertEqual('ud', self.ds.userdata_raw)
- self.assertEqual('net', self.ds.network_json)
- self.assertEqual('vd', self.ds.vendordata_pure)
- self.assertEqual('uuid', self.ds.system_uuid)
- self.assertEqual('ibmcloud', self.ds.cloud_name)
- self.assertEqual('ibmcloud', self.ds.platform_type)
- self.assertEqual('plat (src)', self.ds.subplatform)
+ self.assertEqual("ud", self.ds.userdata_raw)
+ self.assertEqual("net", self.ds.network_json)
+ self.assertEqual("vd", self.ds.vendordata_pure)
+ self.assertEqual("uuid", self.ds.system_uuid)
+ self.assertEqual("ibmcloud", self.ds.cloud_name)
+ self.assertEqual("ibmcloud", self.ds.platform_type)
+ self.assertEqual("plat (src)", self.ds.subplatform)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
new file mode 100644
index 00000000..ce8fc970
--- /dev/null
+++ b/tests/unittests/sources/test_init.py
@@ -0,0 +1,994 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import inspect
+import os
+import stat
+
+from cloudinit import importer, util
+from cloudinit.event import EventScope, EventType
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ EXPERIMENTAL_TEXT,
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ METADATA_UNKNOWN,
+ REDACT_SENSITIVE_VALUE,
+ UNSET,
+ DataSource,
+ canonical_cloud_id,
+ redact_sensitive_keys,
+)
+from cloudinit.user_data import UserDataProcessor
+from tests.unittests.helpers import CiTestCase, mock
+
+
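+# Minimal concrete DataSource used to exercise base-class behavior:
+# ds_cfg lookup via dsname, URL parameter handling and instance-data writing.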
+class DataSourceTestSubclassNet(DataSource):
+
+ dsname = "MyTestSubclass"
+ url_max_wait = 55
+
+ def __init__(
+ self,
+ sys_cfg,
+ distro,
+ paths,
+ custom_metadata=None,
+ custom_userdata=None,
+ get_data_retval=True,
+ ):
+ super(DataSourceTestSubclassNet, self).__init__(sys_cfg, distro, paths)
+ self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
+
+ def _get_cloud_name(self):
+ return "SubclassCloudName"
+
+ def _get_data(self):
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ }
+ if self._custom_userdata:
+ self.userdata_raw = self._custom_userdata
+ else:
+ self.userdata_raw = "userdata_raw"
+ self.vendordata_raw = "vendordata_raw"
+ return self._get_data_retval
+
+
+class InvalidDataSourceTestSubclassNet(DataSource):
+ pass
+
+
+class TestDataSource(CiTestCase):
+
+ with_logs = True
+ maxDiff = None
+
+ def setUp(self):
+ super(TestDataSource, self).setUp()
+ self.sys_cfg = {"datasource": {"_undef": {"key1": False}}}
+ self.distro = "distrotest" # generally should be a Distro object
+ self.paths = Paths({})
+ self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
+
+ def test_datasource_init(self):
+ """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
+ self.assertEqual(self.paths, self.datasource.paths)
+ self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
+ self.assertEqual(self.distro, self.datasource.distro)
+ self.assertIsNone(self.datasource.userdata)
+ self.assertEqual({}, self.datasource.metadata)
+ self.assertIsNone(self.datasource.userdata_raw)
+ self.assertIsNone(self.datasource.vendordata)
+ self.assertIsNone(self.datasource.vendordata_raw)
+ self.assertEqual({"key1": False}, self.datasource.ds_cfg)
+ self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
+
+ def test_datasource_init_gets_ds_cfg_using_dsname(self):
+ """Init uses DataSource.dsname for sourcing ds_cfg."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ self.assertEqual({"key2": False}, datasource.ds_cfg)
+
+ def test_str_is_classname(self):
+ """The string representation of the datasource is the classname."""
+ self.assertEqual("DataSource", str(self.datasource))
+ self.assertEqual(
+ "DataSourceTestSubclassNet",
+ str(DataSourceTestSubclassNet("", "", self.paths)),
+ )
+
+ def test_datasource_get_url_params_defaults(self):
+ """get_url_params default url config settings for the datasource."""
+ params = self.datasource.get_url_params()
+ self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+ self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+ self.assertEqual(params.num_retries, self.datasource.url_retries)
+ self.assertEqual(
+ params.sec_between_retries, self.datasource.url_sec_between_retries
+ )
+
+ def test_datasource_get_url_params_subclassed(self):
+ """Subclasses can override get_url_params defaults."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(self.datasource.get_url_params(), url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_ds_config_override(self):
+ """Datasource configuration options can override url param defaults."""
+ sys_cfg = {
+ "datasource": {
+ "MyTestSubclass": {
+ "max_wait": "1",
+ "timeout": "2",
+ "retries": "3",
+ "sec_between_retries": 4,
+ }
+ }
+ }
+ datasource = DataSourceTestSubclassNet(
+ sys_cfg, self.distro, self.paths
+ )
+ expected = (1, 2, 3, 4)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(
+ (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ ),
+ url_params,
+ )
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_is_zero_or_greater(self):
+ """get_url_params ignores timeouts with a value below 0."""
+ # Set an override that is below 0 which gets ignored.
+ sys_cfg = {"datasource": {"_undef": {"timeout": "-1"}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ (
+ _max_wait,
+ timeout,
+ _retries,
+ _sec_between_retries,
+ ) = datasource.get_url_params()
+ self.assertEqual(0, timeout)
+
+ def test_datasource_get_url_uses_defaults_on_errors(self):
+ """On invalid system config values for url_params defaults are used."""
+ # All invalid values should be logged
+ sys_cfg = {
+ "datasource": {
+ "_undef": {
+ "max_wait": "nope",
+ "timeout": "bug",
+ "retries": "nonint",
+ }
+ }
+ }
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ url_params = datasource.get_url_params()
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ self.assertEqual(expected, url_params)
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "Config max_wait 'nope' is not an int, using default '-1'",
+ "Config timeout 'bug' is not an int, using default '10'",
+ "Config retries 'nonint' is not an int, using default '5'",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+ """The fallback_interface is discovered via find_fallback_nic."""
+ m_get_fallback_nic.return_value = "nic9"
+ self.assertEqual("nic9", self.datasource.fallback_interface)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+ """Log a warning when fallback_interface can not discover the nic."""
+ self.datasource._cloud_name = "MySupahCloud"
+ m_get_fallback_nic.return_value = None # Couldn't discover nic
+ self.assertIsNone(self.datasource.fallback_interface)
+ self.assertEqual(
+ "WARNING: Did not find a fallback interface on MySupahCloud.\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+ """The fallback_interface is cached and won't be rediscovered."""
+ self.datasource._fallback_interface = "nic10"
+ self.assertEqual("nic10", self.datasource.fallback_interface)
+ m_get_fallback_nic.assert_not_called()
+
+ def test__get_data_unimplemented(self):
+ """Raise an error when _get_data is not implemented."""
+ with self.assertRaises(NotImplementedError) as context_manager:
+ self.datasource.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+ datasource2 = InvalidDataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, self.paths
+ )
+ with self.assertRaises(NotImplementedError) as context_manager:
+ datasource2.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+
+ def test_get_data_calls_subclass__get_data(self):
+ """Datasource.get_data uses the subclass' version of _get_data."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ datasource.metadata,
+ )
+ self.assertEqual("userdata_raw", datasource.userdata_raw)
+ self.assertEqual("vendordata_raw", datasource.vendordata_raw)
+
+ def test_get_hostname_strips_local_hostname_without_domain(self):
+ """Datasource.get_hostname strips metadata local-hostname of domain."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ "test-subclass-hostname", datasource.metadata["local-hostname"]
+ )
+ self.assertEqual("test-subclass-hostname", datasource.get_hostname())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual("hostname", datasource.get_hostname())
+
+ def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
+ """Datasource.get_hostname with fqdn set gets qualified hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual(
+ "hostname.my.domain.com", datasource.get_hostname(fqdn=True)
+ )
+
+ def test_get_hostname_without_metadata_uses_system_hostname(self):
+ """Datasource.gethostname runs util.get_hostname when no metadata."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+ m_fqdn.return_value = None # No matching fqdn in /etc/hosts
+ self.assertEqual("systemhostname", datasource.get_hostname())
+ self.assertEqual(
+ "systemhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_hostname_without_metadata_returns_none(self):
+ """Datasource.gethostname returns None when metadata_only and no MD."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ self.assertIsNone(datasource.get_hostname(metadata_only=True))
+ self.assertIsNone(
+ datasource.get_hostname(fqdn=True, metadata_only=True)
+ )
+ self.assertEqual([], m_gethost.call_args_list)
+ self.assertEqual([], m_fqdn.call_args_list)
+
+ def test_get_hostname_without_metadata_prefers_etc_hosts(self):
+ """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+ m_fqdn.return_value = "fqdnhostname.domain.com"
+ self.assertEqual("fqdnhostname", datasource.get_hostname())
+ self.assertEqual(
+ "fqdnhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ get_data_retval=False,
+ )
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), "Found unexpected file %s" % json_file
+ )
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical_cloud_id",
+ ):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": ["merged_cfg"],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical_cloud_id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ },
+ }
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
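+ # Values nested under sensitive_metadata_keys (here "security-credentials")
+ # must appear as REDACT_SENSITIVE_VALUE in the world-readable JSON file.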
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {"security-credentials": REDACT_SENSITIVE_VALUE},
+ },
+ },
+ }
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical-cloud-id",
+ ):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": (
+ "Merged cloud-init system config from "
+ "/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/"
+ ),
+ "datasource": {"_undef": {"key1": False}},
+ },
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical-cloud-id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "kernel_release": "5.4.0-24-generic",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "subplatform": "unknown",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ },
+ }
+ self.assertCountEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
+ self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_handles_redacted_unserializable_content(self):
+ """get_data warns unserializable content in INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}},
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected_metadata = {
+ "key1": "val1",
+ "key2": {
+ "key2.1": (
+ "Warning: redacted unserializable type <class"
+ " 'cloudinit.helpers.Paths'>"
+ )
+ },
+ }
+ instance_json = util.load_json(content)
+ self.assertEqual(expected_metadata, instance_json["ds"]["meta_data"])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("ec2_metadata", instance_data["ds"])
+ datasource.ec2_metadata = {"ec2stuff": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"]
+ )
+
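+ # persist_instance_data writes a cloud-id-<cloud> file and keeps the
+ # generic cloud-id symlink pointing at the current canonical cloud id.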
+ def test_persist_instance_data_writes_canonical_cloud_id_and_symlink(self):
+ """canonical-cloud-id class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ cloud_id_link = os.path.join(tmp, "cloud-id")
+ cloud_id_file = os.path.join(tmp, "cloud-id-my-cloud")
+ cloud_id2_file = os.path.join(tmp, "cloud-id-my-cloud2")
+ for filename in (cloud_id_file, cloud_id_link, cloud_id2_file):
+ self.assertFalse(
+ os.path.exists(filename), f"Unexpected link found {filename}"
+ )
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
+ ):
+ datasource.get_data()
+ self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
+ # A symlink with the generic /run/cloud-init/cloud-id link is present
+ self.assertTrue(util.is_link(cloud_id_link))
+ # When cloud-id changes, symlink and content change
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
+ ):
+ datasource.persist_instance_data()
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id2_file))
+ # Previous cloud-id-<cloud-type> file removed
+ self.assertFalse(os.path.exists(cloud_id_file))
+ # Generic link persisted which contains canonical-cloud-id as content
+ self.assertTrue(util.is_link(cloud_id_link))
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id_link))
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("network_json", instance_data["ds"])
+ datasource.network_json = {"network_json": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"network_json": "is good"}, instance_data["ds"]["network_json"]
+ )
+
+ def test_get_data_base64encodes_unserializable_bytes(self):
+ """On py3, get_data base64encodes any unserializable content."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}},
+ )
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ instance_json = util.load_json(content)
+ self.assertCountEqual(
+ ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"]
+ )
+ self.assertEqual(
+ {"key1": "val1", "key2": {"key2.1": "EjM="}},
+ instance_json["ds"]["meta_data"],
+ )
+
+ def test_get_hostname_subclass_support(self):
+ """Validate get_hostname signature on all subclasses of DataSource."""
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
+ # Import all DataSource subclasses so we can inspect them.
+ modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
+ for _loc, name in modules.items():
+ mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], [])
+ if mod_locs:
+ importer.import_module(mod_locs[0])
+ for child in DataSource.__subclasses__():
+ if "Test" in child.dsname:
+ continue
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(child.get_hostname),
+ "%s does not implement DataSource.get_hostname params" % child,
+ )
+ for grandchild in child.__subclasses__():
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(grandchild.get_hostname),
+ "%s does not implement DataSource.get_hostname params"
+ % grandchild,
+ )
+
+ def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, value in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr, value in self.datasource.cached_attr_defaults:
+ self.assertEqual(value, getattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_noops_on_clean_cache(self):
+        """Class attributes are not reset when the cache is clean."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, _ in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource.clear_cached_attrs()
+ count = 0
+ for attr, _ in self.datasource.cached_attr_defaults:
+ self.assertEqual(count, getattr(self.datasource, attr))
+ count += 1
+
+ def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
+ """Skip any cached_attr_defaults which aren't class attributes."""
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr in ("ec2_metadata", "network_json"):
+ self.assertFalse(hasattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_of_custom_attrs(self):
+        """Custom attr_defaults can be passed to clear_cached_attrs."""
+ self.datasource._dirty_cache = True
+ cached_attr_name = self.datasource.cached_attr_defaults[0][0]
+ setattr(self.datasource, cached_attr_name, "himom")
+ self.datasource.myattr = "orig"
+ self.datasource.clear_cached_attrs(
+ attr_defaults=(("myattr", "updated"),)
+ )
+ self.assertEqual("himom", getattr(self.datasource, cached_attr_name))
+ self.assertEqual("updated", self.datasource.myattr)
+
+ @mock.patch.dict(
+ DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_only_acts_on_supported_update_events(self):
+        """update_metadata_if_supported won't call get_data on unsupported events."""
+ self.assertEqual(
+ {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.default_update_events,
+ )
+
+ def fake_get_data():
+ raise Exception("get_data should not be called")
+
+ self.datasource.get_data = fake_get_data
+ self.assertFalse(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[EventType.BOOT]
+ )
+ )
+
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_returns_true_on_supported_update_event(self):
+ """update_metadata_if_supported returns get_data on supported events"""
+
+ def fake_get_data():
+ return True
+
+ self.datasource.get_data = fake_get_data
+ self.datasource._network_config = "something"
+ self.datasource._dirty_cache = True
+ self.assertTrue(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[
+ EventType.BOOT,
+ EventType.BOOT_NEW_INSTANCE,
+ ]
+ )
+ )
+ self.assertEqual(UNSET, self.datasource._network_config)
+
+ self.assertIn(
+ "DEBUG: Update datasource metadata and network config due to"
+ " events: boot-new-instance",
+ self.logs.getvalue(),
+ )
+
+
+class TestRedactSensitiveData(CiTestCase):
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+        """When sensitive_keys is absent from metadata or empty, do nothing."""
+ md = {"my": "data"}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+ md["sensitive_keys"] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted"
+ self.assertEqual(
+ secure_md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_does_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted for non-root user"
+ self.assertEqual(secure_md, redact_sensitive_keys(md))
+
+
+class TestCanonicalCloudID(CiTestCase):
+ def test_cloud_id_returns_platform_on_unknowns(self):
+ """When region and cloud_name are unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region=METADATA_UNKNOWN,
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_none(self):
+        """When region and cloud_name are None, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=None, region=None, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_cloud_name_on_unknown_region(self):
+ """When region is unknown, return cloud_name."""
+ for region in (None, METADATA_UNKNOWN):
+ self.assertEqual(
+ "cloudname",
+ canonical_cloud_id(
+ cloud_name="cloudname", region=region, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
+        """When region is set but cloud_name is unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region="region",
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_aws_based_on_region_and_cloud_name(self):
+ """When cloud_name is aws, return proper cloud-id based on region."""
+ self.assertEqual(
+ "aws-china",
+ canonical_cloud_id(
+ cloud_name="aws", region="cn-north-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-east-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws-gov",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-gov-1", platform="platform"
+ ),
+ )
+        self.assertEqual(  # Overridden non-aws cloud_name is returned
+ "!aws",
+ canonical_cloud_id(
+ cloud_name="!aws", region="us-gov-1", platform="platform"
+ ),
+ )
+
+ def test_cloud_id_azure_based_on_region_and_cloud_name(self):
+        """Report cloud-id when cloud_name is azure and region is in China."""
+ self.assertEqual(
+ "azure-china",
+ canonical_cloud_id(
+ cloud_name="azure", region="chinaeast", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "azure",
+ canonical_cloud_id(
+ cloud_name="azure", region="!chinaeast", platform="platform"
+ ),
+ )
+
+
+# vi: ts=4 expandtab
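
The TestCanonicalCloudID cases above pin down how a canonical cloud-id is chosen from cloud_name, region and platform. Below is a minimal sketch of the decision order those assertions imply; it is an illustration, not the cloud-init implementation, and the region-prefix checks are assumptions inferred from the test values.

METADATA_UNKNOWN = "unknown"  # assumed sentinel, mirroring the tests above

def sketch_canonical_cloud_id(cloud_name, region, platform):
    # An unknown or missing cloud_name falls back to the platform string.
    if not cloud_name or cloud_name == METADATA_UNKNOWN:
        return platform
    # A known cloud_name without a usable region is returned unchanged.
    if not region or region == METADATA_UNKNOWN:
        return cloud_name
    # Region-qualified ids for the special-cased public clouds.
    if cloud_name == "aws":
        if region.startswith("cn-"):
            return "aws-china"
        if region.startswith("us-gov-"):
            return "aws-gov"
    if cloud_name == "azure" and region.startswith("china"):
        return "azure-china"
    return cloud_name

# e.g. sketch_canonical_cloud_id("aws", "us-gov-1", "platform") -> "aws-gov"
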
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
new file mode 100644
index 00000000..e11c3746
--- /dev/null
+++ b/tests/unittests/sources/test_lxd.py
@@ -0,0 +1,394 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import re
+import stat
+from collections import namedtuple
+from copy import deepcopy
+from unittest import mock
+
+import pytest
+import yaml
+
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceLXD as lxd
+from cloudinit.sources import InvalidMetaDataException
+
+DS_PATH = "cloudinit.sources.DataSourceLXD."
+
+
+LStatResponse = namedtuple("lstatresponse", "st_mode")
+
+
+NETWORK_V1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+}
+
+
+def _add_network_v1_device(devname) -> dict:
+ """Helper to inject device name into default network v1 config."""
+ network_cfg = deepcopy(NETWORK_V1)
+ network_cfg["config"][0]["name"] = devname
+ return network_cfg
+
+
+LXD_V1_METADATA = {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "network-config": NETWORK_V1,
+ "user-data": "#cloud-config\npackages: [sl]\n",
+ "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "config": {
+ "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "user.network-config": yaml.safe_dump(NETWORK_V1),
+ },
+}
+
+
+@pytest.fixture
+def lxd_metadata():
+ return LXD_V1_METADATA
+
+
+@pytest.fixture
+def lxd_ds(request, paths, lxd_metadata):
+ """
+ Return an instantiated DataSourceLXD.
+
+ This also performs the mocking required for the default test case:
+ * ``is_platform_viable`` returns True,
+ * ``read_metadata`` returns ``LXD_V1_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object)
+ """
+ with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+ yield lxd.DataSourceLXD(
+ sys_cfg={}, distro=mock.Mock(), paths=paths
+ )
+
+
+class TestGenerateFallbackNetworkConfig:
+ @pytest.mark.parametrize(
+ "uname_machine,systemd_detect_virt,expected",
+ (
+            # systemd_detect_virt=None: `which` returns None, default config used
+ ({}, None, NETWORK_V1),
+ ({}, None, NETWORK_V1),
+ ("anything", "lxc\n", NETWORK_V1),
+ # `uname -m` on kvm determines devname
+ ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")),
+ ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")),
+ ("s390x", "kvm\n", _add_network_v1_device("enc9")),
+ ),
+ )
+ @mock.patch(DS_PATH + "util.system_info")
+ @mock.patch(DS_PATH + "subp.subp")
+ @mock.patch(DS_PATH + "subp.which")
+ def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
+ self,
+ m_which,
+ m_subp,
+ m_system_info,
+ uname_machine,
+ systemd_detect_virt,
+ expected,
+ ):
+        """Return fallback network config based on uname -m and systemd-detect-virt."""
+ if systemd_detect_virt is None:
+ m_which.return_value = None
+ m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
+ m_subp.return_value = (systemd_detect_virt, "")
+ assert expected == lxd.generate_fallback_network_config()
+ if systemd_detect_virt is None:
+ assert 0 == m_subp.call_count
+ assert 0 == m_system_info.call_count
+ else:
+ assert [
+ mock.call(["systemd-detect-virt"])
+ ] == m_subp.call_args_list
+ if systemd_detect_virt != "kvm\n":
+ assert 0 == m_system_info.call_count
+ else:
+ assert 1 == m_system_info.call_count
+
+
+class TestDataSourceLXD:
+ def test_platform_info(self, lxd_ds):
+ assert "LXD" == lxd_ds.dsname
+ assert "lxd" == lxd_ds.cloud_name
+ assert "lxd" == lxd_ds.platform_type
+
+ def test_subplatform(self, lxd_ds):
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == lxd_ds.subplatform
+
+ def test__get_data(self, lxd_ds):
+        """get_data calls read_metadata, setting appropriate instance attrs."""
+ assert UNSET == lxd_ds._crawled_metadata
+ assert UNSET == lxd_ds._network_config
+ assert None is lxd_ds.userdata_raw
+ assert True is lxd_ds._get_data()
+ assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+        # network-config is parsed from YAML into a dict
+ assert NETWORK_V1 == lxd_ds._network_config
+ # Any user-data and vendor-data are saved as raw
+ assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
+ assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+
+
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "exists,lstat_mode,expected",
+ (
+ (False, None, False),
+ (True, stat.S_IFREG, False),
+ (True, stat.S_IFSOCK, True),
+ ),
+ )
+ @mock.patch(DS_PATH + "os.lstat")
+ @mock.patch(DS_PATH + "os.path.exists")
+ def test_expected_viable(
+ self, m_exists, m_lstat, exists, lstat_mode, expected
+ ):
+ """Return True only when LXD_SOCKET_PATH exists and is a socket."""
+ m_exists.return_value = exists
+ m_lstat.return_value = LStatResponse(lstat_mode)
+ assert expected is lxd.is_platform_viable()
+ m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ if exists:
+ m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ else:
+ assert 0 == m_lstat.call_count
+
+
+class TestReadMetadata:
+ @pytest.mark.parametrize(
+ "url_responses,expected,logs",
+ (
+ ( # Assert non-JSON format from config route
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[NOT_JSON",
+ },
+ InvalidMetaDataException(
+ "Unable to determine cloud-init config from"
+ " http://lxd/1.0/config. Expected JSON but found:"
+ " [NOT_JSON"
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert success on just meta-data
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {},
+ "meta-data": "local-hostname: md\n",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert 404s for config routes log skipping
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "", # 404
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "", # 404
+ "http://lxd/1.0/config/user.vendor-data": "", # 404
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.network-config": "net-config",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ },
+ [
+ "Skipping http://lxd/1.0/config/user.vendor-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.meta-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.user-data on"
+ " [HTTP:404]",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ ],
+ ),
+ ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "meta-data",
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "user-data",
+ "http://lxd/1.0/config/user.vendor-data": "vendor-data",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.meta-data": "meta-data",
+ "user.network-config": "net-config",
+ "user.user-data": "user-data",
+ "user.vendor-data": "vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ "user-data": "user-data",
+ "vendor-data": "vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ ],
+ ),
+            (  # Assert cloud-init.* config key values preferred over user.*
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data",'
+ ' "/1.0/config/cloud-init.network-config",'
+ ' "/1.0/config/cloud-init.user-data",'
+ ' "/1.0/config/cloud-init.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.meta-data": "user.meta-data",
+ "http://lxd/1.0/config/user.network-config": (
+ "user.network-config"
+ ),
+ "http://lxd/1.0/config/user.user-data": "user.user-data",
+ "http://lxd/1.0/config/user.vendor-data": (
+ "user.vendor-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.meta-data": (
+ "cloud-init.meta-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "http://lxd/1.0/config/cloud-init.user-data": (
+ "cloud-init.user-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.vendor-data": (
+ "cloud-init.vendor-data"
+ ),
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.meta-data": "user.meta-data",
+ "user.network-config": "user.network-config",
+ "user.user-data": "user.user-data",
+ "user.vendor-data": "user.vendor-data",
+ "cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "cloud-init.user-data": "cloud-init.user-data",
+ "cloud-init.vendor-data": "cloud-init.vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "cloud-init.network-config",
+ "user-data": "cloud-init.user-data",
+ "vendor-data": "cloud-init.vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.network-config",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.user-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.vendor-data",
+ "Ignoring LXD config user.user-data in favor of"
+ " cloud-init.user-data value.",
+ "Ignoring LXD config user.network-config in favor of"
+ " cloud-init.network-config value.",
+ "Ignoring LXD config user.vendor-data in favor of"
+ " cloud-init.vendor-data value.",
+ ],
+ ),
+ ),
+ )
+ @mock.patch.object(lxd.requests.Session, "get")
+ def test_read_metadata_handles_unexpected_content_or_http_status(
+ self, session_get, url_responses, expected, logs, caplog
+ ):
+ """read_metadata handles valid and invalid content and status codes."""
+
+ def fake_get(url):
+ """Mock Response json, ok, status_code, text from url_responses."""
+ m_resp = mock.MagicMock()
+ content = url_responses.get(url, "")
+ m_resp.json.side_effect = lambda: json.loads(content)
+ if content:
+ mock_ok = mock.PropertyMock(return_value=True)
+ mock_status_code = mock.PropertyMock(return_value=200)
+ else:
+ mock_ok = mock.PropertyMock(return_value=False)
+ mock_status_code = mock.PropertyMock(return_value=404)
+ type(m_resp).ok = mock_ok
+ type(m_resp).status_code = mock_status_code
+ mock_text = mock.PropertyMock(return_value=content)
+ type(m_resp).text = mock_text
+ return m_resp
+
+ session_get.side_effect = fake_get
+
+ if isinstance(expected, Exception):
+ with pytest.raises(type(expected), match=re.escape(str(expected))):
+ lxd.read_metadata()
+ else:
+ assert expected == lxd.read_metadata()
+ caplogs = caplog.text
+ for log in logs:
+ assert log in caplogs
+
+
+# vi: ts=4 expandtab
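
TestIsPlatformViable above only requires the platform check to look for a unix socket at the LXD socket path (the /dev/lxd/sock path is taken from the subplatform string earlier in this file). A rough sketch of a check with that behaviour, not the DataSourceLXD code itself:

import os
import stat

LXD_SOCKET_PATH = "/dev/lxd/sock"

def sketch_is_platform_viable(path=LXD_SOCKET_PATH):
    # Only an existing unix socket at the expected path marks the host as LXD;
    # a regular file or a missing path is not viable, and lstat is only
    # consulted when the path exists.
    if os.path.exists(path):
        return stat.S_ISSOCK(os.lstat(path).st_mode)
    return False
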
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/sources/test_maas.py
index 41b6c27b..e95ba374 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/sources/test_maas.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy
import os
import shutil
import tempfile
-import yaml
+from copy import copy
from unittest import mock
-from cloudinit.sources import DataSourceMAAS
+import yaml
+
from cloudinit import url_helper
-from cloudinit.tests.helpers import CiTestCase, populate_dir
+from cloudinit.sources import DataSourceMAAS
+from tests.unittests.helpers import CiTestCase, populate_dir
class TestMAASDataSource(CiTestCase):
-
def setUp(self):
super(TestMAASDataSource, self).setUp()
        # Make a temp directory for tests to use.
@@ -23,11 +23,13 @@ class TestMAASDataSource(CiTestCase):
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such."""
- userdata = b'valid01-userdata'
- data = {'meta-data/instance-id': 'i-valid01',
- 'meta-data/local-hostname': 'valid01-hostname',
- 'user-data': userdata,
- 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+ userdata = b"valid01-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid01",
+ "meta-data/local-hostname": "valid01-hostname",
+ "user-data": userdata,
+ "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ }
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
@@ -35,20 +37,23 @@ class TestMAASDataSource(CiTestCase):
ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
+ for key in ("instance-id", "local-hostname"):
self.assertEqual(data["meta-data/" + key], md[key])
# verify that 'userdata' is not returned as part of the metadata
- self.assertFalse(('user-data' in md))
+ self.assertFalse(("user-data" in md))
self.assertIsNone(vd)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect seed_dir validity."""
- userdata = b'valid-extra-userdata'
- data = {'meta-data/instance-id': 'i-valid-extra',
- 'meta-data/local-hostname': 'valid-extra-hostname',
- 'user-data': userdata, 'foo': 'bar'}
+ userdata = b"valid-extra-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid-extra",
+ "meta-data/local-hostname": "valid-extra-hostname",
+ "user-data": userdata,
+ "foo": "bar",
+ }
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
@@ -56,62 +61,77 @@ class TestMAASDataSource(CiTestCase):
ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data['meta-data/' + key], md[key])
+ for key in ("instance-id", "local-hostname"):
+ self.assertEqual(data["meta-data/" + key], md[key])
# additional files should not just appear as keys in metadata atm
- self.assertFalse(('foo' in md))
+ self.assertFalse(("foo" in md))
def test_seed_dir_invalid(self):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
- valid = {'instance-id': 'i-instanceid',
- 'local-hostname': 'test-hostname', 'user-data': ''}
+ valid = {
+ "instance-id": "i-instanceid",
+ "local-hostname": "test-hostname",
+ "user-data": "",
+ }
my_based = os.path.join(self.tmp, "valid_extra")
# missing 'userdata' file
my_d = "%s-01" % my_based
invalid_data = copy(valid)
- del invalid_data['local-hostname']
+ del invalid_data["local-hostname"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
# missing 'instance-id'
my_d = "%s-02" % my_based
invalid_data = copy(valid)
- del invalid_data['instance-id']
+ del invalid_data["instance-id"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_none(self):
"""Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises MAASSeedDirNone."""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir,
- os.path.join(self.tmp, "nonexistantdirectory"))
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ os.path.join(self.tmp, "nonexistantdirectory"),
+ )
def mock_read_maas_seed_url(self, data, seed, version="19991231"):
"""mock up readurl to appear as a web server at seed has provided data.
return what read_maas_seed_url returns."""
+
def my_readurl(*args, **kwargs):
if len(args):
url = args[0]
else:
- url = kwargs['url']
+ url = kwargs["url"]
prefix = "%s/%s/" % (seed, version)
if not url.startswith(prefix):
raise ValueError("unexpected call %s" % url)
- short = url[len(prefix):]
+ short = url[len(prefix) :]
if short not in data:
raise url_helper.UrlError("not found", code=404, url=url)
return url_helper.StringResponse(data[short])
@@ -124,44 +144,48 @@ class TestMAASDataSource(CiTestCase):
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/public-keys': 'test-hostname',
- 'meta-data/vendor-data': b'my-vendordata',
- 'user-data': b'foodata',
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/public-keys": "test-hostname",
+ "meta-data/vendor-data": b"my-vendordata",
+ "user-data": b"foodata",
}
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(
- valid['meta-data/local-hostname'], md['local-hostname'])
- self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
- self.assertEqual(valid['user-data'], ud)
+ valid["meta-data/local-hostname"], md["local-hostname"]
+ )
+ self.assertEqual(valid["meta-data/public-keys"], md["public-keys"])
+ self.assertEqual(valid["user-data"], ud)
        # vendor-data is YAML, which decodes to a string
- self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
+ self.assertEqual(valid["meta-data/vendor-data"].decode(), vd)
def test_seed_url_vendor_data_dict(self):
- expected_vd = {'key1': 'value1'}
+ expected_vd = {"key1": "value1"}
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(),
}
_ud, md, vd = self.mock_read_maas_seed_url(
- valid, "http://example.com/foo")
+ valid, "http://example.com/foo"
+ )
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(expected_vd, vd)
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
- base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
- 'token_key': 'FAKE_TOKEN_KEY',
- 'token_secret': 'FAKE_TOKEN_SECRET',
- 'consumer_secret': None}
+ base_cfg = {
+ "consumer_key": "FAKE_CONSUMER_KEY",
+ "token_key": "FAKE_TOKEN_KEY",
+ "token_secret": "FAKE_TOKEN_SECRET",
+ "consumer_secret": None,
+ }
def test_all_required(self, m_helper):
"""Valid config as expected."""
@@ -171,17 +195,20 @@ class TestGetOauthHelper(CiTestCase):
def test_other_fields_not_passed_through(self, m_helper):
"""Only relevant fields are passed through."""
mycfg = self.base_cfg.copy()
- mycfg['unrelated_field'] = 'unrelated'
+ mycfg["unrelated_field"] = "unrelated"
DataSourceMAAS.get_oauth_helper(mycfg)
m_helper.assert_has_calls([mock.call(**self.base_cfg)])
class TestGetIdHash(CiTestCase):
- v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY',
- 'token_secret': 'TSEC'}
+ v1_cfg = {
+ "consumer_key": "CKEY",
+ "token_key": "TKEY",
+ "token_secret": "TSEC",
+ }
v1_id = (
- 'v1:'
- '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392')
+ "v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392"
+ )
def test_v1_expected(self):
"""Test v1 id generated as expected working behavior from config."""
@@ -191,8 +218,8 @@ class TestGetIdHash(CiTestCase):
def test_v1_extra_fields_are_ignored(self):
"""Test v1 id ignores unused entries in config."""
cfg = self.v1_cfg.copy()
- cfg['consumer_secret'] = "BOO"
- cfg['unrelated'] = "HI MOM"
+ cfg["consumer_secret"] = "BOO"
+ cfg["unrelated"] = "HI MOM"
result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
self.assertEqual(self.v1_id, result)
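
TestGetIdHash above asserts that the node id is a "v1:" prefix followed by a SHA-256 hex digest derived from the OAuth credential fields, with consumer_secret and unrelated keys ignored. A hedged sketch of that shape follows; the exact field order and join separator are assumptions, so this is illustrative rather than guaranteed to reproduce the v1_id value asserted in the test.

import hashlib

def sketch_get_id_from_ds_cfg(ds_cfg):
    # Hash only the credential fields the tests care about; other keys
    # (consumer_secret, unrelated entries) do not affect the result.
    fields = ("consumer_key", "token_key", "token_secret")
    idstr = "\0".join(ds_cfg.get(f, "") for f in fields)
    return "v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest()
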
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
index 02cc9b38..1f6b722d 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/sources/test_nocloud.py
@@ -1,27 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit.sources.DataSourceNoCloud import (
- DataSourceNoCloud as dsNoCloud,
- _maybe_remove_top_network,
- parse_cmdline_data)
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack
-
import os
import textwrap
+
import yaml
+from cloudinit import dmi, helpers, util
+from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud
+from cloudinit.sources.DataSourceNoCloud import (
+ _maybe_remove_top_network,
+ parse_cmdline_data,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
-@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd')
+@mock.patch("cloudinit.sources.DataSourceNoCloud.util.is_lxd")
class TestNoCloudDataSource(CiTestCase):
-
def setUp(self):
super(TestNoCloudDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.cmdline = "root=TESTCMDLINE"
@@ -29,77 +29,77 @@ class TestNoCloudDataSource(CiTestCase):
self.addCleanup(self.mocks.close)
self.mocks.enter_context(
- mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
+ mock.patch.object(util, "get_cmdline", return_value=self.cmdline)
+ )
self.mocks.enter_context(
- mock.patch.object(dmi, 'read_dmi_data', return_value=None))
+ mock.patch.object(dmi, "read_dmi_data", return_value=None)
+ )
def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
- vfat_device = 'device-1'
+ vfat_device = "device-1"
def m_mount_cb(device, callback, mtype):
- if (device == vfat_device):
- return {'meta-data': yaml.dump({'instance-id': 'IID'})}
+ if device == vfat_device:
+ return {"meta-data": yaml.dump({"instance-id": "IID"})}
else:
return {}
- def m_find_devs_with(query='', path=''):
- if 'TYPE=vfat' == query:
+ def m_find_devs_with(query="", path=""):
+ if "TYPE=vfat" == query:
return [vfat_device]
- elif 'LABEL={}'.format(fs_label) == query:
+ elif "LABEL={}".format(fs_label) == query:
return [vfat_device]
else:
return []
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=m_find_devs_with))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=m_find_devs_with
+ )
+ )
self.mocks.enter_context(
- mock.patch.object(util, 'mount_cb',
- side_effect=m_mount_cb))
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
+ mock.patch.object(util, "mount_cb", side_effect=m_mount_cb)
+ )
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": fs_label_to_search}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": ud, "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, ud)
self.assertEqual(dsrc.metadata, md)
- self.assertEqual(dsrc.platform_type, 'lxd')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "lxd")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
self.assertTrue(ret)
def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
"""Non-lxd environments will list nocloud as the platform."""
m_is_lxd.return_value = False
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': '', 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.platform_type, 'nocloud')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "nocloud")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
def test_fs_label(self, m_is_lxd):
        # find_devs_with should not be called if fs_label is None
@@ -107,65 +107,70 @@ class TestNoCloudDataSource(CiTestCase):
pass
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=PsuedoException))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=PsuedoException
+ )
+ )
# by default, NoCloud should search for filesystems by label
- sys_cfg = {'datasource': {'NoCloud': {}}}
+ sys_cfg = {"datasource": {"NoCloud": {}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(PsuedoException, dsrc.get_data)
# but disabling searching should just end up with None found
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertFalse(ret)
def test_fs_config_lowercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'cidata')
+ self._test_fs_config_is_read("cidata", "cidata")
def test_fs_config_uppercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'cidata')
+ self._test_fs_config_is_read("CIDATA", "cidata")
def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'CIDATA')
+ self._test_fs_config_is_read("cidata", "CIDATA")
def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'CIDATA')
+ self._test_fs_config_is_read("CIDATA", "CIDATA")
def test_no_datasource_expected(self, m_is_lxd):
# no source should be found if no cmdline, config, and fs_label=None
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertFalse(dsrc.get_data())
def test_seed_in_config(self, m_is_lxd):
data = {
- 'fs_label': None,
- 'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
- 'user-data': b"USER_DATA_RAW",
+ "fs_label": None,
+ "meta-data": yaml.safe_dump({"instance-id": "IID"}),
+ "user-data": b"USER_DATA_RAW",
}
- sys_cfg = {'datasource': {'NoCloud': data}}
+ sys_cfg = {"datasource": {"NoCloud": data}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_with_vendordata(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
vd = b"THIS IS MY VENDOR_DATA"
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': ud, 'meta-data': yaml.safe_dump(md),
- 'vendor-data': vd})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": ud,
+ "meta-data": yaml.safe_dump(md),
+ "vendor-data": vd,
+ },
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -175,10 +180,12 @@ class TestNoCloudDataSource(CiTestCase):
self.assertTrue(ret)
def test_nocloud_no_vendordata(self, m_is_lxd):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -189,23 +196,28 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_interfaces(self, m_is_lxd):
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n"})
+ {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -215,16 +227,26 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config(self, m_is_lxd):
# network-config needs to get into network_config
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -233,14 +255,17 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config_with_toplevel_network(self, m_is_lxd):
"""network-config may have 'network' top level key."""
- netconf = {'config': 'disabled'}
+ netconf = {"config": "disabled"}
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump({'network': netconf}) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump({"network": netconf}) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -251,27 +276,42 @@ class TestNoCloudDataSource(CiTestCase):
# network-config should override meta-data/network-interfaces
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": yaml.dump(md) + "\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -281,17 +321,24 @@ class TestNoCloudDataSource(CiTestCase):
@mock.patch("cloudinit.util.blkid")
def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
self.mocks.enter_context(
- mock.patch.object(util, 'is_FreeBSD', return_value=True))
+ mock.patch.object(util, "is_FreeBSD", return_value=True)
+ )
def _mfind_devs_with_freebsd(
- criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+ criteria=None,
+ oformat="device",
+ tag=None,
+ no_cache=False,
+ path=None,
+ ):
if not criteria:
return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
if criteria.startswith("LABEL="):
@@ -304,17 +351,19 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(
- util, 'find_devs_with_freebsd',
- side_effect=_mfind_devs_with_freebsd))
+ util,
+ "find_devs_with_freebsd",
+ side_effect=_mfind_devs_with_freebsd,
+ )
+ )
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc._get_devices('foo')
- self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret)
+ ret = dsrc._get_devices("foo")
+ self.assertEqual(["/dev/msdosfs/foo", "/dev/iso9660/foo"], ret)
fake_blkid.assert_not_called()
class TestParseCommandLineData(CiTestCase):
-
def test_parse_cmdline_data_valid(self):
ds_id = "ds=nocloud"
pairs = (
@@ -322,18 +371,21 @@ class TestParseCommandLineData(CiTestCase):
("%(ds_id)s; root=/dev/foo", {}),
("%(ds_id)s", {}),
("%(ds_id)s;", {}),
- ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}),
- ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
- {'seedfrom': 'SEED', 'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost",
- {'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost;i=IID",
- {'local-hostname': 'xhost', 'instance-id': 'IID'}),
+ ("%(ds_id)s;s=SEED", {"seedfrom": "SEED"}),
+ (
+ "%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
+ {"seedfrom": "SEED", "local-hostname": "xhost"},
+ ),
+ ("%(ds_id)s;h=xhost", {"local-hostname": "xhost"}),
+ (
+ "%(ds_id)s;h=xhost;i=IID",
+ {"local-hostname": "xhost", "instance-id": "IID"},
+ ),
)
for (fmt, expected) in pairs:
fill = {}
- cmdline = fmt % {'ds_id': ds_id}
+ cmdline = fmt % {"ds_id": ds_id}
ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
self.assertEqual(expected, fill)
self.assertTrue(ret)
@@ -358,36 +410,44 @@ class TestParseCommandLineData(CiTestCase):
class TestMaybeRemoveToplevelNetwork(CiTestCase):
"""test _maybe_remove_top_network function."""
- basecfg = [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]
+
+ basecfg = [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
def test_should_remove_safely(self):
- mcfg = {'config': self.basecfg, 'version': 1}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": self.basecfg, "version": 1}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
def test_no_remove_if_other_keys(self):
"""should not shift if other keys at top level."""
- mcfg = {'network': {'config': self.basecfg, 'version': 1},
- 'unknown_keyname': 'keyval'}
+ mcfg = {
+ "network": {"config": self.basecfg, "version": 1},
+ "unknown_keyname": "keyval",
+ }
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_non_dict(self):
"""should not shift if not a dict."""
- mcfg = {'network': '"content here'}
+ mcfg = {"network": '"content here'}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_missing_config_or_version(self):
"""should not shift unless network entry has config and version."""
- mcfg = {'network': {'config': self.basecfg}}
+ mcfg = {"network": {"config": self.basecfg}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
- mcfg = {'network': {'version': 1}}
+ mcfg = {"network": {"version": 1}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_remove_with_config_disabled(self):
"""network/config=disabled should be shifted."""
- mcfg = {'config': 'disabled'}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": "disabled"}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
# vi: ts=4 expandtab
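
The TestMaybeRemoveToplevelNetwork cases above fully describe when a top-level 'network' key should be unwrapped. A small sketch of that rule, written against the assertions rather than copied from DataSourceNoCloud:

def sketch_maybe_remove_top_network(cfg):
    # Unwrap only when 'network' is the sole top-level key and its value
    # looks like a network config: either config set to "disabled", or both
    # 'config' and 'version' present. Anything else is returned untouched.
    if list(cfg) != ["network"]:
        return cfg
    net = cfg["network"]
    if not isinstance(net, dict):
        return cfg
    if net.get("config") == "disabled" or {"config", "version"} <= net.keys():
        return net
    return cfg
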
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index 9c6070a5..e05c4749 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -1,62 +1,61 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit.sources import DataSourceOpenNebula as ds
-from cloudinit import util
-from cloudinit.tests.helpers import mock, populate_dir, CiTestCase
-
import os
import pwd
import unittest
import pytest
+from cloudinit import helpers, util
+from cloudinit.sources import DataSourceOpenNebula as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
TEST_VARS = {
- 'VAR1': 'single',
- 'VAR2': 'double word',
- 'VAR3': 'multi\nline\n',
- 'VAR4': "'single'",
- 'VAR5': "'double word'",
- 'VAR6': "'multi\nline\n'",
- 'VAR7': 'single\\t',
- 'VAR8': 'double\\tword',
- 'VAR9': 'multi\\t\nline\n',
- 'VAR10': '\\', # expect '\'
- 'VAR11': '\'', # expect '
- 'VAR12': '$', # expect $
+ "VAR1": "single",
+ "VAR2": "double word",
+ "VAR3": "multi\nline\n",
+ "VAR4": "'single'",
+ "VAR5": "'double word'",
+ "VAR6": "'multi\nline\n'",
+ "VAR7": "single\\t",
+ "VAR8": "double\\tword",
+ "VAR9": "multi\\t\nline\n",
+ "VAR10": "\\", # expect '\'
+ "VAR11": "'", # expect '
+ "VAR12": "$", # expect $
}
-INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
-SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
-HOSTNAME = 'foo.example.com'
-PUBLIC_IP = '10.0.0.3'
-MACADDR = '02:00:0a:12:01:01'
-IP_BY_MACADDR = '10.18.1.1'
-IP4_PREFIX = '24'
-IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba'
-IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba'
-IP6_GW = '2001:db8:1::ffff'
-IP6_PREFIX = '48'
+INVALID_CONTEXT = ";"
+USER_DATA = "#cloud-config\napt_upgrade: true"
+SSH_KEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i"
+HOSTNAME = "foo.example.com"
+PUBLIC_IP = "10.0.0.3"
+MACADDR = "02:00:0a:12:01:01"
+IP_BY_MACADDR = "10.18.1.1"
+IP4_PREFIX = "24"
+IP6_GLOBAL = "2001:db8:1:0:400:c0ff:fea8:1ba"
+IP6_ULA = "fd01:dead:beaf:0:400:c0ff:fea8:1ba"
+IP6_GW = "2001:db8:1::ffff"
+IP6_PREFIX = "48"
DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
class TestOpenNebulaDataSource(CiTestCase):
parsed_user = None
- allowed_subp = ['bash']
+ allowed_subp = ["bash"]
def setUp(self):
super(TestOpenNebulaDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
        # defaults for a few tests
self.ds = ds.DataSourceOpenNebula
self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
- self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
+ self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}}
        # we don't want 'sudo' called in tests, so we patch switch_user_cmd
def my_switch_user_cmd(user):
@@ -86,7 +85,7 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
            # don't try to look up CDs
util.find_devs_with = lambda n: []
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -97,18 +96,19 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# generate non-existing system user name
sys_cfg = self.sys_cfg
- invalid_user = 'invalid'
- while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
+ invalid_user = "invalid"
+ while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"):
try:
pwd.getpwnam(invalid_user)
- invalid_user += 'X'
+ invalid_user += "X"
except KeyError:
- sys_cfg['datasource']['OpenNebula']['parseuser'] = \
- invalid_user
+ sys_cfg["datasource"]["OpenNebula"][
+ "parseuser"
+ ] = invalid_user
            # don't try to look up CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -119,226 +119,265 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# dont' try to lookup for CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertTrue(ret)
finally:
util.find_devs_with = orig_find_devs_with
- self.assertEqual('opennebula', dsrc.cloud_name)
- self.assertEqual('opennebula', dsrc.platform_type)
+ self.assertEqual("opennebula", dsrc.cloud_name)
+ self.assertEqual("opennebula", dsrc.platform_type)
self.assertEqual(
- 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform)
+ "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform
+ )
def test_seed_dir_non_contextdisk(self):
- self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.NonContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_seed_dir_empty1_context(self):
- populate_dir(self.seed_dir, {'context.sh': ''})
+ populate_dir(self.seed_dir, {"context.sh": ""})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_empty2_context(self):
populate_context_dir(self.seed_dir, {})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_broken_context(self):
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
- self.assertRaises(ds.BrokenContextDiskDir,
- ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.BrokenContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_context_parser(self):
populate_context_dir(self.seed_dir, TEST_VARS)
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertEqual(TEST_VARS, results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertEqual(TEST_VARS, results["metadata"])
def test_ssh_key(self):
- public_keys = ['first key', 'second key']
+ public_keys = ["first key", "second key"]
for c in range(4):
- for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
+ for k in ("SSH_KEY", "SSH_PUBLIC_KEY"):
my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
- populate_context_dir(my_d, {k: '\n'.join(public_keys)})
+ populate_context_dir(my_d, {k: "\n".join(public_keys)})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('public-keys' in results['metadata'])
- self.assertEqual(public_keys,
- results['metadata']['public-keys'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("public-keys" in results["metadata"])
+ self.assertEqual(
+ public_keys, results["metadata"]["public-keys"]
+ )
public_keys.append(SSH_KEY % (c + 1,))
def test_user_data_plain(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: USER_DATA,
- 'USERDATA_ENCODING': ''})
+ populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
def test_user_data_encoding_required_for_decode(self):
b64userdata = util.b64e(USER_DATA)
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: b64userdata})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(b64userdata, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(b64userdata, results["userdata"])
def test_user_data_base64_encoding(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: util.b64e(USER_DATA),
- 'USERDATA_ENCODING': 'base64'})
+ populate_context_dir(
+ my_d, {k: util.b64e(USER_DATA), "USERDATA_ENCODING": "base64"}
+ )
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_hostname(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in (
+ "SET_HOSTNAME",
+ "HOSTNAME",
+ "PUBLIC_IP",
+ "IP_PUBLIC",
+ "ETH0_IP",
+ ):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('local-hostname' in results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("local-hostname" in results["metadata"])
self.assertEqual(
- PUBLIC_IP, results['metadata']['local-hostname'])
+ PUBLIC_IP, results["metadata"]["local-hostname"]
+ )
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_network_interfaces(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
# without ETH0_MAC
# for older OpenNebula?
- populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR})
+ populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP and ETH0_MAC
populate_context_dir(
- self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": IP_BY_MACADDR, "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP with empty string and ETH0_MAC
# in the case of a Virtual Network that contains
# "AR = [ TYPE = ETHER ]"
populate_context_dir(
- self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": "", "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': '255.255.0.0'
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "255.255.0.0",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/16' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/16"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': ''
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6_ULA
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_ULA + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_ULA + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/' + IP6_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/" + IP6_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
def test_find_candidates(self):
def my_devs_with(criteria):
@@ -351,25 +390,28 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
util.find_devs_with = my_devs_with
- self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
- ds.find_candidate_devs())
+ self.assertEqual(
+ ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs()
+ )
finally:
util.find_devs_with = orig_find_devs_with
-@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={}))
+@mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={}))
class TestOpenNebulaNetwork(unittest.TestCase):
- system_nics = ('eth0', 'ens3')
+ system_nics = ("eth0", "ens3")
def test_context_devname(self):
"""Verify context_devname correctly returns mac and name."""
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH1_MAC': '02:00:0a:12:0f:0f', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH1_MAC": "02:00:0a:12:0f:0f",
+ }
expected = {
- '02:00:0a:12:01:01': 'ETH0',
- '02:00:0a:12:0f:0f': 'ETH1', }
+ "02:00:0a:12:01:01": "ETH0",
+ "02:00:0a:12:0f:0f": "ETH1",
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(expected, net.context_devname)
@@ -379,28 +421,30 @@ class TestOpenNebulaNetwork(unittest.TestCase):
and search domains.
"""
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
expected = {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']}
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_nameservers('eth0')
+ val = net.get_nameservers("eth0")
self.assertEqual(expected, val)
def test_get_mtu(self):
"""Verify get_mtu('device') correctly returns MTU size."""
- context = {'ETH0_MTU': '1280'}
+ context = {"ETH0_MTU": "1280"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mtu('eth0')
- self.assertEqual('1280', val)
+ val = net.get_mtu("eth0")
+ self.assertEqual("1280", val)
def test_get_ip(self):
"""Verify get_ip('device') correctly returns IPv4 address."""
- context = {'ETH0_IP': PUBLIC_IP}
+ context = {"ETH0_IP": PUBLIC_IP}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(PUBLIC_IP, val)
def test_get_ip_emptystring(self):
@@ -409,9 +453,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns the IP address derived from the MAC address if ETH0_IP is an empty
string.
"""
- context = {'ETH0_IP': ''}
+ context = {"ETH0_IP": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(IP_BY_MACADDR, val)
def test_get_ip6(self):
@@ -420,11 +464,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, the IPv6 address is given by ETH0_IP6.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': '', }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": "",
+ }
expected = [IP6_GLOBAL]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_ula(self):
@@ -433,11 +478,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, the IPv6 address is given by ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_dual(self):
@@ -446,20 +492,21 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, the IPv6 addresses are given by ETH0_IP6 and ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_GLOBAL, IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_prefix(self):
"""
Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
+ context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
+ val = net.get_ip6_prefix("eth0")
self.assertEqual(IP6_PREFIX, val)
def test_get_ip6_prefix_emptystring(self):
@@ -468,58 +515,59 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns the default value '64' if ETH0_IP6_PREFIX_LENGTH is an empty
string.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': ''}
+ context = {"ETH0_IP6_PREFIX_LENGTH": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
- self.assertEqual('64', val)
+ val = net.get_ip6_prefix("eth0")
+ self.assertEqual("64", val)
def test_get_gateway(self):
"""
Verify get_gateway('device') correctly returns IPv4 default gateway
address.
"""
- context = {'ETH0_GATEWAY': '1.2.3.5'}
+ context = {"ETH0_GATEWAY": "1.2.3.5"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway('eth0')
- self.assertEqual('1.2.3.5', val)
+ val = net.get_gateway("eth0")
+ self.assertEqual("1.2.3.5", val)
def test_get_gateway6(self):
"""
Verify get_gateway6('device') correctly returns IPv6 default gateway
address.
"""
- context = {'ETH0_GATEWAY6': IP6_GW}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway6('eth0')
- self.assertEqual(IP6_GW, val)
+ for k in ("GATEWAY6", "IP6_GATEWAY"):
+ context = {"ETH0_" + k: IP6_GW}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_gateway6("eth0")
+ self.assertEqual(IP6_GW, val)
def test_get_mask(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
"""
- context = {'ETH0_MASK': '255.255.0.0'}
+ context = {"ETH0_MASK": "255.255.0.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.0.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.0.0", val)
def test_get_mask_emptystring(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
It returns the default value '255.255.255.0' if ETH0_MASK is an empty string.
"""
- context = {'ETH0_MASK': ''}
+ context = {"ETH0_MASK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.255.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.255.0", val)
def test_get_network(self):
"""
Verify get_network('device') correctly returns IPv4 network address.
"""
- context = {'ETH0_NETWORK': '1.2.3.0'}
+ context = {"ETH0_NETWORK": "1.2.3.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('1.2.3.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("1.2.3.0", val)
def test_get_network_emptystring(self):
"""
@@ -527,48 +575,48 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns the network address derived from the MAC address if ETH0_NETWORK is an
empty string.
"""
- context = {'ETH0_NETWORK': ''}
+ context = {"ETH0_NETWORK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('10.18.1.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("10.18.1.0", val)
def test_get_field(self):
"""
Verify get_field('device', 'name') returns *context* value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue(self):
"""
Verify get_field('device', 'name', 'default value') returns *context*
value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue_emptycontext(self):
"""
Verify get_field('device', 'name', 'default value') returns *default*
value if the context value is an empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DEFAULT_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DEFAULT_VALUE", val)
def test_get_field_emptycontext(self):
"""
Verify get_field('device', 'name') returns None if the context value is an
empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
def test_get_field_nonecontext(self):
@@ -576,9 +624,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_field('device', 'name') returns None if the context value is
None.
"""
- context = {'ETH9_DUMMY': None}
+ context = {"ETH9_DUMMY": None}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -587,31 +635,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '1.2.3.5', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "1.2.3.5",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway4': '1.2.3.5',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway4": "1.2.3.5",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -622,31 +678,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': IP6_GW, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": IP6_GW,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway6': IP6_GW,
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway6": IP6_GW,
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -657,37 +721,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_IP6_PREFIX_LENGTH': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/' + IP4_PREFIX,
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/" + IP4_PREFIX,
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -698,37 +771,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '',
- 'ETH0_DNS': '',
- 'ETH0_SEARCH_DOMAIN': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "",
+ "ETH0_DNS": "",
+ "ETH0_SEARCH_DOMAIN": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -739,31 +821,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '1280', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "1280",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'mtu': '1280',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "mtu": "1280",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -774,11 +864,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork({}, mock.Mock())
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
self.assertEqual(net.gen_conf(), expected)
@@ -793,71 +886,82 @@ class TestOpenNebulaNetwork(unittest.TestCase):
def test_eth0_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': '',
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': '',
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': '',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": "",
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/16'],
- 'gateway4': '1.2.3.5',
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/16"],
+ "gateway4": "1.2.3.5",
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"]
+ },
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
def test_eth0_v4v6_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/16',
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'mtu': '1280'}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/16",
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "mtu": "1280",
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -867,62 +971,67 @@ class TestOpenNebulaNetwork(unittest.TestCase):
MAC_1 = "02:00:0a:12:01:01"
MAC_2 = "02:00:0a:12:01:02"
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': '10.18.1.1',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MAC_2,
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com',
- 'ETH3_DNS': '10.3.1.2',
- 'ETH3_GATEWAY': '10.3.0.1',
- 'ETH3_GATEWAY6': '',
- 'ETH3_IP': '10.3.1.3',
- 'ETH3_IP6': '',
- 'ETH3_IP6_PREFIX_LENGTH': '',
- 'ETH3_IP6_ULA': '',
- 'ETH3_MAC': MAC_1,
- 'ETH3_MASK': '255.255.0.0',
- 'ETH3_MTU': '',
- 'ETH3_NETWORK': '10.3.0.0',
- 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": "10.18.1.1",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MAC_2,
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com",
+ "ETH3_DNS": "10.3.1.2",
+ "ETH3_GATEWAY": "10.3.0.1",
+ "ETH3_GATEWAY6": "",
+ "ETH3_IP": "10.3.1.3",
+ "ETH3_IP6": "",
+ "ETH3_IP6_PREFIX_LENGTH": "",
+ "ETH3_IP6_ULA": "",
+ "ETH3_MAC": MAC_1,
+ "ETH3_MASK": "255.255.0.0",
+ "ETH3_MTU": "",
+ "ETH3_NETWORK": "10.3.0.0",
+ "ETH3_SEARCH_DOMAIN": "third.example.com third.example.org",
}
net = ds.OpenNebulaNetwork(
context,
mock.Mock(),
- system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
+ system_nics_by_mac={MAC_1: "enp0s25", MAC_2: "enp1s2"},
)
expected = {
- 'version': 2,
- 'ethernets': {
- 'enp1s2': {
- 'match': {'macaddress': MAC_2},
- 'addresses': [
- '10.18.1.1/16',
- IP6_GLOBAL + '/64',
- IP6_ULA + '/64'],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com']},
- 'mtu': '1280'},
- 'enp0s25': {
- 'match': {'macaddress': MAC_1},
- 'addresses': ['10.3.1.3/16'],
- 'gateway4': '10.3.0.1',
- 'nameservers': {
- 'addresses': ['10.3.1.2', '1.2.3.8'],
- 'search': [
- 'third.example.com',
- 'third.example.org']}}}}
+ "version": 2,
+ "ethernets": {
+ "enp1s2": {
+ "match": {"macaddress": MAC_2},
+ "addresses": [
+ "10.18.1.1/16",
+ IP6_GLOBAL + "/64",
+ IP6_ULA + "/64",
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com"],
+ },
+ "mtu": "1280",
+ },
+ "enp0s25": {
+ "match": {"macaddress": MAC_1},
+ "addresses": ["10.3.1.3/16"],
+ "gateway4": "10.3.0.1",
+ "nameservers": {
+ "addresses": ["10.3.1.2", "1.2.3.8"],
+ "search": ["third.example.com", "third.example.org"],
+ },
+ },
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -930,7 +1039,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
class TestParseShellConfig:
@pytest.mark.allow_subp_for("bash")
def test_no_seconds(self):
- cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
+ cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
assert ret == {"foo": "bar", "xx": "foo"}
@@ -969,7 +1078,8 @@ class TestGetPhysicalNicsByMac:
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for k, v in variables.items():
- data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
- populate_dir(path, {'context.sh': data})
+ data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
+ populate_dir(path, {"context.sh": data})
+
# vi: ts=4 expandtab
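The OpenNebula tests above drive ds.read_context_disk_dir() through the populate_context_dir() helper defined at the end of the file, which serializes each test variable into a shell-quoted context.sh fragment. As a rough sketch of what that helper produces (illustrative only, not part of the diff; the variable values are example values borrowed from the tests):

    variables = {"ETH0_IP": "10.18.1.1", "ETH0_MAC": "02:00:0a:12:01:01"}
    data = "# Context variables generated by OpenNebula\n"
    for k, v in variables.items():
        data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
    # data now reads:
    #   # Context variables generated by OpenNebula
    #   ETH0_IP='10.18.1.1'
    #   ETH0_MAC='02:00:0a:12:01:01'
    # populate_dir(path, {"context.sh": data}) then writes it where
    # read_context_disk_dir() expects to find it.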
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
new file mode 100644
index 00000000..c111bbcd
--- /dev/null
+++ b/tests/unittests/sources/test_openstack.py
@@ -0,0 +1,788 @@
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import json
+import re
+from io import StringIO
+from urllib.parse import urlparse
+
+import httpretty as hp
+
+from cloudinit import helpers, settings, util
+from cloudinit.sources import UNSET, BrokenMetadata
+from cloudinit.sources import DataSourceOpenStack as ds
+from cloudinit.sources import convert_vendordata
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+BASE_URL = "http://169.254.169.254"
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "FIXME",
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": "0.0.0.0",
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "0.0.0.1",
+ "reservation-id": "r-iru5qm4m",
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+VENDOR_DATA = {
+ "magic": "",
+}
+VENDOR_DATA2 = {"static": {}}
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+OS_FILES = {
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/network_data.json": json.dumps(
+ {"links": [], "networks": [], "services": []}
+ ),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2),
+}
+EC2_FILES = {
+ "latest/user-data": USER_DATA,
+}
+EC2_VERSIONS = [
+ "latest",
+]
+
+MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
+
+
+# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
+def _register_uris(version, ec2_files, ec2_meta, os_files):
+ """Registers a set of url patterns into httpretty that will mimic the
+ same data returned by the openstack metadata service (and ec2 service)."""
+
+ def match_ec2_url(uri, headers):
+ path = uri.path.strip("/")
+ if len(path) == 0:
+ return (200, headers, "\n".join(EC2_VERSIONS))
+ path = uri.path.lstrip("/")
+ if path in ec2_files:
+ return (200, headers, ec2_files.get(path))
+ if path == "latest/meta-data/":
+ buf = StringIO()
+ for (k, v) in ec2_meta.items():
+ if isinstance(v, (list, tuple)):
+ buf.write("%s/" % (k))
+ else:
+ buf.write("%s" % (k))
+ buf.write("\n")
+ return (200, headers, buf.getvalue())
+ if path.startswith("latest/meta-data/"):
+ value = None
+ pieces = path.split("/")
+ if path.endswith("/"):
+ pieces = pieces[2:-1]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ else:
+ pieces = pieces[2:]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ if value is not None:
+ return (200, headers, str(value))
+ return (404, headers, "")
+
+ def match_os_uri(uri, headers):
+ path = uri.path.strip("/")
+ if path == "openstack":
+ return (200, headers, "\n".join([openstack.OS_LATEST]))
+ path = uri.path.lstrip("/")
+ if path in os_files:
+ return (200, headers, os_files.get(path))
+ return (404, headers, "")
+
+ def get_request_callback(method, uri, headers):
+ uri = urlparse(uri)
+ path = uri.path.lstrip("/").split("/")
+ if path[0] == "openstack":
+ return match_os_uri(uri, headers)
+ return match_ec2_url(uri, headers)
+
+ hp.register_uri(
+ hp.GET,
+ re.compile(r"http://169.254.169.254/.*"),
+ body=get_request_callback,
+ )
+
+
+def _read_metadata_service():
+ return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
+
+
+class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+
+ with_logs = True
+ VERSION = "latest"
+
+ def setUp(self):
+ super(TestOpenStackDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_successful(self):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(2, len(f["files"]))
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual(EC2_META, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+ metadata = f["metadata"]
+ self.assertEqual("nova", metadata.get("availability_zone"))
+ self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname"))
+ self.assertEqual(
+ "sm-foo-test.novalocal", metadata.get("local-hostname")
+ )
+ self.assertEqual("sm-foo-test", metadata.get("name"))
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid")
+ )
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id")
+ )
+
+ def test_no_ec2(self):
+ _register_uris(self.VERSION, {}, {}, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual({}, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+
+ def test_bad_metadata(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(openstack.NonReadable, _read_metadata_service)
+
+ def test_bad_uuid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta.pop("uuid")
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_userdata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("user_data"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("userdata"))
+
+ def test_vendordata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata"))
+
+ def test_vendordata2_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata2"))
+
+ def test_vendordata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_vendordata2_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_metadata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_datasource(self, m_dhcp):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os.version)
+ md = dict(ds_os.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os.userdata_raw)
+ self.assertEqual(2, len(ds_os.files))
+ self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
+ self.assertIsNone(ds_os.vendordata_raw)
+ m_dhcp.assert_not_called()
+
+ @hp.activate
+ @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_local_datasource(self, m_dhcp, m_net):
+ """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os_local = ds.DataSourceOpenStackLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
+
+ self.assertIsNone(ds_os_local.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os_local.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os_local.version)
+ md = dict(ds_os_local.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
+ self.assertEqual(2, len(ds_os_local.files))
+ self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
+ self.assertIsNone(ds_os_local.vendordata_raw)
+ m_dhcp.assert_called_with("eth9", None)
+
+ def test_bad_datasource_meta(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+ self.assertIn(
+ "InvalidMetaDataException: Broken metadata address"
+ " http://169.254.169.25",
+ self.logs.getvalue(),
+ )
+
+ def test_no_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ def test_network_config_disabled_by_datasource_config(self):
+ """The network_config can be disabled from datasource config."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {"apply_network_config": False}
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json # Ignore this content from metadata
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertIsNone(ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_network_config_from_network_json(self):
+ """The datasource gets network_config from network_data.json."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ m_convert_json.return_value = example_cfg
+ self.assertEqual(example_cfg, ds_os.network_config)
+ self.assertIn(
+ "network config provided via network_json", self.logs.getvalue()
+ )
+ m_convert_json.assert_called_with(sample_json, known_macs=None)
+
+ def test_network_config_cached(self):
+ """The datasource caches the network_config property."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os._network_config = example_cfg
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertEqual(example_cfg, ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_disabled_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta["meta"] = {
+ "dsmode": "disabled",
+ }
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ @hp.activate
+ def test_wb__crawl_metadata_does_not_persist(self):
+ """_crawl_metadata returns current metadata and does not cache."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ crawled_data = ds_os._crawl_metadata()
+ self.assertEqual(UNSET, ds_os.ec2_metadata)
+ self.assertIsNone(ds_os.userdata_raw)
+ self.assertEqual(0, len(ds_os.files))
+ self.assertIsNone(ds_os.vendordata_raw)
+ self.assertEqual(
+ [
+ "dsmode",
+ "ec2-metadata",
+ "files",
+ "metadata",
+ "networkdata",
+ "userdata",
+ "vendordata",
+ "vendordata2",
+ "version",
+ ],
+ sorted(crawled_data.keys()),
+ )
+ self.assertEqual("local", crawled_data["dsmode"])
+ self.assertEqual(EC2_META, crawled_data["ec2-metadata"])
+ self.assertEqual(2, len(crawled_data["files"]))
+ md = copy.deepcopy(crawled_data["metadata"])
+ md.pop("instance-id")
+ md.pop("local-hostname")
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(
+ json.loads(OS_FILES["openstack/latest/network_data.json"]),
+ crawled_data["networkdata"],
+ )
+ self.assertEqual(USER_DATA, crawled_data["userdata"])
+ self.assertEqual(VENDOR_DATA, crawled_data["vendordata"])
+ self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"])
+ self.assertEqual(2, crawled_data["version"])
+
+
+class TestVendorDataLoading(test_helpers.TestCase):
+ def cvj(self, data):
+ return convert_vendordata(data)
+
+ def test_vd_load_none(self):
+        # non-existent vendor-data should return None
+ self.assertIsNone(self.cvj(None))
+
+ def test_vd_load_string(self):
+ self.assertEqual(self.cvj("foobar"), "foobar")
+
+ def test_vd_load_list(self):
+ data = [{"foo": "bar"}, "mystring", list(["another", "list"])]
+ self.assertEqual(self.cvj(data), data)
+
+ def test_vd_load_dict_no_ci(self):
+ self.assertIsNone(self.cvj({"foo": "bar"}))
+
+ def test_vd_load_dict_ci_dict(self):
+ self.assertRaises(
+ ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}}
+ )
+
+ def test_vd_load_dict_ci_string(self):
+ data = {"foo": "bar", "cloud-init": "VENDOR_DATA"}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+ def test_vd_load_dict_ci_list(self):
+ data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+
+@test_helpers.mock.patch(MOCK_PATH + "util.is_x86")
+class TestDetectOpenStack(test_helpers.CiTestCase):
+ def test_detect_openstack_non_intel_x86(self, m_is_x86):
+ """Return True on non-intel platforms because dmi isn't conclusive."""
+ m_is_x86.return_value = False
+ self.assertTrue(
+ ds.detect_openstack(), "Expected detect_openstack == True"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_not_detect_openstack_intel_x86_ec2(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return False on EC2 platforms."""
+ m_is_x86.return_value = True
+ # No product_name in proc/1/environ
+ m_proc_env.return_value = {"HOME": "/"}
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on EC2
+ if dmi_key == "chassis-asset-tag":
+ return "" # Empty string on EC2
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertFalse(
+ ds.detect_openstack(), "Expected detect_openstack == False on EC2"
+ )
+ m_proc_env.assert_called_with(1)
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_intel_product_name_compute(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack compute and nova instances."""
+ m_is_x86.return_value = True
+ openstack_product_names = ["OpenStack Nova", "OpenStack Compute"]
+
+ for product_name in openstack_product_names:
+ m_dmi.return_value = product_name
+ self.assertTrue(
+ ds.detect_openstack(), "Failed to detect_openstack"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on OpenTelekomCloud
+ if dmi_key == "chassis-asset-tag":
+ return "OpenTelekomCloud"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_sapccloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "VMware Virtual Platform" # SAP CCloud uses VMware
+ if dmi_key == "chassis-asset-tag":
+ return "SAP CCloud VM"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on SAP CCloud VM",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Standard PC (i440FX + PIIX, 1996)" # No match
+ if dmi_key == "chassis-asset-tag":
+ return "OracleCloud.com"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(accept_oracle=True),
+ "Expected detect_openstack == True on OracleCloud.com",
+ )
+ self.assertFalse(
+ ds.detect_openstack(accept_oracle=False),
+ "Expected detect_openstack == False.",
+ )
+
+ def _test_detect_openstack_nova_compute_chassis_asset_tag(
+ self, m_dmi, m_is_x86, chassis_tag
+ ):
+ """Return True on OpenStack reporting generic asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Generic OpenStack Platform"
+ if dmi_key == "chassis-asset-tag":
+ return chassis_tag
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on Generic OpenStack Platform",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Nova"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Compute"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_by_proc_1_environ(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return True when nova product_name specified in /proc/1/environ."""
+ m_is_x86.return_value = True
+ # Nova product_name in proc/1/environ
+ m_proc_env.return_value = {
+ "HOME": "/",
+ "product_name": "OpenStack Nova",
+ }
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish'
+ if dmi_key == "chassis-asset-tag":
+                return ""  # Nothing 'openstackish'
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+            "Expected detect_openstack == True via /proc/1/environ",
+ )
+ m_proc_env.assert_called_with(1)
+
+
+class TestMetadataReader(test_helpers.HttprettyTestCase):
+ """Test the MetadataReader."""
+
+ burl = "http://169.254.169.254/"
+ md_base = {
+ "availability_zone": "myaz1",
+ "hostname": "sm-foo-test.novalocal",
+ "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
+ "launch_index": 0,
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "project_id": "6a103f813b774b9fb15a4fcd36e1c056",
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+ }
+
+ def register(self, path, body=None, status=200):
+ content = body if not isinstance(body, str) else body.encode("utf-8")
+ hp.register_uri(
+ hp.GET, self.burl + "openstack" + path, status=status, body=content
+ )
+
+ def register_versions(self, versions):
+ self.register("", "\n".join(versions))
+ self.register("/", "\n".join(versions))
+
+ def register_version(self, version, data):
+ content = "\n".join(sorted(data.keys()))
+ self.register(version, content)
+ self.register(version + "/", content)
+ for path, content in data.items():
+ self.register("/%s/%s" % (version, path), content)
+ self.register("/%s/%s" % (version, path), content)
+ if "user_data" not in data:
+ self.register("/%s/user_data" % version, "nodata", status=404)
+
+ def test__find_working_version(self):
+ """Test a working version ignores unsupported."""
+ unsup = "2016-11-09"
+ self.register_versions(
+ [
+ openstack.OS_FOLSOM,
+ openstack.OS_LIBERTY,
+ unsup,
+ openstack.OS_LATEST,
+ ]
+ )
+ self.assertEqual(
+ openstack.OS_LIBERTY,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test__find_working_version_uses_latest(self):
+ """'latest' should be used if no supported versions."""
+ unsup1, unsup2 = ("2016-11-09", "2017-06-06")
+ self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
+ self.assertEqual(
+ openstack.OS_LATEST,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test_read_v2_os_ocata(self):
+ """Validate return value of read_v2 for os_ocata data."""
+ md = copy.deepcopy(self.md_base)
+ md["devices"] = []
+ network_data = {"links": [], "networks": [], "services": []}
+ vendor_data = {}
+ vendor_data2 = {"static": {}}
+
+ data = {
+ "meta_data.json": json.dumps(md),
+ "network_data.json": json.dumps(network_data),
+ "vendor_data.json": json.dumps(vendor_data),
+ "vendor_data2.json": json.dumps(vendor_data2),
+ }
+
+ self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
+ self.register_version(openstack.OS_OCATA, data)
+
+ mock_read_ec2 = test_helpers.mock.MagicMock(
+ return_value={"instance-id": "unused-ec2"}
+ )
+ expected_md = copy.deepcopy(md)
+ expected_md.update(
+ {"instance-id": md["uuid"], "local-hostname": md["hostname"]}
+ )
+ expected = {
+ "userdata": "", # Annoying, no user-data results in empty string.
+ "version": 2,
+ "metadata": expected_md,
+ "vendordata": vendor_data,
+ "vendordata2": vendor_data2,
+ "networkdata": network_data,
+ "ec2-metadata": mock_read_ec2.return_value,
+ "files": {},
+ }
+ reader = openstack.MetadataReader(self.burl)
+ reader._read_ec2_metadata = mock_read_ec2
+ self.assertEqual(expected, reader.read_v2())
+ self.assertEqual(1, mock_read_ec2.call_count)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/tests/unittests/sources/test_oracle.py
index a7bbdfd9..b3e6f10c 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -11,8 +11,8 @@ import pytest
from cloudinit.sources import DataSourceOracle as oracle
from cloudinit.sources import NetworkConfigSource
from cloudinit.sources.DataSourceOracle import OpcMetadata
-from cloudinit.tests import helpers as test_helpers
from cloudinit.url_helper import UrlError
+from tests.unittests import helpers as test_helpers
DS_PATH = "cloudinit.sources.DataSourceOracle"
@@ -93,7 +93,7 @@ def metadata_version():
return 2
-@pytest.yield_fixture
+@pytest.fixture
def oracle_ds(request, fixture_utils, paths, metadata_version):
"""
Return an instantiated DataSourceOracle.
@@ -119,7 +119,9 @@ def oracle_ds(request, fixture_utils, paths, metadata_version):
return_value=metadata,
):
yield oracle.DataSourceOracle(
- sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
+ sys_cfg=sys_cfg,
+ distro=mock.Mock(),
+ paths=paths,
)
@@ -129,18 +131,22 @@ class TestDataSourceOracle:
assert "oracle" == oracle_ds.platform_type
def test_subplatform_before_fetch(self, oracle_ds):
- assert 'unknown' == oracle_ds.subplatform
+ assert "unknown" == oracle_ds.subplatform
def test_platform_info_after_fetch(self, oracle_ds):
oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v2/)' == \
- oracle_ds.subplatform
+ assert (
+ "metadata (http://169.254.169.254/opc/v2/)"
+ == oracle_ds.subplatform
+ )
- @pytest.mark.parametrize('metadata_version', [1])
+ @pytest.mark.parametrize("metadata_version", [1])
def test_v1_platform_info_after_fetch(self, oracle_ds):
oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v1/)' == \
- oracle_ds.subplatform
+ assert (
+ "metadata (http://169.254.169.254/opc/v1/)"
+ == oracle_ds.subplatform
+ )
def test_secondary_nics_disabled_by_default(self, oracle_ds):
assert not oracle_ds.ds_cfg["configure_secondary_nics"]
@@ -153,26 +159,31 @@ class TestDataSourceOracle:
class TestIsPlatformViable(test_helpers.CiTestCase):
- @mock.patch(DS_PATH + ".dmi.read_dmi_data",
- return_value=oracle.CHASSIS_ASSET_TAG)
+ @mock.patch(
+ DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG
+ )
def test_expected_viable(self, m_read_dmi_data):
"""System with known chassis tag is viable."""
self.assertTrue(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
"""System without known chassis tag is not viable."""
self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
def test_expected_not_viable_other(self, m_read_dmi_data):
"""System with unnown chassis tag is not viable."""
self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestNetworkConfigFromOpcImds:
def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
oracle_ds._vnics_data = [{}]
@@ -188,222 +199,317 @@ class TestNetworkConfigFromOpcImds:
# operations are used
oracle_ds._network_config = object()
oracle_ds._add_network_config_from_opc_imds()
- assert 'bare metal machine' in caplog.text
+ assert "bare metal machine" in caplog.text
def test_missing_mac_skipped(self, oracle_ds, caplog):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
+ "version": 1,
+ "config": [{"primary": "nic"}],
}
with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
oracle_ds._add_network_config_from_opc_imds()
- assert 1 == len(oracle_ds.network_config['config'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
+ assert 1 == len(oracle_ds.network_config["config"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
}
with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
oracle_ds._add_network_config_from_opc_imds()
- assert 1 == len(oracle_ds.network_config['ethernets'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
+ assert 1 == len(oracle_ds.network_config["ethernets"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
def test_secondary_nic(self, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
+ "version": 1,
+ "config": [{"primary": "nic"}],
}
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- assert 2 == len(oracle_ds.network_config['config'])
+ assert 2 == len(oracle_ds.network_config["config"])
- secondary_nic_cfg = oracle_ds.network_config['config'][1]
- assert nic_name == secondary_nic_cfg['name']
- assert 'physical' == secondary_nic_cfg['type']
- assert mac_addr == secondary_nic_cfg['mac_address']
- assert 9000 == secondary_nic_cfg['mtu']
+ secondary_nic_cfg = oracle_ds.network_config["config"][1]
+ assert nic_name == secondary_nic_cfg["name"]
+ assert "physical" == secondary_nic_cfg["type"]
+ assert mac_addr == secondary_nic_cfg["mac_address"]
+ assert 9000 == secondary_nic_cfg["mtu"]
- assert 1 == len(secondary_nic_cfg['subnets'])
- subnet_cfg = secondary_nic_cfg['subnets'][0]
+ assert 1 == len(secondary_nic_cfg["subnets"])
+ subnet_cfg = secondary_nic_cfg["subnets"][0]
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == subnet_cfg['address']
+ assert "10.0.0.231" == subnet_cfg["address"]
def test_secondary_nic_v2(self, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
}
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- assert 2 == len(oracle_ds.network_config['ethernets'])
+ assert 2 == len(oracle_ds.network_config["ethernets"])
- secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
- assert secondary_nic_cfg['dhcp4'] is False
- assert secondary_nic_cfg['dhcp6'] is False
- assert mac_addr == secondary_nic_cfg['match']['macaddress']
- assert 9000 == secondary_nic_cfg['mtu']
+ secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"]
+ assert secondary_nic_cfg["dhcp4"] is False
+ assert secondary_nic_cfg["dhcp6"] is False
+ assert mac_addr == secondary_nic_cfg["match"]["macaddress"]
+ assert 9000 == secondary_nic_cfg["mtu"]
- assert 1 == len(secondary_nic_cfg['addresses'])
+ assert 1 == len(secondary_nic_cfg["addresses"])
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
+ assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
- self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
+ self.add_patch(
+ DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac"
+ )
+ self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master")
def test_ignore_bogus_network_config(self):
- netcfg = {'something': 'here'}
+ netcfg = {"something": "here"}
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
def test_ignore_network_config_unknown_versions(self):
- netcfg = {'something': 'here', 'version': 3}
+ netcfg = {"something": "here", "version": 3}
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
def test_checks_v1_type_physical_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
passed_netcfg = copy.copy(netcfg)
self.m_netfail_master.return_value = False
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
def test_checks_v1_skips_non_phys_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "bond",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
self.assertEqual(0, self.m_netfail_master.call_count)
def test_removes_master_mac_property_v1(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
self.m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
}
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master,
- 'mac_address': mac_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_master,
+ "mac_address": mac_master,
+ },
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
def _is_netfail_master(iface):
- if iface == 'ens3':
+ if iface == "ens3":
return True
return False
+
self.m_netfail_master.side_effect = _is_netfail_master
- expected_cfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
+ expected_cfg = {
+ "version": 1,
+ "config": [
+ {"type": "physical", "name": nic_master},
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
oracle._ensure_netfailover_safe(netcfg)
self.assertEqual(expected_cfg, netcfg)
def test_checks_v2_type_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 2, 'ethernets': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
passed_netcfg = copy.copy(netcfg)
self.m_netfail_master.return_value = False
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
def test_skips_v2_non_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0"
self.m_get_interfaces_by_mac.return_value = {
mac_addr: nic_name,
}
- netcfg = {'version': 2, 'wifis': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
+ netcfg = {
+ "version": 2,
+ "wifis": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
self.assertEqual(netcfg, passed_netcfg)
self.assertEqual(0, self.m_netfail_master.call_count)
def test_removes_master_mac_property_v2(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
self.m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
}
- netcfg = {'version': 2, 'ethernets': {
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- nic_master: {'dhcp4': True, 'set-name': nic_master,
- 'match': {'macaddress': mac_master}},
- }}
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ nic_master: {
+ "dhcp4": True,
+ "set-name": nic_master,
+ "match": {"macaddress": mac_master},
+ },
+ },
+ }
def _is_netfail_master(iface):
- if iface == 'ens3':
+ if iface == "ens3":
return True
return False
+
self.m_netfail_master.side_effect = _is_netfail_master
- expected_cfg = {'version': 2, 'ethernets': {
- nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- }}
+ expected_cfg = {
+ "version": 2,
+ "ethernets": {
+ nic_master: {"dhcp4": True, "match": {"name": nic_master}},
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ },
+ }
oracle._ensure_netfailover_safe(netcfg)
import pprint
+
pprint.pprint(netcfg)
- print('---- ^^ modified ^^ ---- vv original vv ----')
+ print("---- ^^ modified ^^ ---- vv original vv ----")
pprint.pprint(expected_cfg)
self.assertEqual(expected_cfg, netcfg)
@@ -421,12 +527,12 @@ def _mock_v2_urls(httpretty):
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v2/instance/",
- body=instance_callback
+ body=instance_callback,
)
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v2/vnics/",
- body=vnics_callback
+ body=vnics_callback,
)
@@ -439,12 +545,12 @@ def _mock_no_v2_urls(httpretty):
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v1/instance/",
- body=OPC_V1_METADATA
+ body=OPC_V1_METADATA,
)
httpretty.register_uri(
httpretty.GET,
"http://169.254.169.254/opc/v1/vnics/",
- body=OPC_BM_SECONDARY_VNIC_RESPONSE
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE,
)
@@ -455,18 +561,34 @@ class TestReadOpcMetadata:
@mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
@pytest.mark.parametrize(
- 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
- (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ "version,setup_urls,instance_data,fetch_vnics,vnics_data",
+ [
+ (
+ 2,
+ _mock_v2_urls,
+ json.loads(OPC_V2_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
(2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
- (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (
+ 1,
+ _mock_no_v2_urls,
+ json.loads(OPC_V1_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
(1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
- ]
+ ],
)
def test_metadata_returned(
- self, version, setup_urls, instance_data,
- fetch_vnics, vnics_data, httpretty
+ self,
+ version,
+ setup_urls,
+ instance_data,
+ fetch_vnics,
+ vnics_data,
+ httpretty,
):
setup_urls(httpretty)
metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
@@ -486,10 +608,16 @@ class TestReadOpcMetadata:
(3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
(3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
(3, 3, None, pytest.raises(UrlError)),
- ]
+ ],
)
- def test_retries(self, v2_failure_count, v1_failure_count,
- expected_body, expectation, httpretty):
+ def test_retries(
+ self,
+ v2_failure_count,
+ v1_failure_count,
+ expected_body,
+ expectation,
+ httpretty,
+ ):
v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
v2_responses.append(httpretty.Response(OPC_V2_METADATA))
v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
@@ -521,7 +649,7 @@ class TestCommon_GetDataBehaviour:
separate class for that case.)
"""
- @pytest.yield_fixture(params=[True, False])
+ @pytest.fixture(params=[True, False])
def parameterized_oracle_ds(self, request, oracle_ds):
"""oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
is_iscsi_root = request.param
@@ -544,7 +672,8 @@ class TestCommon_GetDataBehaviour:
DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
)
def test_false_if_platform_not_viable(
- self, parameterized_oracle_ds,
+ self,
+ parameterized_oracle_ds,
):
assert not parameterized_oracle_ds._get_data()
@@ -567,7 +696,10 @@ class TestCommon_GetDataBehaviour:
),
)
def test_metadata_keys_set_correctly(
- self, keyname, expected_value, parameterized_oracle_ds,
+ self,
+ keyname,
+ expected_value,
+ parameterized_oracle_ds,
):
assert parameterized_oracle_ds._get_data()
assert expected_value == parameterized_oracle_ds.metadata[keyname]
@@ -587,7 +719,10 @@ class TestCommon_GetDataBehaviour:
DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
)
def test_attributes_set_correctly(
- self, attribute_name, expected_value, parameterized_oracle_ds,
+ self,
+ attribute_name,
+ expected_value,
+ parameterized_oracle_ds,
):
assert parameterized_oracle_ds._get_data()
assert expected_value == getattr(
@@ -620,7 +755,8 @@ class TestCommon_GetDataBehaviour:
instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
assert (
@@ -634,7 +770,8 @@ class TestCommon_GetDataBehaviour:
del instance_data["metadata"]["user_data"]
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
@@ -647,7 +784,8 @@ class TestCommon_GetDataBehaviour:
del instance_data["metadata"]
metadata = OpcMetadata(None, instance_data, None)
with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
):
assert parameterized_oracle_ds._get_data()
@@ -690,7 +828,13 @@ class TestNonIscsiRoot_GetDataBehaviour:
assert oracle_ds._get_data()
assert [
- mock.call(m_find_fallback_nic.return_value)
+ mock.call(
+ iface=m_find_fallback_nic.return_value,
+ connectivity_url_data={
+ "headers": {"Authorization": "Bearer Oracle"},
+ "url": "http://169.254.169.254/opc/v2/instance/",
+ },
+ )
] == m_EphemeralDHCPv4.call_args_list
@@ -749,9 +893,10 @@ class TestNetworkConfig:
def side_effect(self):
self._network_config["secondary_added"] = mock.sentinel.needle
- oracle_ds._vnics_data = 'DummyData'
+ oracle_ds._vnics_data = "DummyData"
with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
new=side_effect,
):
was_secondary_added = "secondary_added" in oracle_ds.network_config
@@ -767,19 +912,22 @@ class TestNetworkConfig:
oracle_ds._vnics_data = "DummyData"
with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
- side_effect=Exception()
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ side_effect=Exception(),
):
network_config = oracle_ds.network_config
assert network_config == m_read_initramfs_config.return_value
assert "Failed to parse secondary network configuration" in caplog.text
- def test_ds_network_cfg_preferred_over_initramfs(self, _m):
- """Ensure that DS net config is preferred over initramfs config"""
+ def test_ds_network_cfg_order(self, _m):
+        """Ensure that DS net config is preferred over initramfs config
+        but ranks below system config."""
config_sources = oracle.DataSourceOracle.network_config_sources
+ system_idx = config_sources.index(NetworkConfigSource.system_cfg)
ds_idx = config_sources.index(NetworkConfigSource.ds)
initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
- assert ds_idx < initramfs_idx
+ assert system_idx < ds_idx < initramfs_idx
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
new file mode 100644
index 00000000..c2c87f12
--- /dev/null
+++ b/tests/unittests/sources/test_ovf.py
@@ -0,0 +1,1237 @@
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import os
+from collections import OrderedDict
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.helpers import Paths
+from cloudinit.safeyaml import YAMLError
+from cloudinit.sources import DataSourceOVF as dsovf
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ CustomScriptNotFound,
+)
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+MPATH = "cloudinit.sources.DataSourceOVF."
+
+NOT_FOUND = None
+
+OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
+ oe:id="WebTier">
+ <!-- Information about hypervisor platform -->
+ <oe:PlatformSection>
+ <Kind>ESX Server</Kind>
+ <Version>3.0.1</Version>
+ <Vendor>VMware, Inc.</Vendor>
+ <Locale>en_US</Locale>
+ </oe:PlatformSection>
+ <!--- Properties defined for this virtual machine -->
+ <PropertySection>
+{properties}
+ </PropertySection>
+</Environment>
+"""
+
+
+def fill_properties(props, template=OVF_ENV_CONTENT):
+ lines = []
+ prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
+ for key, val in props.items():
+ lines.append(prop_tmpl.format(key=key, val=val))
+ indent = " "
+ properties = "".join([indent + line + "\n" for line in lines])
+ return template.format(properties=properties)
+
+
+class TestReadOvfEnv(CiTestCase):
+ def test_with_b64_userdata(self):
+ user_data = "#!/bin/sh\necho hello world\n"
+ user_data_b64 = base64.b64encode(user_data.encode()).decode()
+ props = {
+ "user-data": user_data_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+
+ def test_with_non_b64_userdata(self):
+ user_data = "my-user-data"
+ props = {"user-data": user_data, "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({}, cfg)
+
+ def test_with_no_userdata(self):
+ props = {"password": "passw0rd", "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual("inst-001", md["instance-id"])
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["eng.vmware.com", "vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ md["network-config"],
+ )
+ self.assertIsNone(ud)
+
+ def test_with_non_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ props = {
+ "network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_disable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+
+class TestMarkerFiles(CiTestCase):
+ def setUp(self):
+ super(TestMarkerFiles, self).setUp()
+ self.tdir = self.tmp_dir()
+
+ def test_false_when_markerid_none(self):
+ """Return False when markerid provided is None."""
+ self.assertFalse(
+ dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)
+ )
+
+ def test_markerid_file_exist(self):
+ """Return False when markerid file path does not exist,
+ True otherwise."""
+ self.assertFalse(dsovf.check_marker_exists("123", self.tdir))
+
+ marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
+ util.write_file(marker_file, "")
+ self.assertTrue(dsovf.check_marker_exists("123", self.tdir))
+
+ def test_marker_file_setup(self):
+ """Test creation of marker files."""
+ markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
+ self.assertFalse(os.path.exists(markerfilepath))
+ dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir)
+ self.assertTrue(os.path.exists(markerfilepath))
+
+
+class TestDatasourceOVF(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestDatasourceOVF, self).setUp()
+ self.datasource = dsovf.DataSourceOVF
+ self.tdir = self.tmp_dir()
+
+ def test_get_data_false_on_none_dmi_data(self):
+ """When dmi for system-product-name is None, get_data returns False."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": None,
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: No system-product-name found", self.logs.getvalue()
+ )
+
+ def test_get_data_vmware_customization_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization for VMware platform is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_sys_cfg_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": True}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using VMware config is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_allow_raw_data_disabled(self):
+ """When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": False,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ util.write_file(metadata_file, "This is meta data")
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""],
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using raw data is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_enabled(self):
+        """When the cloud-init workflow for VMware is enabled via sys_cfg,
+        log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_cust_script_disabled(self):
+ """If custom script is disabled by VMware tools configuration,
+ raise a RuntimeError.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+ customscript = self.tmp_path("test-script", self.tdir)
+ util.write_file(customscript, "This is the post cust script")
+
+ with mock.patch(MPATH + "get_tools_config", return_value="invalid"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(RuntimeError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ self.assertIn(
+ "Custom script is disabled by VM Administrator",
+ str(context.exception),
+ )
+
+ def test_get_data_cust_script_enabled(self):
+ """If custom script is enabled by VMware tools configuration,
+ execute the script.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Mock custom script is enabled by return true when calling
+ # get_tools_config
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ # Verify custom script is trying to be executed
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_force_run_post_script_is_yes(self):
+        """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script can run even
+        if enable-custom-scripts is not defined in VM Tools configuration.
+        """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
+ # default value is TRUE
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ DEFAULT-RUN-POST-CUST-SCRIPT = yes
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Mock get_tools_config(section, key, defaultVal) to return
+ # defaultVal
+ def my_get_tools_config(*args, **kwargs):
+ return args[2]
+
+ with mock.patch(
+ MPATH + "get_tools_config", side_effect=my_get_tools_config
+ ):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ # Verify custom script still runs although it is
+ # disabled by VMware Tools
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_non_vmware_seed_platform_info(self):
+ """Platform info properly reports when on non-vmware platforms."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
+ )
+
+ def test_get_data_vmware_seed_platform_info(self):
+ """Platform info properly reports when on VMware platform."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "vmware (%s/seed/ovf-env.xml)" % self.tdir,
+ ds.subplatform,
+ )
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_vmware_guestinfo_with_network_config(
+ self, m_persist, m_subp
+ ):
+ self._test_get_data_with_network_config(guestinfo=False, iso=True)
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
+ self._test_get_data_with_network_config(guestinfo=True, iso=False)
+
+ def _test_get_data_with_network_config(self, guestinfo, iso):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ with mock.patch(
+ MPATH + "transport_vmware_guestinfo",
+ return_value=env if guestinfo else NOT_FOUND,
+ ):
+ with mock.patch(
+ MPATH + "transport_iso9660",
+ return_value=env if iso else NOT_FOUND,
+ ):
+ self.assertTrue(ds.get_data())
+ self.assertEqual("inst-001", ds.metadata["instance-id"])
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ ds.network_config,
+ )
+
+ def test_get_data_cloudinit_metadata_json(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is json.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ {
+ "instance-id": "cloud-vm",
+ "local-hostname": "my-host.domain.com",
+ "network": {
+ "version": 2,
+ "ethernets": {
+ "eths": {
+ "match": {
+ "name": "ens*"
+ },
+ "dhcp4": true
+ }
+ }
+ }
+ }
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_yaml(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is yaml.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_not_valid(self):
+ """Test metadata is not JSON or YAML format."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = "[This is not json or yaml format]a=b"
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(YAMLError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn(
+ "expected '<document start>', but found '<scalar>'",
+ str(context.exception),
+ )
+
+ def test_get_data_cloudinit_metadata_not_found(self):
+ """Test metadata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Don't prepare the meta data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+ def test_get_data_cloudinit_userdata(self):
+ """Test user data can be loaded to cloud-init user data."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Prepare the user data file
+ userdata_file = self.tmp_path("test-user", self.tdir)
+ userdata_content = "This is the user data"
+ util.write_file(userdata_file, userdata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ self.tdir + "/test-user",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual(userdata_content, ds.userdata_raw)
+
+ def test_get_data_cloudinit_userdata_not_found(self):
+ """Test userdata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Don't prepare the user data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+
+class TestTransportIso9660(CiTestCase):
+ def setUp(self):
+ super(TestTransportIso9660, self).setUp()
+ self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with")
+ self.add_patch("cloudinit.util.mounts", "m_mounts")
+ self.add_patch("cloudinit.util.mount_cb", "m_mount_cb")
+ self.add_patch(
+ "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env"
+ )
+ self.m_get_ovf_env.return_value = ("myfile", "mycontent")
+
+ def test_find_already_mounted(self):
+        """Check we call get_ovf_env on matching mounted devices"""
+ mounts = {
+ "/dev/sr9": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_skips_non_iso9660(self):
+        """Check we call get_ovf_env ignoring non-iso9660 mounts"""
+ mounts = {
+ "/dev/xvdb": {
+ "fstype": "vfat",
+ "mountpoint": "wark/foobar",
+ "opts": "defaults,noatime",
+ },
+ "/dev/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ },
+ }
+ # We use an OrderedDict here to ensure we check xvdb before xvdc
+ # as we're not mocking the regex matching, however, if we place
+ # an entry in the results then we can be reasonably sure that
+ # we're skipping an entry which fails to match.
+ self.m_mounts.return_value = OrderedDict(
+ sorted(mounts.items(), key=lambda t: t[0])
+ )
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_matches_kname(self):
+        """Check we don't regex match on the basename of the device"""
+ mounts = {
+ "/dev/foo/bar/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+ # we're skipping an entry which fails to match.
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660(self):
+ """Check we call mount_cb on blockdevs with iso9660 only"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/sr0"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
+ """Check we call mount_cb on blockdevs with iso9660 and match regex"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = [
+ "/dev/abc",
+ "/dev/my-cdrom",
+ "/dev/sr0",
+ ]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_not_called_no_matches(self):
+ """Check we don't call mount_cb if nothing matches"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/vg/myovf"]
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+ self.assertEqual(0, self.m_mount_cb.call_count)
+
+ def test_mount_cb_called_require_iso_false(self):
+ """Check we call mount_cb on blockdevs with require_iso=False"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/xvdz"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual(
+ "mycontent", dsovf.transport_iso9660(require_iso=False)
+ )
+
+ self.m_mount_cb.assert_called_with(
+ "/dev/xvdz", dsovf.get_ovf_env, mtype=None
+ )
+
+ def test_maybe_cdrom_device_none(self):
+ """Test maybe_cdrom_device returns False for none/empty input"""
+ self.assertFalse(dsovf.maybe_cdrom_device(None))
+ self.assertFalse(dsovf.maybe_cdrom_device(""))
+
+ def test_maybe_cdrom_device_non_string_exception(self):
+ """Test maybe_cdrom_device raises ValueError on non-string types"""
+ with self.assertRaises(ValueError):
+ dsovf.maybe_cdrom_device({"a": "eleven"})
+
+ def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
+ """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
+ self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+
+ def test_maybe_cdrom_device_true_on_hd_partitions(self):
+        """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("hdz9"))
+
+ def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
+ """Test maybe_cdrom_device normalizes paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9"))
+ self.assertTrue(dsovf.maybe_cdrom_device("///sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda"))
+
+ def test_maybe_cdrom_device_true_on_xvd_partitions(self):
+ """Test maybe_cdrom_device returns true on xvd*"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("xvdza1"))
+
+
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
+class TestTransportVmwareGuestinfo(CiTestCase):
+ """Test the com.vmware.guestInfo transport implemented in
+ transport_vmware_guestinfo."""
+
+ rpctool = "vmware-rpctool"
+ with_logs = True
+ rpctool_path = "/not/important/vmware-rpctool"
+
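+    # The class-level mocks intercept subp.which (used to locate
+    # vmware-rpctool) and subp.subp (used to run it), so no real binary is
+    # needed by these tests.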
+ def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
+ m_which.return_value = None
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(
+ 0,
+ m_subp.call_count,
+ "subp should not be called if no rpctool in path.",
+ )
+
+ def test_notfound_on_exit_code_1(self, m_subp, m_which):
+        """If vmware-rpctool exits 1, the transport must return NOT_FOUND."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="No value found", exit_code=1, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertNotIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 1 by rpctool should not cause warning.",
+ )
+
+ def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
+        """If vmware-rpctool exits 0 with no stdout, that is a normal not-found.
+
+        This isn't a case seen in practice: normally on "not found",
+        rpctool exits 1 with 'No value found' on stderr. But cover the
+        case where it exits 0 and simply writes nothing to stdout.
+ """
+ m_which.return_value = self.rpctool_path
+ m_subp.return_value = ("", "")
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+ def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
+        """Exit codes other than 0 or 1 from vmware-rpctool log a WARNING."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 2 by rpctool should log WARNING.",
+ )
+
+ def test_found_when_guestinfo_present(self, m_subp, m_which):
+        """When there is OVF info, the transport should return it."""
+ m_which.return_value = self.rpctool_path
+ content = fill_properties({})
+ m_subp.return_value = (content, "")
+ self.assertEqual(content, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+
+#
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py
new file mode 100644
index 00000000..475bf498
--- /dev/null
+++ b/tests/unittests/sources/test_rbx.py
@@ -0,0 +1,241 @@
+import json
+
+from cloudinit import distros, helpers, subp
+from cloudinit.sources import DataSourceRbxCloud as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
+
+DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
+
+CRYPTO_PASS = (
+ "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f"
+ "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5"
+ "tToyGP41.s1"
+)
+
+CLOUD_METADATA = {
+ "vm": {
+ "memory": 4,
+ "cpu": 2,
+ "name": "vm-image-builder",
+ "_id": "5beab44f680cffd11f0e60fc",
+ },
+ "additionalMetadata": {
+ "username": "guru",
+ "sshKeys": ["ssh-rsa ..."],
+ "password": {"sha512": CRYPTO_PASS},
+ },
+ "disk": [
+ {
+ "size": 10,
+ "type": "ssd",
+ "name": "vm-image-builder-os",
+ "_id": "5beab450680cffd11f0e60fe",
+ },
+ {
+ "size": 2,
+ "type": "ssd",
+ "name": "ubuntu-1804-bionic",
+ "_id": "5bef002c680cffd11f107590",
+ },
+ ],
+ "netadp": [
+ {
+ "ip": [{"address": "62.181.8.174"}],
+ "network": {
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "routing": [],
+ "gateway": "62.181.8.1",
+ "netmask": "255.255.248.0",
+ "name": "public",
+ "type": "public",
+ "_id": "5784e97be2627505227b578c",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:03",
+ "_id": "5beab450680cffd11f0e6102",
+ },
+ {
+ "ip": [{"address": "10.209.78.11"}],
+ "network": {
+ "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]},
+ "routing": [],
+ "gateway": "10.209.78.1",
+ "netmask": "255.255.255.0",
+ "name": "network-determined-bardeen",
+ "type": "private",
+ "_id": "5beaec64680cffd11f0e7c31",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:24",
+ "_id": "5bec18c6680cffd11f0f0d8b",
+ },
+ ],
+ "dvddrive": [{"iso": {}}],
+}
+
+
+class TestRbxDataSource(CiTestCase):
+ parsed_user = None
+ allowed_subp = ["bash"]
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def setUp(self):
+ super(TestRbxDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+
+        # defaults for a few tests
+ self.ds = ds.DataSourceRbxCloud
+ self.seed_dir = self.paths.seed_dir
+ self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}}
+
+ def test_seed_read_user_data_callback_empty_file(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, {})
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertIsNone(results)
+
+ def test_seed_read_user_data_callback_valid_disk(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertNotEqual(results, None)
+ self.assertTrue("userdata" in results)
+ self.assertTrue("metadata" in results)
+ self.assertTrue("cfg" in results)
+
+ def test_seed_read_user_data_callback_userdata(self):
+ userdata = "#!/bin/sh\nexit 1"
+ populate_user_metadata(self.seed_dir, userdata)
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertNotEqual(results, None)
+ self.assertTrue("userdata" in results)
+ self.assertEqual(results["userdata"], userdata)
+
+ def test_generate_network_config(self):
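+        # Each "netadp" entry from CLOUD_METADATA should be rendered as a
+        # physical interface (eth0 for the public adapter, eth1 for the
+        # private one) with a single static subnet; MAC addresses are
+        # expected lower-cased.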
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
+ "netmask": "255.255.248.0",
+ "address": "62.181.8.174",
+ "type": "static",
+ "gateway": "62.181.8.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:15:5d:ff:0f:03",
+ },
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["9.9.9.9", "8.8.8.8"],
+ "netmask": "255.255.255.0",
+ "address": "10.209.78.11",
+ "type": "static",
+ "gateway": "10.209.78.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth1",
+ "mac_address": "00:15:5d:ff:0f:24",
+ },
+ ],
+ }
+        self.assertEqual(
+            expected, ds.generate_network_config(CLOUD_METADATA["netadp"])
+        )
+
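+    # gratuitous_arp chooses the arping flavour from the distro passed in:
+    # the tests below expect Debian/Ubuntu arping to take the source address
+    # via -S, while RHEL-like (iputils) arping takes it via -s.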
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_gratuitous_arp_run_standard_arping(self, m_subp):
+        """Test gratuitous_arp runs arping with the expected parameters."""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ {
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_handle_rhel_like_arping(self, m_subp):
+        """Test gratuitous_arp uses lowercase -s on RHEL-like distros."""
+ items = [
+ {
+ "source": "172.16.6.104",
+ "destination": "172.17.0.2",
+ }
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("fedora"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"]
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(
+ DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError()
+ )
+ def test_continue_on_arping_error(self, m_subp):
+        """Continue when the arping command fails."""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ {
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+
+def populate_cloud_metadata(path, data):
+ populate_dir(path, {"cloud.json": json.dumps(data)})
+
+
+def populate_user_metadata(path, data):
+ populate_dir(path, {"user.data": data})
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
new file mode 100644
index 00000000..d7e8b969
--- /dev/null
+++ b/tests/unittests/sources/test_scaleway.py
@@ -0,0 +1,526 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+import httpretty
+import requests
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources import DataSourceScaleway
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+
+
+class DataResponses(object):
+ """
+ Possible responses of the API endpoint
+ 169.254.42.42/user_data/cloud-init and
+ 169.254.42.42/vendor_data/cloud-init.
+ """
+
+ FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
+
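+    # 429 Too Many Requests; the datasource is expected to retry after a
+    # delay (see test_metadata_rate_limit below).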
+ @staticmethod
+ def rate_limited(method, uri, headers):
+ return 429, headers, ""
+
+ @staticmethod
+ def api_error(method, uri, headers):
+ return 500, headers, ""
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, cls.FAKE_USER_DATA
+
+ @staticmethod
+ def empty(method, uri, headers):
+ """
+ No user data for this server.
+ """
+ return 404, headers, ""
+
+
+class MetadataResponses(object):
+ """
+ Possible responses of the metadata API.
+ """
+
+ FAKE_METADATA = {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "hostname": "scaleway.host",
+ "tags": [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ],
+ "ssh_public_keys": [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ],
+ }
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, json.dumps(cls.FAKE_METADATA)
+
+
+class TestOnScaleway(CiTestCase):
+ def setUp(self):
+ super(TestOnScaleway, self).setUp()
+ self.tmp = self.tmp_dir()
+
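+    # Each fake_* argument below is a (mock, flag) tuple: the flag says
+    # whether that detection signal (DMI vendor string, /var/run/scaleway,
+    # kernel cmdline) should look like a Scaleway host.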
+ def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
+ mock, faked = fake_dmi
+ mock.return_value = "Scaleway" if faked else "Whatever"
+
+ mock, faked = fake_file_exists
+ mock.return_value = faked
+
+ mock, faked = fake_cmdline
+ mock.return_value = (
+ "initrd=initrd showopts scaleway nousb"
+ if faked
+ else "BOOT_IMAGE=/vmlinuz-3.11.0-26-generic"
+ )
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_not_on_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertFalse(DataSourceScaleway.on_scaleway())
+
+ # When not on Scaleway, get_data() returns False.
+ datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertFalse(datasource.get_data())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_dmi(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ dmidecode returns "Scaleway".
+ """
+ # dmidecode returns "Scaleway"
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, True),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_var_run_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ /var/run/scaleway exists.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, True),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_cmdline(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ "scaleway" in /proc/cmdline.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, True),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+
+def get_source_address_adapter(*args, **kwargs):
+ """
+    The Scaleway user/vendor data API must be queried from a privileged port.
+
+    If the unittests are run as non-root, the user doesn't have permission
+    to bind ports below 1024.
+
+    This function drops the privileged source_address binding; the HTTP
+    call is mocked by httpretty anyway.
+ """
+ kwargs.pop("source_address")
+ return requests.adapters.HTTPAdapter(*args, **kwargs)
+
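+# The tests below patch SourceAddressAdapter with this function so the
+# httpretty-mocked HTTP calls never try to bind a privileged local port.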
+
+class TestDataSourceScaleway(HttprettyTestCase):
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+ super(TestDataSourceScaleway, self).setUp()
+
+ self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "metadata_url"
+ ]
+ self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "userdata_url"
+ ]
+ self.vendordata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "vendordata_url"
+ ]
+
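+        # Patch platform detection and NIC discovery so every test in this
+        # class behaves as if running on Scaleway with a known fallback NIC.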
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.on_scaleway",
+ "_m_on_scaleway",
+ return_value=True,
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic",
+ "_m_find_fallback_nic",
+ return_value="scalewaynic0",
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, user data and vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user data API return a valid response
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.get_ok
+ )
+ self.datasource.get_data()
+
+ self.assertEqual(
+ self.datasource.get_instance_id(),
+ MetadataResponses.FAKE_METADATA["id"],
+ )
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+        )
+ self.assertEqual(
+ self.datasource.get_hostname(),
+ MetadataResponses.FAKE_METADATA["hostname"],
+ )
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(
+ self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertIsNone(self.datasource.availability_zone)
+ self.assertIsNone(self.datasource.region)
+ self.assertEqual(sleep.call_count, 0)
+
+ def test_ssh_keys_empty(self):
+ """
+        get_public_ssh_keys() should return an empty list if no ssh keys
+        are available
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(self.datasource.get_public_ssh_keys(), [])
+
+ def test_ssh_keys_only_tags(self):
+ """
+        get_public_ssh_keys() should return the list of keys available in tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ]
+ self.datasource.metadata["ssh_public_keys"] = []
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                ]
+            ),
+        )
+
+ def test_ssh_keys_only_conf(self):
+ """
+        get_public_ssh_keys() should return the list of keys available in
+        the ssh_public_keys field
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+        )
+
+ def test_ssh_keys_both(self):
+ """
+ get_public_ssh_keys() should return a merge of keys available
+ in ssh_public_keys and tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ]
+
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted(
+                [
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+                ]
+            ),
+        )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
+ """
+        get_data() returns metadata, but neither user data nor vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user and vendor data APIs return HTTP/404, which means there is
+ # no user / vendor data for the server.
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.empty
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+ self.datasource.get_data()
+ self.assertIsNone(self.datasource.get_userdata_raw())
+ self.assertIsNone(self.datasource.get_vendordata_raw())
+ self.assertEqual(sleep.call_count, 0)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
+ """
+        get_data() is rate limited twice by the user data endpoint before
+        it succeeds.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+
+ httpretty.register_uri(
+ httpretty.GET,
+ self.userdata_url,
+ responses=[
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.get_ok),
+ ],
+ )
+ self.datasource.get_data()
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(sleep.call_count, 2)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4 config if no ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4/v6 configs if ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = {
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ }
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [
+ {"type": "dhcp4"},
+ {
+ "type": "static",
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ },
+ ],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
+ """
+ network_config() should return the same data if a network config
+ already exists
+ """
+ m_get_cmdline.return_value = "scaleway"
+ self.datasource._network_config = "0xdeadbeef"
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, "0xdeadbeef")
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+ """
+        _network_config will be set to sources.UNSET after the first boot.
+        Make sure it behaves correctly.
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = sources.UNSET
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning")
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_cached_none(
+ self, m_get_cmdline, fallback_nic, logwarning
+ ):
+ """
+ network_config() should return config data if cached data is None
+ rather than sources.UNSET
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = None
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+ logwarning.assert_called_with(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/sources/test_smartos.py
index 5847a384..55239c4e 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/sources/test_smartos.py
@@ -5,14 +5,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''This is a testcase for the SmartOS datasource.
+"""This is a testcase for the SmartOS datasource.
It replicates a serial console and acts like the SmartOS console does in
order to validate return responses.
-'''
+"""
-from binascii import crc32
import json
import multiprocessing
import os
@@ -22,32 +21,40 @@ import signal
import stat
import unittest
import uuid
+from binascii import crc32
+from cloudinit import helpers as c_helpers
from cloudinit import serial
+from cloudinit.event import EventScope, EventType
from cloudinit.sources import DataSourceSmartOS
+from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM
from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
- SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
- identify_file)
-from cloudinit.event import EventType
-
-from cloudinit import helpers as c_helpers
-from cloudinit.util import (b64e, write_file)
-from cloudinit.subp import (subp, ProcessExecutionError, which)
-
-from cloudinit.tests.helpers import (
- CiTestCase, mock, FilesystemMockingTestCase, skipIf)
-
+)
+from cloudinit.sources.DataSourceSmartOS import (
+ get_smartos_environ,
+ identify_file,
+)
+from cloudinit.subp import ProcessExecutionError, subp, which
+from cloudinit.util import b64e, write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipIf,
+)
try:
import serial as _pyserial
+
assert _pyserial # avoid pyflakes error F401: import unused
HAS_PYSERIAL = True
except ImportError:
HAS_PYSERIAL = False
-DSMOS = 'cloudinit.sources.DataSourceSmartOS'
-SDC_NICS = json.loads("""
+DSMOS = "cloudinit.sources.DataSourceSmartOS"
+SDC_NICS = json.loads(
+ """
[
{
"nic_tag": "external",
@@ -87,10 +94,12 @@ SDC_NICS = json.loads("""
]
}
]
-""")
+"""
+)
-SDC_NICS_ALT = json.loads("""
+SDC_NICS_ALT = json.loads(
+ """
[
{
"interface": "net0",
@@ -126,9 +135,11 @@ SDC_NICS_ALT = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_DHCP = json.loads("""
+SDC_NICS_DHCP = json.loads(
+ """
[
{
"interface": "net0",
@@ -164,9 +175,11 @@ SDC_NICS_DHCP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP = json.loads("""
+SDC_NICS_MIP = json.loads(
+ """
[
{
"interface": "net0",
@@ -204,9 +217,11 @@ SDC_NICS_MIP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP_IPV6 = json.loads("""
+SDC_NICS_MIP_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -243,9 +258,11 @@ SDC_NICS_MIP_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_IPV4_IPV6 = json.loads("""
+SDC_NICS_IPV4_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -277,9 +294,11 @@ SDC_NICS_IPV4_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_SINGLE_GATEWAY = json.loads("""
+SDC_NICS_SINGLE_GATEWAY = json.loads(
+ """
[
{
"interface":"net0",
@@ -309,32 +328,33 @@ SDC_NICS_SINGLE_GATEWAY = json.loads("""
"mtu":1500
}
]
-""")
+"""
+)
MOCK_RETURNS = {
- 'hostname': 'test-host',
- 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
- 'disable_iptables_flag': None,
- 'enable_motd_sys_info': None,
- 'test-var1': 'some data',
- 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
- 'sdc:datacenter_name': 'somewhere2',
- 'sdc:operator-script': '\n'.join(['bin/true', '']),
- 'sdc:uuid': str(uuid.uuid4()),
- 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
- 'user-data': '\n'.join(['something', '']),
- 'user-script': '\n'.join(['/bin/true', '']),
- 'sdc:nics': json.dumps(SDC_NICS),
+ "hostname": "test-host",
+ "root_authorized_keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ "disable_iptables_flag": None,
+ "enable_motd_sys_info": None,
+ "test-var1": "some data",
+ "cloud-init:user-data": "\n".join(["#!/bin/sh", "/bin/true", ""]),
+ "sdc:datacenter_name": "somewhere2",
+ "sdc:operator-script": "\n".join(["bin/true", ""]),
+ "sdc:uuid": str(uuid.uuid4()),
+ "sdc:vendor-data": "\n".join(["VENDOR_DATA", ""]),
+ "user-data": "\n".join(["something", ""]),
+ "user-script": "\n".join(["/bin/true", ""]),
+ "sdc:nics": json.dumps(SDC_NICS),
}
-DMI_DATA_RETURN = 'smartdc'
+DMI_DATA_RETURN = "smartdc"
# Useful for calculating the length of a frame body. A SUCCESS body will be
# followed by more characters or be one character less if SUCCESS with no
# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html.
-SUCCESS_LEN = len('0123abcd SUCCESS ')
-NOTFOUND_LEN = len('0123abcd NOTFOUND')
+SUCCESS_LEN = len("0123abcd SUCCESS ")
+NOTFOUND_LEN = len("0123abcd NOTFOUND")
class PsuedoJoyentClient(object):
@@ -364,11 +384,11 @@ class PsuedoJoyentClient(object):
return True
def open_transport(self):
- assert(not self._is_open)
+ assert not self._is_open
self._is_open = True
def close_transport(self):
- assert(self._is_open)
+ assert self._is_open
self._is_open = False
@@ -381,21 +401,35 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
- self.legacy_user_d = self.tmp_path('legacy_user_tmp')
+ self.legacy_user_d = self.tmp_path("legacy_user_tmp")
os.mkdir(self.legacy_user_d)
- self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d",
- autospec=False, new=self.legacy_user_d)
- self.add_patch(DSMOS + ".identify_file", "m_identify_file",
- return_value="text/plain")
-
- def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
- sys_cfg=None, ds_cfg=None):
+ self.add_patch(
+ DSMOS + ".LEGACY_USER_D",
+ "m_legacy_user_d",
+ autospec=False,
+ new=self.legacy_user_d,
+ )
+ self.add_patch(
+ DSMOS + ".identify_file",
+ "m_identify_file",
+ return_value="text/plain",
+ )
+
+ def _get_ds(
+ self,
+ mockdata=None,
+ mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+ sys_cfg=None,
+ ds_cfg=None,
+ ):
self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
self.get_smartos_environ.return_value = mode
tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
+ dirs = {
+ "cloud_dir": self.tmp_path("cloud_dir", tmpd),
+ "run_dir": self.tmp_path("run_dir"),
+ }
for d in dirs.values():
os.mkdir(d)
paths = c_helpers.Paths(dirs)
@@ -404,14 +438,15 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
sys_cfg = {}
if ds_cfg is not None:
- sys_cfg['datasource'] = sys_cfg.get('datasource', {})
- sys_cfg['datasource']['SmartOS'] = ds_cfg
+ sys_cfg["datasource"] = sys_cfg.get("datasource", {})
+ sys_cfg["datasource"]["SmartOS"] = ds_cfg
return DataSourceSmartOS.DataSourceSmartOS(
- sys_cfg, distro=None, paths=paths)
+ sys_cfg, distro=None, paths=paths
+ )
def test_no_base64(self):
- ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
+ ds_cfg = {"no_base64_decode": ["test_var1"], "all_base": True}
dsrc = self._get_ds(ds_cfg=ds_cfg)
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -420,166 +455,180 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:uuid'],
- dsrc.metadata['instance-id'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:uuid"], dsrc.metadata["instance-id"]
+ )
def test_platform_info(self):
"""All platform-related attributes are properly set."""
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertEqual('joyent', dsrc.cloud_name)
- self.assertEqual('joyent', dsrc.platform_type)
- self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform)
+ self.assertEqual("joyent", dsrc.cloud_name)
+ self.assertEqual("joyent", dsrc.platform_type)
+ self.assertEqual("serial (/dev/ttyS1)", dsrc.subplatform)
def test_root_keys(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
+ self.assertEqual(
+ MOCK_RETURNS["root_authorized_keys"], dsrc.metadata["public-keys"]
+ )
def test_hostname_b64(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname_if_no_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_hostname_if_no_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
- del my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- del my_returns['hostname']
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:uuid'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:uuid"], dsrc.metadata["local-hostname"]
+ )
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-data'],
- dsrc.metadata['legacy-user-data'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
+ self.assertEqual(
+ MOCK_RETURNS["user-data"], dsrc.metadata["legacy-user-data"]
+ )
+ self.assertEqual(
+ MOCK_RETURNS["cloud-init:user-data"], dsrc.userdata_raw
+ )
def test_sdc_nics(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
- dsrc.metadata['network-data'])
+ self.assertEqual(
+ json.loads(MOCK_RETURNS["sdc:nics"]), dsrc.metadata["network-data"]
+ )
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
print("legacy_script_f=%s" % legacy_script_f)
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebanged(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/bin/bash")
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebang_not_added(self):
"""
- Test that the SmartOS requirement that plain text scripts
- are executable. This test makes sure that plain texts scripts
- with out file magic have it added appropriately by cloud-init.
+        Test the SmartOS requirement that plain text scripts are made
+        executable. This test makes sure that plain text scripts without
+        file magic have it added appropriately by cloud-init.
"""
my_returns = MOCK_RETURNS.copy()
- my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
- 'print("hi")', ''])
+ my_returns["user-script"] = "\n".join(
+ ["#!/usr/bin/perl", 'print("hi")', ""]
+ )
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ my_returns["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/usr/bin/perl")
def test_userdata_removed(self):
"""
- User-data in the SmartOS world is supposed to be written to a file
- each and every boot. This tests to make sure that in the event the
- legacy user-data is removed, the existing user-data is backed-up
- and there is no /var/db/user-data left.
+        User-data in the SmartOS world is supposed to be written to a file
+        on each and every boot. This test makes sure that, in the event the
+        legacy user-data is removed, the existing user-data is backed up
+        and there is no /var/db/user-data left.
"""
user_data_f = "%s/mdata-user-data" % self.legacy_user_d
- with open(user_data_f, 'w') as f:
+ with open(user_data_f, "w") as f:
f.write("PREVIOUS")
my_returns = MOCK_RETURNS.copy()
- del my_returns['user-data']
+ del my_returns["user-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertFalse(dsrc.metadata.get('legacy-user-data'))
+ self.assertFalse(dsrc.metadata.get("legacy-user-data"))
found_new = False
for root, _dirs, files in os.walk(self.legacy_user_d):
for name in files:
name_f = os.path.join(root, name)
permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
- if re.match(r'.*\/mdata-user-data$', name_f):
+ if re.match(r".*\/mdata-user-data$", name_f):
found_new = True
print(name_f)
- self.assertEqual(permissions, '400')
+ self.assertEqual(permissions, "400")
self.assertFalse(found_new)
@@ -587,17 +636,18 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:vendor-data'],
- dsrc.metadata['vendor-data'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:vendor-data"], dsrc.metadata["vendor-data"]
+ )
def test_default_vendor_data(self):
my_returns = MOCK_RETURNS.copy()
- def_op_script = my_returns['sdc:vendor-data']
- del my_returns['sdc:vendor-data']
+ def_op_script = my_returns["sdc:vendor-data"]
+ del my_returns["sdc:vendor-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data'])
+ self.assertNotEqual(def_op_script, dsrc.metadata["vendor-data"])
# we expect default vendor-data is a boothook
self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
@@ -606,15 +656,19 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
- dsrc.metadata['iptables_disable'])
+ self.assertEqual(
+ MOCK_RETURNS["disable_iptables_flag"],
+ dsrc.metadata["iptables_disable"],
+ )
def test_motd_sys_info(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
- dsrc.metadata['motd_sys_info'])
+ self.assertEqual(
+ MOCK_RETURNS["enable_motd_sys_info"],
+ dsrc.metadata["motd_sys_info"],
+ )
def test_default_ephemeral(self):
# Test to make sure that the builtin config has the ephemeral
@@ -625,16 +679,16 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
def test_override_disk_aliases(self):
# Test to make sure that the built-in DS is overriden
builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
- mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+ mydscfg = {"disk_aliases": {"FOO": "/dev/bar"}}
# expect that these values are in builtin, or this is pointless
for k in mydscfg:
@@ -644,21 +698,30 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(mydscfg['disk_aliases']['FOO'],
- dsrc.ds_cfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ mydscfg["disk_aliases"]["FOO"], dsrc.ds_cfg["disk_aliases"]["FOO"]
+ )
- self.assertEqual(dsrc.device_name_to_device('FOO'),
- mydscfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ dsrc.device_name_to_device("FOO"), mydscfg["disk_aliases"]["FOO"]
+ )
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
- dsrc.update_events['network'])
+ self.assertSetEqual(
+ {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ },
+ dsrc.default_update_events[EventScope.NETWORK],
+ )
class TestIdentifyFile(CiTestCase):
"""Test the 'identify_file' utility."""
+
@skipIf(not which("file"), "command 'file' not available.")
def test_file_happy_path(self):
"""Test file is available and functional on plain text."""
@@ -676,14 +739,16 @@ class TestIdentifyFile(CiTestCase):
self.assertEqual(None, identify_file(fname))
self.assertEqual(
[mock.call(["file", "--brief", "--mime-type", fname])],
- m_subp.call_args_list)
+ m_subp.call_args_list,
+ )
class ShortReader(object):
"""Implements a 'read' interface for bytes provided.
much like io.BytesIO but the 'endbyte' acts as if EOF.
When it is reached a short will be returned."""
- def __init__(self, initial_bytes, endbyte=b'\0'):
+
+ def __init__(self, initial_bytes, endbyte=b"\0"):
self.data = initial_bytes
self.index = 0
self.len = len(self.data)
@@ -696,7 +761,7 @@ class ShortReader(object):
def read(self, size=-1):
"""Read size bytes but not past a null."""
if size == 0 or self.index >= self.len:
- return b''
+ return b""
rsize = size
if size < 0 or size + self.index > self.len:
@@ -707,7 +772,7 @@ class ShortReader(object):
rsize = next_null - self.index + 1
i = self.index
self.index += rsize
- ret = self.data[i:i + rsize]
+ ret = self.data[i : i + rsize]
if len(ret) and ret[-1:] == self.endbyte:
ret = ret[:-1]
return ret
@@ -715,32 +780,34 @@ class ShortReader(object):
class TestJoyentMetadataClient(FilesystemMockingTestCase):
- invalid = b'invalid command\n'
- failure = b'FAILURE\n'
- v2_ok = b'V2_OK\n'
+ invalid = b"invalid command\n"
+ failure = b"FAILURE\n"
+ v2_ok = b"V2_OK\n"
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
self.serial = mock.MagicMock(spec=serial.Serial)
- self.request_id = 0xabcdef12
- self.metadata_value = 'value'
+ self.request_id = 0xABCDEF12
+ self.metadata_value = "value"
self.response_parts = {
- 'command': 'SUCCESS',
- 'crc': 'b5a9ff00',
- 'length': SUCCESS_LEN + len(b64e(self.metadata_value)),
- 'payload': b64e(self.metadata_value),
- 'request_id': '{0:08x}'.format(self.request_id),
+ "command": "SUCCESS",
+ "crc": "b5a9ff00",
+ "length": SUCCESS_LEN + len(b64e(self.metadata_value)),
+ "payload": b64e(self.metadata_value),
+ "request_id": "{0:08x}".format(self.request_id),
}
def make_response():
- payloadstr = ''
- if 'payload' in self.response_parts:
- payloadstr = ' {0}'.format(self.response_parts['payload'])
- return ('V2 {length} {crc} {request_id} '
- '{command}{payloadstr}\n'.format(
- payloadstr=payloadstr,
- **self.response_parts).encode('ascii'))
+ payloadstr = ""
+ if "payload" in self.response_parts:
+ payloadstr = " {0}".format(self.response_parts["payload"])
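+            # With the default response_parts this yields a frame such as
+            # "V2 25 b5a9ff00 abcdef12 SUCCESS dmFsdWU=\n" (25 is
+            # SUCCESS_LEN plus the 8-byte base64 payload).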
+ return (
+ "V2 {length} {crc} {request_id} "
+ "{command}{payloadstr}\n".format(
+ payloadstr=payloadstr, **self.response_parts
+ ).encode("ascii")
+ )
self.metasource_data = None
@@ -754,41 +821,49 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
self.serial.read.side_effect = read_response
self.patched_funcs.enter_context(
- mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
- mock.Mock(return_value=self.request_id)))
+ mock.patch(
+ "cloudinit.sources.DataSourceSmartOS.random.randint",
+ mock.Mock(return_value=self.request_id),
+ )
+ )
def _get_client(self):
return DataSourceSmartOS.JoyentMetadataClient(
- fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
+ fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM
+ )
def _get_serial_client(self):
self.serial.timeout = 1
- return DataSourceSmartOS.JoyentMetadataSerialClient(None,
- fp=self.serial)
+ return DataSourceSmartOS.JoyentMetadataSerialClient(
+ None, fp=self.serial
+ )
def assertEndsWith(self, haystack, prefix):
- self.assertTrue(haystack.endswith(prefix),
- "{0} does not end with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.endswith(prefix),
+ "{0} does not end with '{1}'".format(repr(haystack), prefix),
+ )
def assertStartsWith(self, haystack, prefix):
- self.assertTrue(haystack.startswith(prefix),
- "{0} does not start with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.startswith(prefix),
+ "{0} does not start with '{1}'".format(repr(haystack), prefix),
+ )
def assertNoMoreSideEffects(self, obj):
self.assertRaises(StopIteration, obj)
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
- self.assertEndsWith(written_line.decode('ascii'),
- b'\n'.decode('ascii'))
- self.assertEqual(1, written_line.count(b'\n'))
+ self.assertEndsWith(
+ written_line.decode("ascii"), b"\n".decode("ascii")
+ )
+ self.assertEqual(1, written_line.count(b"\n"))
- def _get_written_line(self, key='some_key'):
+ def _get_written_line(self, key="some_key"):
client = self._get_client()
client.get(key)
return self.serial.write.call_args[0][0]
@@ -798,76 +873,86 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_get_metadata_line_starts_with_v2(self):
foo = self._get_written_line()
- self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
+ self.assertStartsWith(foo.decode("ascii"), b"V2".decode("ascii"))
def test_get_metadata_uses_get_command(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- self.assertEqual('GET', parts[4])
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ self.assertEqual("GET", parts[4])
def test_get_metadata_base64_encodes_argument(self):
- key = 'my_key'
- parts = self._get_written_line(key).decode('ascii').strip().split(' ')
+ key = "my_key"
+ parts = self._get_written_line(key).decode("ascii").strip().split(" ")
self.assertEqual(b64e(key), parts[5])
def test_get_metadata_calculates_length_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_length = len(' '.join(parts[3:]))
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_length = len(" ".join(parts[3:]))
self.assertEqual(expected_length, int(parts[1]))
def test_get_metadata_uses_appropriate_request_id(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
request_id = parts[3]
self.assertEqual(8, len(request_id))
self.assertEqual(request_id, request_id.lower())
def test_get_metadata_uses_random_number_for_request_id(self):
line = self._get_written_line()
- request_id = line.decode('ascii').strip().split(' ')[3]
- self.assertEqual('{0:08x}'.format(self.request_id), request_id)
+ request_id = line.decode("ascii").strip().split(" ")[3]
+ self.assertEqual("{0:08x}".format(self.request_id), request_id)
def test_get_metadata_checksums_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_checksum = '{0:08x}'.format(
- crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_checksum = "{0:08x}".format(
+ crc32(" ".join(parts[3:]).encode("utf-8")) & 0xFFFFFFFF
+ )
checksum = parts[2]
self.assertEqual(expected_checksum, checksum)
def test_get_metadata_reads_a_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
- value = client.get('some_key')
+ value = client.get("some_key")
self.assertEqual(self.metadata_value, value)
def test_get_metadata_throws_exception_for_incorrect_length(self):
- self.response_parts['length'] = 0
+ self.response_parts["length"] = 0
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_incorrect_crc(self):
- self.response_parts['crc'] = 'deadbeef'
+ self.response_parts["crc"] = "deadbeef"
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_request_id_mismatch(self):
- self.response_parts['request_id'] = 'deadbeef'
+ self.response_parts["request_id"] = "deadbeef"
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_returns_None_if_value_not_found(self):
- self.response_parts['payload'] = ''
- self.response_parts['command'] = 'NOTFOUND'
- self.response_parts['length'] = NOTFOUND_LEN
+ self.response_parts["payload"] = ""
+ self.response_parts["command"] = "NOTFOUND"
+ self.response_parts["length"] = NOTFOUND_LEN
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertIsNone(client.get('some_key'))
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertIsNone(client.get("some_key"))
def test_negotiate(self):
client = self._get_client()
@@ -879,55 +964,58 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_negotiate_short_response(self):
client = self._get_client()
# chopped '\n' from v2_ok.
- reader = ShortReader(self.v2_ok[:-1] + b'\0')
+ reader = ShortReader(self.v2_ok[:-1] + b"\0")
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataTimeoutException, client._negotiate
+ )
self.assertTrue(reader.emptied)
def test_negotiate_bad_response(self):
client = self._get_client()
- reader = ShortReader(b'garbage\n' + self.v2_ok)
+ reader = ShortReader(b"garbage\n" + self.v2_ok)
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException, client._negotiate
+ )
self.assertEqual(self.v2_ok, client.fp.read())
def test_serial_open_transport(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok)
+ reader = ShortReader(b"garbage\0" + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_failure(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage' + b'\0' + self.failure +
- self.invalid + self.v2_ok)
+ reader = ShortReader(
+ b"garbage" + b"\0" + self.failure + self.invalid + self.v2_ok
+ )
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_many_timeouts(self):
client = self._get_serial_client()
- reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok)
+ reader = ShortReader(b"\0" * 100 + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_list_metadata_returns_list(self):
- parts = ['foo', 'bar']
- value = b64e('\n'.join(parts))
- self.response_parts['payload'] = value
- self.response_parts['crc'] = '40873553'
- self.response_parts['length'] = SUCCESS_LEN + len(value)
+ parts = ["foo", "bar"]
+ value = b64e("\n".join(parts))
+ self.response_parts["payload"] = value
+ self.response_parts["crc"] = "40873553"
+ self.response_parts["length"] = SUCCESS_LEN + len(value)
client = self._get_client()
self.assertEqual(client.list(), parts)
def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
- del self.response_parts['payload']
- self.response_parts['length'] = SUCCESS_LEN - 1
- self.response_parts['crc'] = '14e563ba'
+ del self.response_parts["payload"]
+ self.response_parts["length"] = SUCCESS_LEN - 1
+ self.response_parts["crc"] = "14e563ba"
client = self._get_client()
self.assertEqual(client.list(), [])
@@ -935,181 +1023,354 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
class TestNetworkConversion(CiTestCase):
def test_convert_simple(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.102/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '192.168.128.93/22'}],
- 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.102/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:f5:e4:f5",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "192.168.128.93/22"}
+ ],
+ "mtu": 8500,
+ "mac_address": "90:b8:d0:a5:ff:cd",
+ },
+ ],
+ }
found = convert_net(SDC_NICS)
self.assertEqual(expected, found)
def test_convert_simple_alt(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_ALT)
self.assertEqual(expected, found)
def test_convert_simple_dhcp(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_DHCP)
self.assertEqual(expected, found)
def test_convert_simple_multi_ip(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'},
- {'type': 'static',
- 'address': '8.12.42.52/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'},
- {'type': 'static',
- 'address': '10.210.1.151/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ {"type": "static", "address": "8.12.42.52/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"},
+ {"type": "static", "address": "10.210.1.151/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP)
self.assertEqual(expected, found)
def test_convert_with_dns(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'},
- {'type': 'nameserver',
- 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ {
+ "type": "nameserver",
+ "address": ["8.8.8.8", "8.8.8.1"],
+ "search": ["local"],
+ },
+ ],
+ }
found = convert_net(
- network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'],
- dns_domain="local")
+ network_data=SDC_NICS_DHCP,
+ dns_servers=["8.8.8.8", "8.8.8.1"],
+ dns_domain="local",
+ )
self.assertEqual(expected, found)
def test_convert_simple_multi_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'address':
- '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'},
- {'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "address": (
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64"
+ ),
+ },
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP_IPV6)
self.assertEqual(expected, found)
def test_convert_simple_both_ipv4_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1',
- 'type': 'static'},
- {'address': '8.12.42.51/24',
- 'gateway': '8.12.42.1',
- 'type': 'static'},
- {'address': '2001::11/64', 'type': 'static'},
- {'address': '8.12.42.52/32', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.217/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:ae:64:51",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "2001::10/64",
+ "gateway": "2001::1",
+ "type": "static",
+ },
+ {
+ "address": "8.12.42.51/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ },
+ {"address": "2001::11/64", "type": "static"},
+ {"address": "8.12.42.52/32", "type": "static"},
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.217/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_IPV4_IPV6)
self.assertEqual(expected, found)
def test_gateways_not_on_all_nics(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.27/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
def test_routes_on_all_nics(self):
routes = [
- {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
- {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
+ {"linklocal": False, "dst": "3.0.0.0/8", "gateway": "8.12.42.3"},
+ {"linklocal": False, "dst": "4.0.0.0/8", "gateway": "10.210.1.4"},
+ ]
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "10.210.1.27/24",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
self.maxDiff = None
self.assertEqual(expected, found)
-@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
- "Only supported on KVM and bhyve guests under SmartOS")
-@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
- "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(
+ get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS",
+)
+@unittest.skipUnless(
+ os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE,
+)
@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
- This class tests locking on an actual serial port, and as such can only
- be run in a kvm or bhyve guest running on a SmartOS host. A test run on
- a metadata socket will not be valid because a metadata socket ensures
- there is only one session over a connection. In contrast, in the
- absence of proper locking multiple processes opening the same serial
- port can corrupt each others' exchanges with the metadata server.
-
- This takes on the order of 2 to 3 minutes to run.
+ This class tests locking on an actual serial port, and as such can only
+ be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+ a metadata socket will not be valid because a metadata socket ensures
+ there is only one session over a connection. In contrast, in the
+ absence of proper locking multiple processes opening the same serial
+ port can corrupt each others' exchanges with the metadata server.
+
+ This takes on the order of 2 to 3 minutes to run.
"""
- allowed_subp = ['mdata-get']
+
+ allowed_subp = ["mdata-get"]
def setUp(self):
self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
@@ -1124,16 +1385,16 @@ class TestSerialConcurrency(CiTestCase):
def start_mdata_loop(self):
"""
- The mdata-get command is repeatedly run in a separate process so
- that it may try to race with metadata operations performed in the
- main test process. Use of mdata-get is better than two processes
- using the protocol implementation in DataSourceSmartOS because we
- are testing to be sure that cloud-init and mdata-get respect each
- others locks.
+ The mdata-get command is repeatedly run in a separate process so
+ that it may try to race with metadata operations performed in the
+ main test process. Use of mdata-get is better than two processes
+ using the protocol implementation in DataSourceSmartOS because we
+ are testing to be sure that cloud-init and mdata-get respect each
+ others locks.
"""
rcs = list(range(0, 256))
while True:
- subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+ subp(["mdata-get", "sdc:routes"], rcs=rcs)
def test_all_keys(self):
self.assertIsNotNone(self.mdata_proc.pid)
@@ -1156,4 +1417,5 @@ class TestSerialConcurrency(CiTestCase):
self.assertIsNone(self.mdata_proc.exitcode)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
new file mode 100644
index 00000000..e1125b65
--- /dev/null
+++ b/tests/unittests/sources/test_upcloud.py
@@ -0,0 +1,331 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources.DataSourceUpCloud import (
+ DataSourceUpCloud,
+ DataSourceUpCloudLocal,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+UC_METADATA = json.loads(
+ """
+{
+ "cloud_name": "upcloud",
+ "instance_id": "00322b68-0096-4042-9406-faad61922128",
+ "hostname": "test.example.com",
+ "platform": "servers",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "public_keys": [
+ "ssh-rsa AAAAB.... test1@example.com",
+ "ssh-rsa AAAAB.... test2@example.com"
+ ],
+ "region": "fi-hel2",
+ "network": {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": null,
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:8a:e1",
+ "network_id": "035a0a4a-7704-4de5-820d-189fc8132714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ },
+ "storage": {
+ "disks": [
+ {
+ "id": "014efb65-223b-4d44-8f0a-c29535b88dcf",
+ "serial": "014efb65223b4d448f0a",
+ "size": 10240,
+ "type": "disk",
+ "tier": "maxiops"
+ }
+ ]
+ },
+ "tags": [],
+ "user_data": "",
+ "vendor_data": ""
+}
+"""
+)
+
+UC_METADATA[
+ "user_data"
+] = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
+def _mock_dmi():
+ return True, "00322b68-0096-4042-9406-faad61922128"
+
+
+class TestUpCloudMetadata(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestUpCloudMetadata, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloud(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo")
+ def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(
+ UC_METADATA.get("vendor_data"), ds.get_vendordata_raw()
+ )
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ self.assertEqual(
+ UC_METADATA.get("public_keys"), ds.get_public_ssh_keys()
+ )
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestUpCloudNetworkSetup(CiTestCase):
+ """
+ Test reading the meta-data on networked context
+ """
+
+ def setUp(self):
+ super(TestUpCloudNetworkSetup, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloudLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ def test_network_configured_metadata(
+ self, m_net, m_dhcp, m_fallback_nic, mock_readmd
+ ):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ m_fallback_nic.return_value = "eth1"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth1",
+ "fixed-address": "10.6.3.27",
+ "routers": "10.6.0.1",
+ "subnet-mask": "22",
+ "broadcast-address": "10.6.3.255",
+ }
+ ]
+
+ ds = self.get_ds()
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(m_dhcp.called)
+ m_dhcp.assert_called_with("eth1", None)
+
+ m_net.assert_called_once_with(
+ broadcast="10.6.3.255",
+ interface="eth1",
+ ip="10.6.3.27",
+ prefix_or_mask="22",
+ router="10.6.0.1",
+ static_routes=None,
+ )
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_configuration(self, m_get_by_mac, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ raw_ifaces = UC_METADATA.get("network").get("interfaces")
+ self.assertEqual(4, len(raw_ifaces))
+
+ m_get_by_mac.return_value = {
+ raw_ifaces[0].get("mac"): "eth0",
+ raw_ifaces[1].get("mac"): "eth1",
+ raw_ifaces[2].get("mac"): "eth2",
+ raw_ifaces[3].get("mac"): "eth3",
+ }
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ netcfg = ds.network_config
+
+ self.assertEqual(1, netcfg.get("version"))
+
+ config = netcfg.get("config")
+ self.assertIsInstance(config, list)
+ self.assertEqual(5, len(config))
+ self.assertEqual("physical", config[3].get("type"))
+
+ self.assertEqual(
+ raw_ifaces[2].get("mac"), config[2].get("mac_address")
+ )
+ self.assertEqual(1, len(config[2].get("subnets")))
+ self.assertEqual(
+ "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type")
+ )
+
+ self.assertEqual(2, len(config[0].get("subnets")))
+ self.assertEqual("static", config[0].get("subnets")[1].get("type"))
+
+ dns = config[4]
+ self.assertEqual("nameserver", dns.get("type"))
+ self.assertEqual(2, len(dns.get("address")))
+ self.assertEqual(
+ UC_METADATA.get("network").get("dns")[1], dns.get("address")[1]
+ )
+
+
+class TestUpCloudDatasourceLoading(CiTestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM,)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloudLocal])
+
+ def test_get_datasource_list_returns_in_normal(self):
+ deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloud])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ["UpCloud"],
+ (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ["cloudinit.sources"],
+ )
+ self.assertEqual([DataSourceUpCloud], found)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
new file mode 100644
index 00000000..dd331349
--- /dev/null
+++ b/tests/unittests/sources/test_vmware.py
@@ -0,0 +1,389 @@
+# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import gzip
+import os
+
+import pytest
+
+from cloudinit import dmi, helpers, safeyaml, settings
+from cloudinit.sources import DataSourceVMware
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ populate_dir,
+)
+
+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
+PRODUCT_NAME = "VMware7,1"
+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
+REROOT_FILES = {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+}
+
+VMW_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com",
+]
+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com"
+
+VMW_METADATA_YAML = """instance-id: cloud-vm
+local-hostname: cloud-vm
+network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+"""
+
+VMW_USERDATA_YAML = """## template: jinja
+#cloud-config
+users:
+- default
+"""
+
+VMW_VENDORDATA_YAML = """## template: jinja
+#cloud-config
+runcmd:
+- echo "Hello, world."
+"""
+
+
+@pytest.fixture(autouse=True)
+def common_patches():
+ with mock.patch("cloudinit.util.platform.platform", return_value="Linux"):
+ with mock.patch.multiple(
+ "cloudinit.dmi",
+ is_container=mock.Mock(return_value=False),
+ is_FreeBSD=mock.Mock(return_value=False),
+ ):
+ yield
+
+
+class TestDataSourceVMware(CiTestCase):
+ """
+ Test common functionality that is not transport specific.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMware, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_data_access_method(self):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_get_host_info(self):
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+
+
+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
+ """
+ Test the envvar transport.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareEnvVars, self).setUp()
+ self.tmp = self.tmp_dir()
+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1"
+ self.create_system_files()
+
+ def tearDown(self):
+ del os.environ[DataSourceVMware.VMX_GUESTINFO]
+ return super(TestDataSourceVMwareEnvVars, self).tearDown()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR,
+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_only(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method,
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ def test_ds_valid_on_vmware_platform(self):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, PRODUCT_NAME)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ DataSourceVMware.get_guestinfo_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a non-VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_ds_invalid_on_non_vmware_platform(self, m_fn):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, None)
+
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+
+def assert_metadata(test_obj, ds, metadata):
+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+
+ expected_public_keys = metadata.get("public_keys")
+ if not isinstance(expected_public_keys, list):
+ expected_public_keys = [expected_public_keys]
+
+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys())
+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+def get_ds(temp_dir):
+ ds = DataSourceVMware.DataSourceVMware(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir})
+ )
+ ds.vmware_rpctool = "vmware-rpctool"
+ return ds
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
new file mode 100644
index 00000000..18b2c084
--- /dev/null
+++ b/tests/unittests/sources/test_vultr.py
@@ -0,0 +1,339 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.sources import DataSourceVultr
+from cloudinit.sources.helpers import vultr
+from tests.unittests.helpers import CiTestCase, mock
+
+# Vultr metadata test data
+VULTR_V1_1 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_1",
+ "instanceid": "42506325",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "108.61.89.242",
+ "gateway": "108.61.89.1",
+ "netmask": "255.255.255.0",
+ },
+ "ipv6": {
+ "additional": [],
+ "address": "2001:19f0:5:56c2:5400:03ff:fe15:c465",
+ "network": "2001:19f0:5:56c2::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:15:c4:65",
+ "network-type": "public",
+ }
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "raid1-script": "",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+VULTR_V1_2 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_2",
+ "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instanceid": "42872224",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "45.76.7.171",
+ "gateway": "45.76.6.1",
+ "netmask": "255.255.254.0",
+ },
+ "ipv6": {
+ "additional": [
+ {"network": "2002:19f0:5:28a7::", "prefix": "64"}
+ ],
+ "address": "2001:19f0:5:28a7:5400:03ff:fe1b:4eca",
+ "network": "2001:19f0:5:28a7::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:1b:4e:ca",
+ "network-type": "public",
+ },
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "10.1.112.3",
+ "gateway": "",
+ "netmask": "255.255.240.0",
+ },
+ "ipv6": {"additional": [], "network": "", "prefix": ""},
+ "mac": "5a:00:03:1b:4e:ca",
+ "network-type": "private",
+ "network-v2-id": "fbbe2b5b-b986-4396-87f5-7246660ccb64",
+ "networkid": "net5e7155329d730",
+ },
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
+
+INTERFACES = [
+ ["lo", "56:00:03:15:c4:00", "drv", "devid0"],
+ ["dummy0", "56:00:03:15:c4:01", "drv", "devid1"],
+ ["eth1", "56:00:03:15:c4:02", "drv", "devid2"],
+ ["eth0", "56:00:03:15:c4:04", "drv", "devid4"],
+ ["eth2", "56:00:03:15:c4:03", "drv", "devid3"],
+]
+
+# Expected generated objects
+
+# Expected config
+EXPECTED_VULTR_CONFIG = {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+}
+
+# Expected network config object from generator
+EXPECTED_VULTR_NETWORK_1 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:15:c4:65",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
+ },
+ ],
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ {
+ "type": "static6",
+ "control": "auto",
+ "address": "2002:19f0:5:28a7::/64",
+ },
+ ],
+ },
+ {
+ "name": "eth1",
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0",
+ }
+ ],
+ },
+ ],
+}
+
+
+INTERFACE_MAP = {
+ "56:00:03:15:c4:65": "eth0",
+ "56:00:03:1b:4e:ca": "eth0",
+ "5a:00:03:1b:4e:ca": "eth1",
+}
+
+
+EPHERMERAL_USED = ""
+
+
+class TestDataSourceVultr(CiTestCase):
+ def setUp(self):
+ super(TestDataSourceVultr, self).setUp()
+
+ # Stored as a dict to make it easier to maintain
+ raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
+ raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
+
+ # Make expected format
+ VULTR_V1_1["vendor-data"] = [raw1]
+ VULTR_V1_2["vendor-data"] = [raw2]
+
+ self.tmp = self.tmp_dir()
+
+ # Test the datasource itself
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap):
+ mock_getmeta.return_value = VULTR_V1_2
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ # Test for failure
+ self.assertEqual(True, source._get_data())
+
+ # Test instance id
+ self.assertEqual("42872224", source.metadata["instanceid"])
+
+ # Test hostname
+ self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"])
+
+ # Test ssh keys
+ self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"])
+
+ # Test vendor data generation
+ orig_val = self.maxDiff
+ self.maxDiff = None
+
+ vendordata = source.vendordata_raw
+
+ # Test vendor config
+ self.assertEqual(
+ EXPECTED_VULTR_CONFIG,
+ json.loads(vendordata[0].replace("#cloud-config", "")),
+ )
+
+ self.maxDiff = orig_val
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+ # Test network config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_1["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf)
+ )
+
+ # Test Private Networking config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_private_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_2["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf)
+ )
+
+ def ephemeral_init(self, iface="", connectivity_url_data=None):
+ global EPHERMERAL_USED
+ EPHERMERAL_USED = iface
+ if iface == "eth0":
+ return
+ raise NoDHCPLeaseError("Generic for testing")
+
+ # Test interface seeking to ensure we are able to find the correct one
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init)
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces")
+ def test_interface_seek(
+ self, mock_get_interfaces, mock_read_metadata, mock_isvultr
+ ):
+ mock_read_metadata.side_effect = NoDHCPLeaseError(
+ "Generic for testing"
+ )
+ mock_isvultr.return_value = True
+ mock_get_interfaces.return_value = INTERFACES
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ try:
+ source._get_data()
+ except Exception:
+ pass
+
+ self.assertEqual(EPHERMERAL_USED, INTERFACES[3][0])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/__init__.py b/tests/unittests/sources/vmware/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/lxd/__init__.py
+++ b/tests/unittests/sources/vmware/__init__.py
diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
index f89f8157..9b3e079f 100644
--- a/tests/unittests/test_vmware/test_custom_script.py
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -7,14 +7,15 @@
import os
import stat
+
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
CustomScriptConstant,
CustomScriptNotFound,
- PreCustomScript,
PostCustomScript,
+ PreCustomScript,
)
-from cloudinit.tests.helpers import CiTestCase, mock
+from tests.unittests.helpers import CiTestCase, mock
class TestVmwareCustomScript(CiTestCase):
@@ -22,8 +23,7 @@ class TestVmwareCustomScript(CiTestCase):
self.tmpDir = self.tmp_dir()
# Mock the tmpDir as the root dir in VM.
self.execDir = os.path.join(self.tmpDir, ".customization")
- self.execScript = os.path.join(self.execDir,
- ".customize.sh")
+ self.execScript = os.path.join(self.execDir, ".customize.sh")
def test_prepare_custom_script(self):
"""
@@ -36,23 +36,24 @@ class TestVmwareCustomScript(CiTestCase):
preCust = PreCustomScript("random-vmw-test", self.tmpDir)
self.assertEqual("random-vmw-test", preCust.scriptname)
self.assertEqual(self.tmpDir, preCust.directory)
- self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
- preCust.scriptpath)
+ self.assertEqual(
+ self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath
+ )
with self.assertRaises(CustomScriptNotFound):
preCust.prepare_script()
# Custom script exists.
custScript = self.tmp_path("test-cust", self.tmpDir)
util.write_file(custScript, "test-CR-strip\r\r")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- self.tmpDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, self.tmpDir
+ )
self.assertEqual("test-cust", postCust.scriptname)
self.assertEqual(self.tmpDir, postCust.directory)
self.assertEqual(custScript, postCust.scriptpath)
@@ -84,26 +85,30 @@ class TestVmwareCustomScript(CiTestCase):
ccScriptDir = self.tmp_dir()
ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
markerFile = os.path.join(self.tmpDir, ".markerFile")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- with mock.patch.object(CustomScriptConstant,
- "POST_CUSTOM_PENDING_MARKER",
- markerFile):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- ccScriptDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ with mock.patch.object(
+ CustomScriptConstant,
+ "POST_CUSTOM_PENDING_MARKER",
+ markerFile,
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, ccScriptDir
+ )
postCust.execute()
# Check cc_scripts_per_instance and marker file
# are created.
self.assertTrue(os.path.exists(ccScript))
with open(ccScript, "r") as f:
content = f.read()
- self.assertEqual(content,
- "This is the script to run post cust")
+ self.assertEqual(
+ content, "This is the script to run post cust"
+ )
self.assertTrue(os.path.exists(markerFile))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
new file mode 100644
index 00000000..fc63bcae
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang <xiaofengw@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+ get_tools_config,
+ set_gc_status,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+ def test_get_tools_config_not_installed(self):
+ """
+ This test is designed to verify the behavior if vmware-toolbox-cmd
+ is not installed.
+ """
+ with mock.patch.object(subp, "which", return_value=None):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "defaultVal"
+ )
+
+ def test_get_tools_config_internal_exception(self):
+ """
+ This test is designed to verify the behavior if internal exception
+ is raised.
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ with mock.patch.object(
+ subp,
+ "subp",
+ return_value=("key=value", b""),
+ side_effect=subp.ProcessExecutionError(
+ "subp failed", exit_code=99
+ ),
+ ):
+ # verify return value is 'defaultVal', not 'value'.
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "defaultVal",
+ )
+
+ def test_get_tools_config_normal(self):
+ """
+ This test is designed to verify the value could be parsed from
+ key = value of the given [section]
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ # value is not blank
+ with mock.patch.object(
+ subp, "subp", return_value=("key = value ", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "value"
+ )
+ # value is blank
+ with mock.patch.object(subp, "subp", return_value=("key = ", b"")):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), ""
+ )
+ # value contains =
+ with mock.patch.object(
+ subp, "subp", return_value=("key=Bar=Wark", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "Bar=Wark",
+ )
+
+ # value contains specific characters
+ with mock.patch.object(
+ subp, "subp", return_value=("[a] b.c_d=e-f", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "e-f"
+ )
+
+ def test_set_gc_status(self):
+ """
+ This test is designed to verify the behavior of set_gc_status
+ """
+ # config is None, return None
+ self.assertEqual(set_gc_status(None, "Successful"), None)
+
+ # post gc status is NO, return None
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertEqual(set_gc_status(conf, "Successful"), None)
+
+ # post gc status is YES, subp is called to execute command
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ with mock.patch.object(
+ subp, "subp", return_value=("ok", b"")
+ ) as mockobj:
+ self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b""))
+ mockobj.assert_called_once_with(
+ ["vmware-rpctool", "info-set guestinfo.gc.status Successful"],
+ rcs=[0],
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
new file mode 100644
index 00000000..38d45d0e
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -0,0 +1,635 @@
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import sys
+import tempfile
+import textwrap
+
+from cloudinit.sources.DataSourceOVF import (
+ get_network_config_from_conf,
+ read_vmware_imc,
+)
+from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import (
+ ConfigFile as WrappedConfigFile,
+)
+from cloudinit.sources.helpers.vmware.imc.config_nic import (
+ NicConfigurator,
+ gen_subnet,
+)
+from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
+
+logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def ConfigFile(path: str):
+ return WrappedConfigFile(cloud_init_project_dir(path))
+
+
+class TestVmwareConfigFile(CiTestCase):
+ def test_utility_methods(self):
+ """Tests basic utility methods of ConfigFile class"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf.clear()
+
+ self.assertEqual(0, len(cf), "clear size")
+
+ cf._insertKey(" PASSWORD|-PASS ", " foo ")
+ cf._insertKey("BAR", " ")
+
+ self.assertEqual(2, len(cf), "insert size")
+ self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
+ self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
+ self.assertFalse(
+ cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
+ )
+ self.assertFalse(
+ cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
+ )
+ self.assertFalse("FOO" in cf, "hasFoo")
+ self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
+ self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
+ self.assertTrue("BAR" in cf, "hasBar")
+ self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
+ self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
+
+ def test_datasource_instance_id(self):
+ """Tests instance id for the DatasourceOVF"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ instance_id_prefix = "iid-vmware-"
+
+ conf = Config(cf)
+
+ (md1, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md1["instance-id"])
+ self.assertEqual(md1["instance-id"], "iid-vmware-imc")
+
+ (md2, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md2["instance-id"])
+ self.assertEqual(md2["instance-id"], "iid-vmware-imc")
+
+ self.assertEqual(md2["instance-id"], md1["instance-id"])
+
+ def test_configfile_static_2nics(self):
+ """Tests Config class for a configuration with two static NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ conf = Config(cf)
+
+ self.assertEqual("myhost1", conf.host_name, "hostName")
+ self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
+ self.assertTrue(conf.utc, "utc")
+
+ self.assertEqual(
+ ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
+ )
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"],
+ conf.dns_suffixes,
+ "suffixes",
+ )
+
+ nics = conf.nics
+ ipv40 = nics[0].staticIpv4
+
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
+ self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0")
+ self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0")
+ self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
+ self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0")
+ self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1")
+
+ self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
+ self.assertEqual(
+ "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0"
+ )
+
+ self.assertEqual("NIC2", nics[1].name, "nic1")
+ self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
+
+ def test_config_file_dhcp_2nics(self):
+ """Tests Config class for a configuration with two DHCP NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ conf = Config(cf)
+ nics = conf.nics
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
+
+ def test_config_password(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "no")
+
+ conf = Config(cf)
+ self.assertEqual("test-password", conf.admin_password, "password")
+ self.assertFalse(conf.reset_password, "do not reset password")
+
+ def test_config_reset_passwd(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "random")
+
+ conf = Config(cf)
+ with self.assertRaises(ValueError):
+ pw = conf.reset_password
+ self.assertIsNone(pw)
+
+ cf.clear()
+ cf._insertKey("PASSWORD|RESET", "yes")
+ self.assertEqual(1, len(cf), "insert size")
+
+ conf = Config(cf)
+ self.assertTrue(conf.reset_password, "reset password")
+
+ def test_get_config_nameservers(self):
+ """Tests DNS and nameserver settings in a configuration."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+ for type in config_types:
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
+ break
+
+ self.assertEqual(["10.20.145.1", "10.20.145.2"], name_servers, "dns")
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"], dns_suffixes, "suffixes"
+ )
+
+ def test_gen_subnet(self):
+ """Tests if gen_subnet properly calculates network subnet from
+ IPv4 address and netmask"""
+ ip_subnet_list = [
+ ["10.20.87.253", "255.255.252.0", "10.20.84.0"],
+ ["10.20.92.105", "255.255.252.0", "10.20.92.0"],
+ ["192.168.0.10", "255.255.0.0", "192.168.0.0"],
+ ]
+ for entry in ip_subnet_list:
+ self.assertEqual(
+ entry[2],
+ gen_subnet(entry[0], entry[1]),
+ "Subnet for a specified ip and netmask",
+ )
+
+ def test_get_config_dns_suffixes(self):
+ """Tests if get_network_config_from_conf properly
+ generates nameservers and dns settings from a
+ specified configuration"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+ for type in config_types:
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
+ break
+
+ self.assertEqual([], name_servers, "dns")
+ self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes")
+
+ def test_get_nics_list_dhcp(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with a list of DHCP NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of config elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ for cfg in nics_cfg_list:
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+ subnets = nic1.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC1")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1")
+ self.assertEqual("auto", subnet.get("control"), "NIC1 Control type")
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2"
+ )
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC2")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2")
+ self.assertEqual("auto", subnet.get("control"), "NIC2 Control type")
+
+ def test_get_nics_list_static(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with 2 static NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ route_list = []
+ for cfg in nics_cfg_list:
+ cfg_type = cfg.get("type")
+ if cfg_type == "physical":
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+
+ subnets = nic1.get("subnets")
+ self.assertEqual(2, len(subnets), "Number of subnets")
+
+ static_subnet = []
+ static6_subnet = []
+
+ for subnet in subnets:
+ subnet_type = subnet.get("type")
+ if subnet_type == "static":
+ static_subnet.append(subnet)
+ elif subnet_type == "static6":
+ static6_subnet.append(subnet)
+ else:
+ self.assertEqual(True, False, "Unknown type")
+ if "route" in subnet:
+ for route in subnet.get("routes"):
+ route_list.append(route)
+
+ self.assertEqual(1, len(static_subnet), "Number of static subnet")
+ self.assertEqual(1, len(static6_subnet), "Number of static6 subnet")
+
+ subnet = static_subnet[0]
+ self.assertEqual(
+ "10.20.87.154",
+ subnet.get("address"),
+ "IPv4 address of static subnet",
+ )
+ self.assertEqual(
+ "255.255.252.0", subnet.get("netmask"), "NetMask of static subnet"
+ )
+ self.assertEqual(
+ "auto", subnet.get("control"), "control for static subnet"
+ )
+
+ subnet = static6_subnet[0]
+ self.assertEqual(
+ "fc00:10:20:87::154",
+ subnet.get("address"),
+ "IPv6 address of static subnet",
+ )
+ self.assertEqual(
+ "64", subnet.get("netmask"), "NetMask of static6 subnet"
+ )
+
+ route_set = set(["10.20.87.253", "10.20.87.105", "192.168.0.10"])
+ for route in route_list:
+ self.assertEqual(10000, route.get("metric"), "metric of route")
+ gateway = route.get("gateway")
+ if gateway in route_set:
+ route_set.discard(gateway)
+ else:
+ self.fail("invalid gateway %s" % (gateway))
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:ef:7d", nic2.get("mac_address"), "mac address of NIC2"
+ )
+
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "Number of subnets for NIC2")
+
+ subnet = subnets[0]
+ self.assertEqual("static", subnet.get("type"), "Subnet type")
+ self.assertEqual(
+ "192.168.6.102", subnet.get("address"), "Subnet address"
+ )
+ self.assertEqual(
+ "255.255.0.0", subnet.get("netmask"), "Subnet netmask"
+ )
+
+ def test_custom_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.custom_script_name)
+ cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
+ conf = Config(cf)
+ self.assertEqual("test-script", conf.custom_script_name)
+
+ def test_post_gc_status(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.post_gc_status)
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ self.assertTrue(conf.post_gc_status)
+
+ def test_no_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+
+ def test_yes_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
+ conf = Config(cf)
+ self.assertTrue(conf.default_run_post_script)
+
+
+class TestVmwareNetConfig(CiTestCase):
+ """Test conversion of vmware config to cloud-init config."""
+
+ maxDiff = None
+
+ def _get_NicConfigurator(self, text):
+ fp = None
+ try:
+ with tempfile.NamedTemporaryFile(
+ mode="w", dir=self.tmp_dir(), delete=False
+ ) as fp:
+ fp.write(text)
+ fp.close()
+ cfg = Config(ConfigFile(fp.name))
+ return NicConfigurator(cfg.nics, use_system_devices=False)
+ finally:
+ if fp:
+ os.unlink(fp.name)
+
+ def test_non_primary_nic_without_gateway(self):
+ """A non primary nic set is not required to have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_non_primary_nic_with_gateway(self):
+ """A non primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "10.20.84.0/22",
+ "gateway": "10.20.87.253",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_cust_non_primary_nic_with_gateway_(self):
+ """A customer non primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = static-debug-vm
+ DOMAINNAME = cluster.local
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:ac:d1:8a
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 100.115.223.75
+ NETMASK = 255.255.255.0
+ GATEWAY = 100.115.223.254
+
+
+ [DNS]
+ DNSFROMDHCP=no
+
+ NAMESERVER|1 = 8.8.8.8
+
+ [DATETIME]
+ UTC = yes
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:ac:d1:8a",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "100.115.223.75",
+ "netmask": "255.255.255.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "100.115.223.0/24",
+ "gateway": "100.115.223.254",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_a_primary_nic_with_gateway(self):
+ """A primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ PRIMARY = true
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "gateway": "10.20.87.253",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_meta_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.meta_data_name)
+ cf._insertKey("CLOUDINIT|METADATA", "test-metadata")
+ conf = Config(cf)
+ self.assertEqual("test-metadata", conf.meta_data_name)
+
+ def test_user_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.user_data_name)
+ cf._insertKey("CLOUDINIT|USERDATA", "test-userdata")
+ conf = Config(cf)
+ self.assertEqual("test-userdata", conf.user_data_name)
+
+
+# vi: ts=4 expandtab
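
Note: taken together, the NIC tests above exercise one small pipeline: ConfigFile parses the customization spec, Config exposes typed accessors, and NicConfigurator.generate() emits the list of network dictionaries that the assertions walk. The standalone sketch below restates that pipeline; the imc import paths and the sample .cfg path are assumptions for illustration, not taken from this diff.

# Illustrative sketch; the import paths below are assumed, not part of this diff.
from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator


def nic_configs(path="tests/data/vmware/cust-dhcp-2nic.cfg"):
    """Return the cloud-init network dicts for a VMware customization spec."""
    cf = ConfigFile(path)  # parse the INI-style customization file
    cfg = Config(cf)  # typed accessors: cfg.nics, cfg.custom_script_name, ...
    nicc = NicConfigurator(cfg.nics, use_system_devices=False)
    return nicc.generate()  # e.g. [{"type": "physical", "name": "NIC1", ...}]


if __name__ == "__main__":
    for nic in nic_configs():
        print(nic["name"], nic.get("mac_address"), nic.get("subnets"))
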
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 739bbebf..0ed8a120 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -5,14 +5,9 @@ import os
import shutil
import tempfile
+from cloudinit import handlers, helpers, settings, url_helper, util
from cloudinit.cmd import main
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase, CiTestCase, ExitStack, mock
+from tests.unittests.helpers import CiTestCase, ExitStack, TestCase, mock
class FakeModule(handlers.Handler):
@@ -28,7 +23,6 @@ class FakeModule(handlers.Handler):
class TestWalkerHandleHandler(TestCase):
-
def setUp(self):
super(TestWalkerHandleHandler, self).setUp()
tmpdir = tempfile.mkdtemp()
@@ -39,13 +33,16 @@ class TestWalkerHandleHandler(TestCase):
"frequency": "",
"handlerdir": tmpdir,
"handlers": helpers.ContentHandlers(),
- "data": None}
+ "data": None,
+ }
self.expected_module_name = "part-handler-%03d" % (
- self.data["handlercount"],)
+ self.data["handlercount"],
+ )
expected_file_name = "%s.py" % self.expected_module_name
self.expected_file_fullname = os.path.join(
- self.data["handlerdir"], expected_file_name)
+ self.data["handlerdir"], expected_file_name
+ )
self.module_fake = FakeModule()
self.ctype = None
self.filename = None
@@ -56,45 +53,55 @@ class TestWalkerHandleHandler(TestCase):
resources = ExitStack()
self.addCleanup(resources.close)
self.write_file_mock = resources.enter_context(
- mock.patch('cloudinit.util.write_file'))
+ mock.patch("cloudinit.util.write_file")
+ )
def test_no_errors(self):
"""Payload gets written to file and added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module", return_value=self.module_fake
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 1)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 1)
def test_import_error(self):
"""Module import errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=ImportError) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module", side_effect=ImportError
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 0)
def test_attribute_error(self):
"""Attribute errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=AttributeError,
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module",
+ side_effect=AttributeError,
+ return_value=self.module_fake,
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 0)
class TestHandlerHandlePart(TestCase):
-
def setUp(self):
super(TestHandlerHandlePart, self).setUp()
self.data = "fake data"
@@ -103,7 +110,7 @@ class TestHandlerHandlePart(TestCase):
self.payload = "fake payload"
self.frequency = settings.PER_INSTANCE
self.headers = {
- 'Content-Type': self.ctype,
+ "Content-Type": self.ctype,
}
def test_normal_version_1(self):
@@ -111,126 +118,172 @@ class TestHandlerHandlePart(TestCase):
C{handle_part} is called without C{frequency} for
C{handler_version} == 1.
"""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=1
+ )
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
def test_normal_version_2(self):
"""
C{handle_part} is called with C{frequency} for
C{handler_version} == 2.
"""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=2)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=2
+ )
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload,
- settings.PER_INSTANCE)
+ self.data,
+ self.ctype,
+ self.filename,
+ self.payload,
+ settings.PER_INSTANCE,
+ )
def test_modfreq_per_always(self):
"""
C{handle_part} is called regardless of frequency if the module frequency is per-always.
"""
self.frequency = "once"
- mod_mock = mock.Mock(frequency=settings.PER_ALWAYS,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(frequency=settings.PER_ALWAYS, handler_version=1)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
def test_no_handle_when_modfreq_once(self):
"""C{handle_part} is not called if frequency is once."""
self.frequency = "once"
mod_mock = mock.Mock(frequency=settings.PER_ONCE)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
self.assertEqual(0, mod_mock.handle_part.call_count)
def test_exception_is_caught(self):
"""Exceptions within C{handle_part} are caught and logged."""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=1
+ )
mod_mock.handle_part.side_effect = Exception
try:
- handlers.run_part(mod_mock, self.data, self.filename,
- self.payload, self.frequency, self.headers)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
except Exception:
self.fail("Exception was not caught in handle_part")
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
class TestCmdlineUrl(CiTestCase):
def test_parse_cmdline_url_nokey_raises_keyerror(self):
self.assertRaises(
- KeyError, main.parse_cmdline_url, 'root=foo bar single')
+ KeyError, main.parse_cmdline_url, "root=foo bar single"
+ )
def test_parse_cmdline_url_found(self):
- cmdline = 'root=foo bar single url=http://example.com arg1 -v'
+ cmdline = "root=foo bar single url=http://example.com arg1 -v"
self.assertEqual(
- ('url', 'http://example.com'), main.parse_cmdline_url(cmdline))
+ ("url", "http://example.com"), main.parse_cmdline_url(cmdline)
+ )
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_invalid_content(self, m_read):
key = "cloud-config-url"
- url = 'http://example.com/foo'
+ url = "http://example.com/foo"
cmdline = "ro %s=%s bar=1" % (key, url)
m_read.return_value = url_helper.StringResponse(b"unexpected blob")
fpath = self.tmp_path("ccfile")
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(logging.WARN, lvl)
self.assertIn(url, msg)
self.assertFalse(os.path.exists(fpath))
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_valid_content(self, m_read):
url = "http://example.com/foo"
payload = b"#cloud-config\nmydata: foo\nbar: wark\n"
- cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url)
+ cmdline = "ro %s=%s bar=1" % ("cloud-config-url", url)
m_read.return_value = url_helper.StringResponse(payload)
fpath = self.tmp_path("ccfile")
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(util.load_file(fpath, decode=False), payload)
self.assertEqual(logging.INFO, lvl)
self.assertIn(url, msg)
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_no_key_found(self, m_read):
cmdline = "ro mykey=http://example.com/foo root=foo"
fpath = self.tmp_path("ccpath")
lvl, _msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
m_read.assert_not_called()
self.assertFalse(os.path.exists(fpath))
self.assertEqual(logging.DEBUG, lvl)
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_exception_warns(self, m_read):
url = "http://example.com/foo"
cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url
fpath = self.tmp_path("ccfile")
m_read.side_effect = url_helper.UrlError(
- cause="Unexpected Error", url="http://example.com/foo")
+ cause="Unexpected Error", url="http://example.com/foo"
+ )
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(logging.WARN, lvl)
self.assertIn(url, msg)
self.assertFalse(os.path.exists(fpath))
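
Note: the TestCmdlineUrl changes above are formatting only, but the flow they exercise is easy to restate: attempt_cmdline_url() looks for a cloud-config-url= (or url=) key on the kernel command line, fetches it, writes a valid #cloud-config payload to the given path, and returns a log level and message. A condensed sketch of the valid-content case follows, reusing only calls visible in this hunk; the temporary-directory handling is illustrative.

# Sketch of the valid-content path; temporary-file handling is illustrative.
import logging
import os
import tempfile
from unittest import mock

from cloudinit import url_helper
from cloudinit.cmd import main


def check_valid_content():
    payload = b"#cloud-config\nmydata: foo\n"
    cmdline = "ro cloud-config-url=http://example.com/foo bar=1"
    with tempfile.TemporaryDirectory() as tmpd:
        fpath = os.path.join(tmpd, "ccfile")
        with mock.patch(
            "cloudinit.cmd.main.url_helper.read_file_or_url",
            return_value=url_helper.StringResponse(payload),
        ):
            lvl, msg = main.attempt_cmdline_url(
                fpath, network=True, cmdline=cmdline
            )
        assert lvl == logging.INFO and "http://example.com/foo" in msg
        with open(fpath, "rb") as fp:
            assert fp.read() == payload
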
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index 0101b0e3..684a9ae5 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -5,8 +5,7 @@ import os
import stat
from cloudinit import atomic_helper
-
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
class TestAtomicHelper(CiTestCase):
@@ -34,7 +33,7 @@ class TestAtomicHelper(CiTestCase):
def test_write_json(self):
"""write_json output is readable json."""
path = self.tmp_path("test_write_json")
- data = {'key1': 'value1', 'key2': ['i1', 'i2']}
+ data = {"key1": "value1", "key2": ["i1", "i2"]}
atomic_helper.write_json(path, data)
with open(path, "r") as fp:
found = json.load(fp)
@@ -55,4 +54,5 @@ class TestAtomicHelper(CiTestCase):
file_stat = os.stat(path)
self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode))
+
# vi: ts=4 expandtab
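
Note: only quoting changed in the atomic_helper hunk, but the behaviour it pins down is worth a short reminder: write_json() writes the serialised data atomically (temp file plus rename), and the on-disk result is ordinary JSON. A minimal sketch; the temporary path is illustrative.

import json
import os
import tempfile

from cloudinit import atomic_helper

data = {"key1": "value1", "key2": ["i1", "i2"]}
with tempfile.TemporaryDirectory() as tmpd:
    path = os.path.join(tmpd, "example.json")
    atomic_helper.write_json(path, data)  # atomic write: temp file + rename
    with open(path, "r") as fp:
        assert json.load(fp) == data  # output is readable JSON
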
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index c5675249..0dae924d 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -9,47 +9,60 @@ import shutil
import tempfile
from textwrap import dedent
+import pytest
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja)
-
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import handlers, helpers, subp, util
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import (
- JinjaTemplatePartHandler, convert_jinja_instance_data,
- render_jinja_payload)
+ JinjaTemplatePartHandler,
+ convert_jinja_instance_data,
+ render_jinja_payload,
+)
from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.shell_script_by_frequency import (
+ get_script_folder_by_frequency,
+ path_map,
+)
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJinja,
+)
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
+INSTANCE_DATA_FILE = "instance-data-sensitive.json"
class TestUpstartJobPartHandler(FilesystemMockingTestCase):
- mpath = 'cloudinit.handlers.upstart_job.'
+ mpath = "cloudinit.handlers.upstart_job."
def test_upstart_frequency_no_out(self):
c_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, c_root)
up_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, up_root)
- paths = helpers.Paths({
- 'cloud_dir': c_root,
- 'upstart_dir': up_root,
- })
+ paths = helpers.Paths(
+ {
+ "cloud_dir": c_root,
+ "upstart_dir": up_root,
+ }
+ )
h = UpstartJobPartHandler(paths)
# No files should be written out when
# the frequency is not per-instance
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', frequency=PER_ALWAYS)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
+ h.handle_part("", handlers.CONTENT_START, None, None, None)
+ h.handle_part(
+ "blah",
+ "text/upstart-job",
+ "test.conf",
+ "blah",
+ frequency=PER_ALWAYS,
+ )
+ h.handle_part("", handlers.CONTENT_END, None, None, None)
self.assertEqual(0, len(os.listdir(up_root)))
def test_upstart_frequency_single(self):
@@ -59,47 +72,54 @@ class TestUpstartJobPartHandler(FilesystemMockingTestCase):
self.patchOS(new_root)
self.patchUtils(new_root)
- paths = helpers.Paths({
- 'upstart_dir': "/etc/upstart",
- })
+ paths = helpers.Paths(
+ {
+ "upstart_dir": "/etc/upstart",
+ }
+ )
util.ensure_dir("/run")
util.ensure_dir("/etc/upstart")
- with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True):
- with mock.patch.object(subp, 'subp') as m_subp:
+ with mock.patch(self.mpath + "SUITABLE_UPSTART", return_value=True):
+ with mock.patch.object(subp, "subp") as m_subp:
h = UpstartJobPartHandler(paths)
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', frequency=PER_INSTANCE)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
+ h.handle_part("", handlers.CONTENT_START, None, None, None)
+ h.handle_part(
+ "blah",
+ "text/upstart-job",
+ "test.conf",
+ "blah",
+ frequency=PER_INSTANCE,
+ )
+ h.handle_part("", handlers.CONTENT_END, None, None, None)
- self.assertEqual(len(os.listdir('/etc/upstart')), 1)
+ self.assertEqual(len(os.listdir("/etc/upstart")), 1)
m_subp.assert_called_once_with(
- ['initctl', 'reload-configuration'], capture=False)
+ ["initctl", "reload-configuration"], capture=False
+ )
class TestJinjaTemplatePartHandler(CiTestCase):
with_logs = True
- mpath = 'cloudinit.handlers.jinja_template.'
+ mpath = "cloudinit.handlers.jinja_template."
def setUp(self):
super(TestJinjaTemplatePartHandler, self).setUp()
self.tmp = self.tmp_dir()
- self.run_dir = os.path.join(self.tmp, 'run_dir')
+ self.run_dir = os.path.join(self.tmp, "run_dir")
util.ensure_dir(self.run_dir)
- self.paths = helpers.Paths({
- 'cloud_dir': self.tmp, 'run_dir': self.run_dir})
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.run_dir}
+ )
def test_jinja_template_part_handler_defaults(self):
"""On init, paths are saved and subhandler types are empty."""
h = JinjaTemplatePartHandler(self.paths)
- self.assertEqual(['## template: jinja'], h.prefixes)
+ self.assertEqual(["## template: jinja"], h.prefixes)
self.assertEqual(3, h.handler_version)
self.assertEqual(self.paths, h.paths)
self.assertEqual({}, h.sub_handlers)
@@ -109,34 +129,47 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler, cloudconfig_handler])
+ self.paths, sub_handlers=[script_handler, cloudconfig_handler]
+ )
self.assertCountEqual(
- ['text/cloud-config', 'text/cloud-config-jsonp',
- 'text/x-shellscript'],
- h.sub_handlers)
+ [
+ "text/cloud-config",
+ "text/cloud-config-jsonp",
+ "text/x-shellscript",
+ ],
+ h.sub_handlers,
+ )
def test_jinja_template_part_handler_looks_up_subhandler_types(self):
"""When sub_handlers are passed, init lists types of subhandlers."""
script_handler = ShellScriptPartHandler(self.paths)
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler, cloudconfig_handler])
+ self.paths, sub_handlers=[script_handler, cloudconfig_handler]
+ )
self.assertCountEqual(
- ['text/cloud-config', 'text/cloud-config-jsonp',
- 'text/x-shellscript'],
- h.sub_handlers)
+ [
+ "text/cloud-config",
+ "text/cloud-config-jsonp",
+ "text/x-shellscript",
+ ],
+ h.sub_handlers,
+ )
def test_jinja_template_handle_noop_on_content_signals(self):
"""Perform no part handling when content type is CONTENT_SIGNALS."""
script_handler = ShellScriptPartHandler(self.paths)
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch.object(script_handler, 'handle_part') as m_handle_part:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, "handle_part") as m_handle_part:
h.handle_part(
- data='data', ctype=handlers.CONTENT_START, filename='part-1',
- payload='## template: jinja\n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
+ data="data",
+ ctype=handlers.CONTENT_START,
+ filename="part-1",
+ payload="## template: jinja\n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
m_handle_part.assert_not_called()
@skipUnlessJinja()
@@ -145,21 +178,24 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
self.assertEqual(2, script_handler.handler_version)
- # Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': 'echo himom'}
+ # Create required instance data json file
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": "echo himom"}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch.object(script_handler, 'handle_part') as m_part:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, "handle_part") as m_part:
# ctype with leading '!' not in handlers.CONTENT_SIGNALS
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}',
- frequency='freq', headers='headers')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \t \n#!/bin/bash\n{{ topkey }}",
+ frequency="freq",
+ headers="headers",
+ )
m_part.assert_called_once_with(
- 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq')
+ "data", "!__begin__", "part01", "#!/bin/bash\necho himom", "freq"
+ )
@skipUnlessJinja()
def test_jinja_template_handle_subhandler_v3_with_clean_payload(self):
@@ -168,126 +204,172 @@ class TestJinjaTemplatePartHandler(CiTestCase):
self.assertEqual(3, cloudcfg_handler.handler_version)
# Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"sub": "runcmd: [echo hi]"}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[cloudcfg_handler])
- with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part:
+ self.paths, sub_handlers=[cloudcfg_handler]
+ )
+ with mock.patch.object(cloudcfg_handler, "handle_part") as m_part:
# ctype with leading '!' not in handlers.CONTENT_SIGNALS
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_END,
- filename='part01',
- payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}',
- frequency='freq', headers='headers')
+ data="data",
+ ctype="!" + handlers.CONTENT_END,
+ filename="part01",
+ payload="## template: jinja\n#cloud-config\n{{ topkey.sub }}",
+ frequency="freq",
+ headers="headers",
+ )
m_part.assert_called_once_with(
- 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]',
- 'freq', 'headers')
+ "data",
+ "!__end__",
+ "part01",
+ "#cloud-config\nruncmd: [echo hi]",
+ "freq",
+ "headers",
+ )
def test_jinja_template_handle_errors_on_missing_instance_data_json(self):
"""If instance-data is absent, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
with self.assertRaises(RuntimeError) as context_manager:
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertEqual(
- 'Cannot render jinja template vars. Instance data not yet present'
- ' at {}/instance-data.json'.format(
- self.run_dir), str(context_manager.exception))
+ "Cannot render jinja template vars. Instance data not yet present"
+ " at {}/{}".format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception),
+ )
self.assertFalse(
os.path.exists(script_file),
- 'Unexpected file created %s' % script_file)
+ "Unexpected file created %s" % script_file,
+ )
def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
"""If instance-data is unreadable, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
util.write_file(instance_json, util.json_dumps({}))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch(self.mpath + 'load_file') as m_load:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch(self.mpath + "load_file") as m_load:
with self.assertRaises(RuntimeError) as context_manager:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+ m_load.side_effect = OSError(errno.EACCES, "Not allowed")
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertEqual(
- 'Cannot render jinja template vars. No read permission on'
- " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir),
- str(context_manager.exception))
+ "Cannot render jinja template vars. No read permission on "
+ "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception),
+ )
self.assertFalse(
os.path.exists(script_file),
- 'Unexpected file created %s' % script_file)
+ "Unexpected file created %s" % script_file,
+ )
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content(self):
- """When present, render jinja variables from instance-data.json."""
+ """When present, render jinja variables from instance data"""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'subkey': 'echo himom'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"subkey": "echo himom"}}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
payload=(
- '## template: jinja \n'
- '#!/bin/bash\n'
- '{{ topkey.subkey|default("nosubkey") }}'),
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ "## template: jinja \n"
+ "#!/bin/bash\n"
+ '{{ topkey.subkey|default("nosubkey") }}'
+ ),
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertNotIn(
- 'Instance data not yet present at {}/instance-data.json'.format(
- self.run_dir),
- self.logs.getvalue())
+ "Instance data not yet present at {}/{}".format(
+ self.run_dir, INSTANCE_DATA_FILE
+ ),
+ self.logs.getvalue(),
+ )
self.assertEqual(
- '#!/bin/bash\necho himom', util.load_file(script_file))
+ "#!/bin/bash\necho himom", util.load_file(script_file)
+ )
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content_missing_keys(self):
"""When specified jinja variable is undefined, log a warning."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'subkey': 'echo himom'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"subkey": "echo himom"}}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\n{{ goodtry }}",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertTrue(
os.path.exists(script_file),
- 'Missing expected file %s' % script_file)
+ "Missing expected file %s" % script_file,
+ )
self.assertIn(
"WARNING: Could not render jinja template variables in file"
" 'part01': 'goodtry'\n",
- self.logs.getvalue())
-
-
-class TestConvertJinjaInstanceData(CiTestCase):
-
- def test_convert_instance_data_hyphens_to_underscores(self):
- """Replace hyphenated keys with underscores in instance-data."""
- data = {'hyphenated-key': 'hyphenated-val',
- 'underscore_delim_key': 'underscore_delimited_val'}
- expected_data = {'hyphenated_key': 'hyphenated-val',
- 'underscore_delim_key': 'underscore_delimited_val'}
- self.assertEqual(
- expected_data,
- convert_jinja_instance_data(data=data))
+ self.logs.getvalue(),
+ )
+
+
+class TestConvertJinjaInstanceData:
+ @pytest.mark.parametrize(
+ "include_key_aliases,data,expected",
+ (
+ (False, {"my-key": "my-val"}, {"my-key": "my-val"}),
+ (
+ True,
+ {"my-key": "my-val"},
+ {"my-key": "my-val", "my_key": "my-val"},
+ ),
+ (False, {"my.key": "my.val"}, {"my.key": "my.val"}),
+ (
+ True,
+ {"my.key": "my.val"},
+ {"my.key": "my.val", "my_key": "my.val"},
+ ),
+ (
+ True,
+ {"my/key": "my/val"},
+ {"my/key": "my/val", "my_key": "my/val"},
+ ),
+ ),
+ )
+ def test_convert_instance_data_operators_to_underscores(
+ self, include_key_aliases, data, expected
+ ):
+ """Keys containing Jinja operators get underscore aliases in instance-data."""
+ assert expected == convert_jinja_instance_data(
+ data=data, include_key_aliases=include_key_aliases
+ )
def test_convert_instance_data_promotes_versioned_keys_to_top_level(self):
"""Any versioned keys are promoted as top-level keys
@@ -296,45 +378,49 @@ class TestConvertJinjaInstanceData(CiTestCase):
allow ease of reference for users. Instead of v1.availability_zone,
the name availability_zone can be used in templates.
"""
- data = {'ds': {'dskey1': 1, 'dskey2': 2},
- 'v1': {'v1key1': 'v1.1'},
- 'v2': {'v2key1': 'v2.1'}}
+ data = {
+ "ds": {"dskey1": 1, "dskey2": 2},
+ "v1": {"v1key1": "v1.1"},
+ "v2": {"v2key1": "v2.1"},
+ }
expected_data = copy.deepcopy(data)
- expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'})
+ expected_data.update({"v1key1": "v1.1", "v2key1": "v2.1"})
converted_data = convert_jinja_instance_data(data=data)
- self.assertCountEqual(
- ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys())
- self.assertEqual(
- expected_data,
- converted_data)
+ assert sorted(["ds", "v1", "v2", "v1key1", "v2key1"]) == sorted(
+ converted_data.keys()
+ )
+ assert expected_data == converted_data
def test_convert_instance_data_most_recent_version_of_promoted_keys(self):
"""The most-recent versioned key value is promoted to top-level."""
- data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'},
- 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'},
- 'v3': {'key1': 'newest v3 key1'}}
+ data = {
+ "v1": {"key1": "old v1 key1", "key2": "old v1 key2"},
+ "v2": {"key1": "newer v2 key1", "key3": "newer v2 key3"},
+ "v3": {"key1": "newest v3 key1"},
+ }
expected_data = copy.deepcopy(data)
expected_data.update(
- {'key1': 'newest v3 key1', 'key2': 'old v1 key2',
- 'key3': 'newer v2 key3'})
+ {
+ "key1": "newest v3 key1",
+ "key2": "old v1 key2",
+ "key3": "newer v2 key3",
+ }
+ )
converted_data = convert_jinja_instance_data(data=data)
- self.assertEqual(
- expected_data,
- converted_data)
+ assert expected_data == converted_data
def test_convert_instance_data_decodes_decode_paths(self):
"""Any decode_paths provided are decoded by convert_instance_data."""
- data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'}
+ data = {"key1": {"subkey1": "aGkgbW9t"}, "key2": "aGkgZGFk"}
expected_data = copy.deepcopy(data)
- expected_data['key1']['subkey1'] = 'hi mom'
+ expected_data["key1"]["subkey1"] = "hi mom"
converted_data = convert_jinja_instance_data(
- data=data, decode_paths=('key1/subkey1',))
- self.assertEqual(
- expected_data,
- converted_data)
+ data=data, decode_paths=("key1/subkey1",)
+ )
+ assert expected_data == converted_data
class TestRenderJinjaPayload(CiTestCase):
@@ -345,39 +431,69 @@ class TestRenderJinjaPayload(CiTestCase):
def test_render_jinja_payload_logs_jinja_vars_on_debug(self):
"""When debug is True, log the jinja variables available."""
payload = (
- '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}')
- instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
- expected_log = dedent("""\
+ "## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}"
+ )
+ instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"}
+ expected_log = dedent(
+ """\
DEBUG: Converted jinja variables
{
"hostname": "foo",
+ "instance-id": "iid",
"instance_id": "iid",
"v1": {
"hostname": "foo"
}
}
- """)
+ """
+ )
self.assertEqual(
render_jinja_payload(
- payload=payload, payload_fn='myfile',
- instance_data=instance_data, debug=True),
- '#!/bin/sh\necho hi from foo')
+ payload=payload,
+ payload_fn="myfile",
+ instance_data=instance_data,
+ debug=True,
+ ),
+ "#!/bin/sh\necho hi from foo",
+ )
self.assertEqual(expected_log, self.logs.getvalue())
@skipUnlessJinja()
def test_render_jinja_payload_replaces_missing_variables_and_warns(self):
"""Warn on missing jinja variables and replace the absent variable."""
- payload = (
- '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}')
- instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
+ payload = "## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}"
+ instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"}
self.assertEqual(
render_jinja_payload(
- payload=payload, payload_fn='myfile',
- instance_data=instance_data),
- '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE')
+ payload=payload,
+ payload_fn="myfile",
+ instance_data=instance_data,
+ ),
+ "#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE",
+ )
expected_log = (
- 'WARNING: Could not render jinja template variables in file'
- " 'myfile': 'NOTHERE'")
+ "WARNING: Could not render jinja template variables in file"
+ " 'myfile': 'NOTHERE'"
+ )
self.assertIn(expected_log, self.logs.getvalue())
+
+class TestShellScriptByFrequencyHandlers:
+ def do_test_frequency(self, frequency):
+ ci_paths = read_cfg_paths()
+ scripts_dir = ci_paths.get_cpath("scripts")
+ testFolder = os.path.join(scripts_dir, path_map[frequency])
+ folder = get_script_folder_by_frequency(frequency, scripts_dir)
+ assert testFolder == folder
+
+ def test_get_script_folder_per_boot(self):
+ self.do_test_frequency(PER_ALWAYS)
+
+ def test_get_script_folder_per_instance(self):
+ self.do_test_frequency(PER_INSTANCE)
+
+ def test_get_script_folder_per_once(self):
+ self.do_test_frequency(PER_ONCE)
+
+
# vi: ts=4 expandtab
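
Note: two behaviours dominate the jinja_template changes above: convert_jinja_instance_data() now optionally adds underscore aliases for keys containing Jinja operator characters, and it still promotes versioned keys to the top level. The sketch below restates both behaviours using only inputs and outputs that appear in the tests.

from cloudinit.handlers.jinja_template import convert_jinja_instance_data

# With include_key_aliases=True the original key is kept and an underscore
# alias is added, so templates can reference {{ my_key }}.
assert convert_jinja_instance_data(
    data={"my-key": "my-val"}, include_key_aliases=True
) == {"my-key": "my-val", "my_key": "my-val"}

# Versioned keys are promoted to the top level; the newest version wins.
data = {"v1": {"key1": "old v1 key1"}, "v2": {"key1": "newer v2 key1"}}
converted = convert_jinja_instance_data(data=data)
assert converted["key1"] == "newer v2 key1"
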
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 74f85959..bed73a93 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -1,13 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import os
+import contextlib
import io
+import os
from collections import namedtuple
from cloudinit.cmd import main as cli
-from cloudinit.tests import helpers as test_helpers
from cloudinit.util import load_file, load_json
-
+from tests.unittests import helpers as test_helpers
mock = test_helpers.mock
@@ -23,7 +23,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def _call_main(self, sysv_args=None):
if not sysv_args:
- sysv_args = ['cloud-init']
+ sysv_args = ["cloud-init"]
try:
return cli.main(sysv_args=sysv_args)
except SystemExit as e:
@@ -35,36 +35,37 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
Valid name values are only init and modules.
"""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction():
- raise Exception('Should not call myaction')
+ raise Exception("Should not call myaction")
- myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode')
+ myargs = FakeArgs(("doesnotmatter", myaction), False, "bogusmode")
with self.assertRaises(ValueError) as cm:
- cli.status_wrapper('init1', myargs, data_d, link_d)
- self.assertEqual('unknown name: init1', str(cm.exception))
- self.assertNotIn('Should not call myaction', self.logs.getvalue())
+ cli.status_wrapper("init1", myargs, data_d, link_d)
+ self.assertEqual("unknown name: init1", str(cm.exception))
+ self.assertNotIn("Should not call myaction", self.logs.getvalue())
def test_status_wrapper_errors_on_invalid_modes(self):
"""status_wrapper will error if a parameter combination is invalid."""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction():
- raise Exception('Should not call myaction')
+ raise Exception("Should not call myaction")
- myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode')
+ myargs = FakeArgs(("modules_name", myaction), False, "bogusmode")
with self.assertRaises(ValueError) as cm:
- cli.status_wrapper('modules', myargs, data_d, link_d)
+ cli.status_wrapper("modules", myargs, data_d, link_d)
self.assertEqual(
"Invalid cloud init mode specified 'modules-bogusmode'",
- str(cm.exception))
- self.assertNotIn('Should not call myaction', self.logs.getvalue())
+ str(cm.exception),
+ )
+ self.assertNotIn("Should not call myaction", self.logs.getvalue())
def test_status_wrapper_init_local_writes_fresh_status_info(self):
"""When running in init-local mode, status_wrapper writes status.json.
@@ -72,78 +73,90 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
Old status and results artifacts are also removed.
"""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- status_link = self.tmp_path('status.json', link_d)
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ status_link = self.tmp_path("status.json", link_d)
# Write old artifacts which will be removed or updated.
for _dir in data_d, link_d:
test_helpers.populate_dir(
- _dir, {'status.json': 'old', 'result.json': 'old'})
+ _dir, {"status.json": "old", "result.json": "old"}
+ )
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction(name, args):
# Return an error so the test can check that status captures it
- return 'SomeDatasource', ['an error']
+ return "SomeDatasource", ["an error"]
- myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode')
- cli.status_wrapper('init', myargs, data_d, link_d)
+ myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode")
+ cli.status_wrapper("init", myargs, data_d, link_d)
# The error returned by myaction is captured in status
- status_v1 = load_json(load_file(status_link))['v1']
- self.assertEqual(['an error'], status_v1['init-local']['errors'])
- self.assertEqual('SomeDatasource', status_v1['datasource'])
+ status_v1 = load_json(load_file(status_link))["v1"]
+ self.assertEqual(["an error"], status_v1["init-local"]["errors"])
+ self.assertEqual("SomeDatasource", status_v1["datasource"])
self.assertFalse(
- os.path.exists(self.tmp_path('result.json', data_d)),
- 'unexpected result.json found')
+ os.path.exists(self.tmp_path("result.json", data_d)),
+ "unexpected result.json found",
+ )
self.assertFalse(
- os.path.exists(self.tmp_path('result.json', link_d)),
- 'unexpected result.json link found')
+ os.path.exists(self.tmp_path("result.json", link_d)),
+ "unexpected result.json link found",
+ )
def test_no_arguments_shows_usage(self):
exit_code = self._call_main()
- self.assertIn('usage: cloud-init', self.stderr.getvalue())
+ self.assertIn("usage: cloud-init", self.stderr.getvalue())
self.assertEqual(2, exit_code)
def test_no_arguments_shows_error_message(self):
exit_code = self._call_main()
missing_subcommand_message = [
- 'too few arguments', # python2.7 msg
- 'the following arguments are required: subcommand' # python3 msg
+ "too few arguments", # python2.7 msg
+ "the following arguments are required: subcommand", # python3 msg
]
error = self.stderr.getvalue()
- matches = ([msg in error for msg in missing_subcommand_message])
+ matches = [msg in error for msg in missing_subcommand_message]
self.assertTrue(
- any(matches), 'Did not find error message for missing subcommand')
+ any(matches), "Did not find error message for missing subcommand"
+ )
self.assertEqual(2, exit_code)
def test_all_subcommands_represented_in_help(self):
"""All known subparsers are represented in the cloud-init help doc."""
self._call_main()
error = self.stderr.getvalue()
- expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook',
- 'features', 'init', 'modules', 'single']
+ expected_subcommands = [
+ "analyze",
+ "clean",
+ "devel",
+ "dhclient-hook",
+ "features",
+ "init",
+ "modules",
+ "single",
+ ]
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
- @mock.patch('cloudinit.cmd.main.status_wrapper')
+ @mock.patch("cloudinit.cmd.main.status_wrapper")
def test_init_subcommand_parser(self, m_status_wrapper):
"""The subcommand 'init' calls status_wrapper passing init."""
- self._call_main(['cloud-init', 'init'])
+ self._call_main(["cloud-init", "init"])
(name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual('init', name)
- self.assertEqual('init', parseargs.subcommand)
- self.assertEqual('init', parseargs.action[0])
- self.assertEqual('main_init', parseargs.action[1].__name__)
+ self.assertEqual("init", name)
+ self.assertEqual("init", parseargs.subcommand)
+ self.assertEqual("init", parseargs.action[0])
+ self.assertEqual("main_init", parseargs.action[1].__name__)
- @mock.patch('cloudinit.cmd.main.status_wrapper')
+ @mock.patch("cloudinit.cmd.main.status_wrapper")
def test_modules_subcommand_parser(self, m_status_wrapper):
"""The subcommand 'modules' calls status_wrapper passing modules."""
- self._call_main(['cloud-init', 'modules'])
+ self._call_main(["cloud-init", "modules"])
(name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual('modules', name)
- self.assertEqual('modules', parseargs.subcommand)
- self.assertEqual('modules', parseargs.action[0])
- self.assertEqual('main_modules', parseargs.action[1].__name__)
+ self.assertEqual("modules", name)
+ self.assertEqual("modules", parseargs.subcommand)
+ self.assertEqual("modules", parseargs.action[0])
+ self.assertEqual("main_modules", parseargs.action[1].__name__)
def test_conditional_subcommands_from_entry_point_sys_argv(self):
"""Subcommands from entry-point are properly parsed from sys.argv."""
@@ -151,14 +164,22 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.patchStdoutAndStderr(stdout=stdout)
expected_errors = [
- 'usage: cloud-init analyze', 'usage: cloud-init clean',
- 'usage: cloud-init collect-logs', 'usage: cloud-init devel',
- 'usage: cloud-init status']
+ "usage: cloud-init analyze",
+ "usage: cloud-init clean",
+ "usage: cloud-init collect-logs",
+ "usage: cloud-init devel",
+ "usage: cloud-init status",
+ ]
conditional_subcommands = [
- 'analyze', 'clean', 'collect-logs', 'devel', 'status']
+ "analyze",
+ "clean",
+ "collect-logs",
+ "devel",
+ "status",
+ ]
# The cloud-init entrypoint calls main without passing sys_argv
for subcommand in conditional_subcommands:
- with mock.patch('sys.argv', ['cloud-init', subcommand, '-h']):
+ with mock.patch("sys.argv", ["cloud-init", subcommand, "-h"]):
try:
cli.main()
except SystemExit as e:
@@ -168,9 +189,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_analyze_subcommand_parser(self):
"""The subcommand cloud-init analyze calls the correct subparser."""
- self._call_main(['cloud-init', 'analyze'])
+ self._call_main(["cloud-init", "analyze"])
# These subcommands only valid for cloud-init analyze script
- expected_subcommands = ['blame', 'show', 'dump']
+ expected_subcommands = ["blame", "show", "dump"]
error = self.stderr.getvalue()
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
@@ -180,94 +201,177 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
# Provide -h param to collect-logs to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'collect-logs', '-h'])
- self.assertIn('usage: cloud-init collect-log', stdout.getvalue())
+ self._call_main(["cloud-init", "collect-logs", "-h"])
+ self.assertIn("usage: cloud-init collect-log", stdout.getvalue())
def test_clean_subcommand_parser(self):
"""The subcommand cloud-init clean calls the subparser."""
# Provide -h param to clean to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'clean', '-h'])
- self.assertIn('usage: cloud-init clean', stdout.getvalue())
+ self._call_main(["cloud-init", "clean", "-h"])
+ self.assertIn("usage: cloud-init clean", stdout.getvalue())
def test_status_subcommand_parser(self):
"""The subcommand cloud-init status calls the subparser."""
# Provide -h param to status to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'status', '-h'])
- self.assertIn('usage: cloud-init status', stdout.getvalue())
+ self._call_main(["cloud-init", "status", "-h"])
+ self.assertIn("usage: cloud-init status", stdout.getvalue())
def test_devel_subcommand_parser(self):
"""The subcommand cloud-init devel calls the correct subparser."""
- self._call_main(['cloud-init', 'devel'])
+ self._call_main(["cloud-init", "devel"])
# These subcommands are only valid for the cloud-init schema script
- expected_subcommands = ['schema']
+ expected_subcommands = ["schema"]
error = self.stderr.getvalue()
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
def test_wb_devel_schema_subcommand_parser(self):
"""The subcommand cloud-init schema calls the correct subparser."""
- exit_code = self._call_main(['cloud-init', 'devel', 'schema'])
+ exit_code = self._call_main(["cloud-init", "devel", "schema"])
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
- 'Expected one of --config-file, --system or --docs arguments\n',
- self.stderr.getvalue())
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n",
+ self.stderr.getvalue(),
+ )
+
+ def test_wb_devel_schema_subcommand_doc_all_spot_check(self):
+ """Validate that doc content has correct values from known examples.
+
+ Ensure that schema doc is returned
+ """
- def test_wb_devel_schema_subcommand_doc_content(self):
- """Validate that doc content is sane from known examples."""
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but inspecting the code for debugging is not ideal;
+ # contextlib.redirect_stdout() provides similar behavior as a context
+ # manager
stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
- expected_doc_sections = [
- '**Supported distros:** all',
- '**Supported distros:** alpine, centos, debian, fedora',
- '**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
- '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
- ]
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(["cloud-init", "devel", "schema", "--docs", "all"])
+ expected_doc_sections = [
+ "**Supported distros:** all",
+ "**Supported distros:** almalinux, alpine, centos, "
+ "cloudlinux, debian, eurolinux, fedora, miraclelinux, "
+ "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, "
+ "virtuozzo",
+ "**Config schema**:\n **resize_rootfs:** "
+ "(true/false/noblock)",
+ "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
+ ]
stdout = stdout.getvalue()
for expected in expected_doc_sections:
self.assertIn(expected, stdout)
- @mock.patch('cloudinit.cmd.main.main_single')
+ def test_wb_devel_schema_subcommand_single_spot_check(self):
+ """Validate that doc content has correct values from known example.
+
+ Validate a single --docs arg
+ """
+
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but inspecting the code for debugging is not ideal;
+ # contextlib.redirect_stdout() provides similar behavior as a context
+ # manager
+ stdout = io.StringIO()
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(
+ ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
+ )
+ expected_doc_sections = [
+ "Runcmd\n------\n**Summary:** Run arbitrary commands"
+ ]
+ stdout = stdout.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stdout)
+
+ def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+ """Validate that doc content has correct values from known example.
+
+ Validate multiple --docs args
+ """
+
+ stdout = io.StringIO()
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(
+ [
+ "cloud-init",
+ "devel",
+ "schema",
+ "--docs",
+ "cc_runcmd",
+ "cc_resizefs",
+ ]
+ )
+ expected_doc_sections = [
+ "Runcmd\n------\n**Summary:** Run arbitrary commands",
+ "Resizefs\n--------\n**Summary:** Resize filesystem",
+ ]
+ stdout = stdout.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stdout)
+
+ def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+ """Validate that an invalid --docs value is rejected with an error.
+
+ Validate an invalid arg
+ """
+
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but inspecting the code for debugging is not ideal;
+ # contextlib.redirect_stdout() provides similar behavior as a context
+ # manager
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr):
+ self._call_main(
+ ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+ )
+ expected_doc_sections = ["Invalid --docs value"]
+ stderr = stderr.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stderr)
+
+ @mock.patch("cloudinit.cmd.main.main_single")
def test_single_subcommand(self, m_main_single):
"""The subcommand 'single' calls main_single with valid args."""
- self._call_main(['cloud-init', 'single', '--name', 'cc_ntp'])
+ self._call_main(["cloud-init", "single", "--name", "cc_ntp"])
(name, parseargs) = m_main_single.call_args_list[0][0]
- self.assertEqual('single', name)
- self.assertEqual('single', parseargs.subcommand)
- self.assertEqual('single', parseargs.action[0])
+ self.assertEqual("single", name)
+ self.assertEqual("single", parseargs.subcommand)
+ self.assertEqual("single", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
self.assertIsNone(parseargs.frequency)
- self.assertEqual('cc_ntp', parseargs.name)
+ self.assertEqual("cc_ntp", parseargs.name)
self.assertFalse(parseargs.report)
- @mock.patch('cloudinit.cmd.main.dhclient_hook.handle_args')
+ @mock.patch("cloudinit.cmd.main.dhclient_hook.handle_args")
def test_dhclient_hook_subcommand(self, m_handle_args):
"""The subcommand 'dhclient-hook' calls dhclient_hook with args."""
- self._call_main(['cloud-init', 'dhclient-hook', 'up', 'eth0'])
+ self._call_main(["cloud-init", "dhclient-hook", "up", "eth0"])
(name, parseargs) = m_handle_args.call_args_list[0][0]
- self.assertEqual('dhclient-hook', name)
- self.assertEqual('dhclient-hook', parseargs.subcommand)
- self.assertEqual('dhclient-hook', parseargs.action[0])
+ self.assertEqual("dhclient-hook", name)
+ self.assertEqual("dhclient-hook", parseargs.subcommand)
+ self.assertEqual("dhclient-hook", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
- self.assertEqual('up', parseargs.event)
- self.assertEqual('eth0', parseargs.interface)
+ self.assertEqual("up", parseargs.event)
+ self.assertEqual("eth0", parseargs.interface)
- @mock.patch('cloudinit.cmd.main.main_features')
+ @mock.patch("cloudinit.cmd.main.main_features")
def test_features_hook_subcommand(self, m_features):
"""The subcommand 'features' calls main_features with args."""
- self._call_main(['cloud-init', 'features'])
+ self._call_main(["cloud-init", "features"])
(name, parseargs) = m_features.call_args_list[0][0]
- self.assertEqual('features', name)
- self.assertEqual('features', parseargs.subcommand)
- self.assertEqual('features', parseargs.action[0])
+ self.assertEqual("features", name)
+ self.assertEqual("features", parseargs.subcommand)
+ self.assertEqual("features", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
+
# vi: ts=4 expandtab
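
A minimal, standalone illustration of the capture pattern the rewritten tests above use: contextlib.redirect_stdout() (and redirect_stderr()) temporarily route the stream into an io.StringIO so assertions can run against printed output without a custom patch helper. The cli_like_function below is a hypothetical stand-in, not part of cloud-init.

    import contextlib
    import io


    def cli_like_function():
        # Hypothetical stand-in for a CLI entry point that prints to stdout.
        print("usage: cloud-init devel schema --docs all")


    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        cli_like_function()
    assert "schema --docs" in captured.getvalue()
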
diff --git a/cloudinit/tests/test_conftest.py b/tests/unittests/test_conftest.py
index 6f1263a5..68903430 100644
--- a/cloudinit/tests/test_conftest.py
+++ b/tests/unittests/test_conftest.py
@@ -1,7 +1,7 @@
import pytest
from cloudinit import subp
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
class TestDisableSubpUsage:
@@ -19,7 +19,7 @@ class TestDisableSubpUsage:
@pytest.mark.allow_all_subp
def test_subp_usage_can_be_reenabled(self):
- subp.subp(['whoami'])
+ subp.subp(["whoami"])
@pytest.mark.allow_subp_for("whoami")
def test_subp_usage_can_be_conditionally_reenabled(self):
@@ -28,15 +28,15 @@ class TestDisableSubpUsage:
with pytest.raises(AssertionError) as excinfo:
subp.subp(["some", "args"])
assert "allowed: whoami" in str(excinfo.value)
- subp.subp(['whoami'])
+ subp.subp(["whoami"])
@pytest.mark.allow_subp_for("whoami", "bash")
def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
with pytest.raises(AssertionError) as excinfo:
subp.subp(["some", "args"])
assert "allowed: whoami,bash" in str(excinfo.value)
- subp.subp(['bash', '-c', 'true'])
- subp.subp(['whoami'])
+ subp.subp(["bash", "-c", "true"])
+ subp.subp(["whoami"])
@pytest.mark.allow_all_subp
@pytest.mark.allow_subp_for("bash")
@@ -60,6 +60,6 @@ class TestDisableSubpUsageInTestSubclass(CiTestCase):
_old_allowed_subp = self.allow_subp
self.allowed_subp = True
try:
- subp.subp(['bash', '-c', 'true'])
+ subp.subp(["bash", "-c", "true"])
finally:
self.allowed_subp = _old_allowed_subp
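
The allow_all_subp and allow_subp_for markers exercised above are enforced by the test suite's conftest. As a rough sketch only — assuming an autouse fixture that wraps subp.subp, and not cloud-init's actual conftest.py — such gating can be built from standard pytest APIs:

    import pytest

    from cloudinit import subp


    @pytest.fixture(autouse=True)
    def _gate_subp(request, monkeypatch):
        # Hypothetical gate: leave subp.subp untouched when allow_all_subp is set.
        if request.node.get_closest_marker("allow_all_subp"):
            return
        marker = request.node.get_closest_marker("allow_subp_for")
        allowed = marker.args if marker else ()
        real_subp = subp.subp

        def _checked_subp(args, *posargs, **kwargs):
            # args[0] is the command name, e.g. "whoami" or "bash".
            assert args[0] in allowed, "allowed: %s" % ",".join(allowed)
            return real_subp(args, *posargs, **kwargs)

        monkeypatch.setattr(subp, "subp", _checked_subp)
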
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index bfd07ecf..109e0208 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,9 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers as test_helpers
-
from cloudinit.cs_utils import Cepko
-
+from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
@@ -16,7 +14,7 @@ SERVER_CONTEXT = {
"smp": 1,
"tags": ["much server", "very performance"],
"uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889",
- "vnc_password": "9e84d6cb49e46379"
+ "vnc_password": "9e84d6cb49e46379",
}
@@ -25,7 +23,7 @@ class CepkoMock(Cepko):
return SERVER_CONTEXT
def get(self, key="", request_pattern=None):
- return SERVER_CONTEXT['tags']
+ return SERVER_CONTEXT["tags"]
# 2015-01-22 BAW: This test is completely useless because it only ever tests
@@ -34,33 +32,36 @@ class CepkoMock(Cepko):
class CepkoResultTests(test_helpers.TestCase):
def setUp(self):
self.c = Cepko()
- raise test_helpers.SkipTest('This test is completely useless')
+ raise test_helpers.SkipTest("This test is completely useless")
def test_getitem(self):
result = self.c.all()
- self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid'])
- self.assertEqual([], result['requirements'])
- self.assertEqual("much server", result['tags'][0])
- self.assertEqual(1, result['smp'])
+ self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result["uuid"])
+ self.assertEqual([], result["requirements"])
+ self.assertEqual("much server", result["tags"][0])
+ self.assertEqual(1, result["smp"])
def test_len(self):
self.assertEqual(len(SERVER_CONTEXT), len(self.c.all()))
def test_contains(self):
result = self.c.all()
- self.assertTrue('uuid' in result)
- self.assertFalse('uid' in result)
- self.assertTrue('meta' in result)
- self.assertFalse('ssh_public_key' in result)
+ self.assertTrue("uuid" in result)
+ self.assertFalse("uid" in result)
+ self.assertTrue("meta" in result)
+ self.assertFalse("ssh_public_key" in result)
def test_iter(self):
- self.assertEqual(sorted(SERVER_CONTEXT.keys()),
- sorted([key for key in self.c.all()]))
+ self.assertEqual(
+ sorted(SERVER_CONTEXT.keys()),
+ sorted([key for key in self.c.all()]),
+ )
def test_with_list_as_result(self):
- result = self.c.get('tags')
- self.assertEqual('much server', result[0])
- self.assertTrue('very performance' in result)
+ result = self.c.get("tags")
+ self.assertEqual("much server", result[0])
+ self.assertTrue("very performance" in result)
self.assertEqual(2, len(result))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index fb2b55e8..a5018a42 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -5,39 +5,33 @@
import gzip
import logging
import os
-from io import BytesIO, StringIO
-from unittest import mock
-
from email import encoders
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
+from io import BytesIO, StringIO
+from unittest import mock
import httpretty
from cloudinit import handlers
from cloudinit import helpers as c_helpers
-from cloudinit import log
-from cloudinit.settings import (PER_INSTANCE)
-from cloudinit import sources
-from cloudinit import stages
+from cloudinit import log, safeyaml, sources, stages
from cloudinit import user_data as ud
-from cloudinit import safeyaml
from cloudinit import util
-
-from cloudinit.tests import helpers
-
+from cloudinit.settings import PER_INSTANCE
+from tests.unittests import helpers
INSTANCE_ID = "i-testing"
class FakeDataSource(sources.DataSource):
-
- def __init__(self, userdata=None, vendordata=None):
+ def __init__(self, userdata=None, vendordata=None, vendordata2=None):
sources.DataSource.__init__(self, {}, None, None)
- self.metadata = {'instance-id': INSTANCE_ID}
+ self.metadata = {"instance-id": INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
+ self.vendordata2_raw = vendordata2
def count_messages(root):
@@ -51,7 +45,7 @@ def count_messages(root):
def gzip_text(text):
contents = BytesIO()
- f = gzip.GzipFile(fileobj=contents, mode='wb')
+ f = gzip.GzipFile(fileobj=contents, mode="wb")
f.write(util.encode_text(text))
f.flush()
f.close()
@@ -61,7 +55,6 @@ def gzip_text(text):
# FIXME: these tests shouldn't be checking log output??
# Weirddddd...
class TestConsumeUserData(helpers.FilesystemMockingTestCase):
-
def setUp(self):
super(TestConsumeUserData, self).setUp()
self._log = None
@@ -86,13 +79,13 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
return log_file
def test_simple_jsonp(self):
- blob = '''
+ blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
-'''
+"""
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
@@ -102,64 +95,84 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(2, len(cc))
- self.assertEqual('qux', cc['baz'])
- self.assertEqual('qux2', cc['bar'])
+ self.assertEqual("qux", cc["baz"])
+ self.assertEqual("qux2", cc["bar"])
- def test_simple_jsonp_vendor_and_user(self):
+ def test_simple_jsonp_vendor_and_vendor2_and_user(self):
# test that user-data wins over vendor
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/foobar", "value": "qux3" }
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
+ { "op": "add", "path": "/foo", "value": "quxC" },
+ { "op": "add", "path": "/corge", "value": "quxEE" }
+]
+"""
+ vendor2_blob = """
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/corge", "value": "quxD" },
+ { "op": "add", "path": "/grault", "value": "quxFF" },
+ { "op": "add", "path": "/foobar", "value": "quxGG" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(
+ user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob
+ )
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertEqual('quxC', cfg['foo'])
+ self.assertIn("vendor_data", cfg)
+ self.assertIn("vendor_data2", cfg)
+ # Confirm that vendordata2 overrides vendordata, and that
+ # userdata overrides both
+ self.assertEqual("qux", cfg["baz"])
+ self.assertEqual("qux2", cfg["bar"])
+ self.assertEqual("qux3", cfg["foobar"])
+ self.assertEqual("quxC", cfg["foo"])
+ self.assertEqual("quxD", cfg["corge"])
+ self.assertEqual("quxFF", cfg["grault"])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" },
{ "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -168,35 +181,37 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertNotIn('foo', cfg)
+ self.assertEqual("qux", cfg["baz"])
+ self.assertEqual("qux2", cfg["bar"])
+ self.assertNotIn("foo", cfg)
def test_mixed_cloud_config(self):
- blob_cc = '''
+ blob_cc = """
#cloud-config
a: b
c: d
-'''
+"""
message_cc = MIMEBase("text", "cloud-config")
message_cc.set_payload(blob_cc)
- blob_jp = '''
+ blob_jp = """
#cloud-config-jsonp
[
{ "op": "replace", "path": "/a", "value": "c" },
{ "op": "remove", "path": "/c" }
]
-'''
+"""
- message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp = MIMEBase("text", "cloud-config-jsonp")
message_jp.set_payload(blob_jp)
message = MIMEMultipart()
@@ -211,26 +226,26 @@ c: d
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(1, len(cc))
- self.assertEqual('c', cc['a'])
+ self.assertEqual("c", cc["a"])
def test_cloud_config_as_x_shell_script(self):
- blob_cc = '''
+ blob_cc = """
#cloud-config
a: b
c: d
-'''
+"""
message_cc = MIMEBase("text", "x-shellscript")
message_cc.set_payload(blob_cc)
- blob_jp = '''
+ blob_jp = """
#cloud-config-jsonp
[
{ "op": "replace", "path": "/a", "value": "c" },
{ "op": "remove", "path": "/c" }
]
-'''
+"""
- message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp = MIMEBase("text", "cloud-config-jsonp")
message_jp.set_payload(blob_jp)
message = MIMEMultipart()
@@ -245,19 +260,19 @@ c: d
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(1, len(cc))
- self.assertEqual('c', cc['a'])
+ self.assertEqual("c", cc["a"])
def test_vendor_user_yaml_cloud_config(self):
- vendor_blob = '''
+ vendor_blob = """
#cloud-config
a: b
name: vendor
run:
- x
- y
-'''
+"""
- user_blob = '''
+ user_blob = """
#cloud-config
a: c
vendor_data:
@@ -266,7 +281,7 @@ vendor_data:
name: user
run:
- z
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -275,108 +290,122 @@ run:
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('c', cfg['a'])
- self.assertEqual('user', cfg['name'])
- self.assertNotIn('x', cfg['run'])
- self.assertNotIn('y', cfg['run'])
- self.assertIn('z', cfg['run'])
+ self.assertIn("vendor_data", cfg)
+ self.assertEqual("c", cfg["a"])
+ self.assertEqual("user", cfg["name"])
+ self.assertNotIn("x", cfg["run"])
+ self.assertNotIn("y", cfg["run"])
+ self.assertIn("z", cfg["run"])
def test_vendordata_script(self):
- vendor_blob = '''
+ vendor_blob = """
#!/bin/bash
echo "test"
-'''
+"""
+ vendor2_blob = """
+#!/bin/bash
+echo "dynamic test"
+"""
- user_blob = '''
+ user_blob = """
#cloud-config
vendor_data:
enabled: True
prefix: /bin/true
-'''
+"""
new_root = self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(
+ user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob
+ )
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
+ vendor_script = initer.paths.get_ipath_cur("vendor_scripts")
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
self.assertTrue(os.path.exists(vendor_script_fns))
def test_merging_cloud_config(self):
- blob = '''
+ blob = """
#cloud-config
a: b
e: f
run:
- b
- c
-'''
+"""
message1 = MIMEBase("text", "cloud-config")
message1.set_payload(blob)
- blob2 = '''
+ blob2 = """
#cloud-config
a: e
e: g
run:
- stuff
- morestuff
-'''
+"""
message2 = MIMEBase("text", "cloud-config")
- message2['X-Merge-Type'] = ('dict(recurse_array,'
- 'recurse_str)+list(append)+str(append)')
+ message2[
+ "X-Merge-Type"
+ ] = "dict(recurse_array,recurse_str)+list(append)+str(append)"
message2.set_payload(blob2)
- blob3 = '''
+ blob3 = """
#cloud-config
e:
- 1
- 2
- 3
p: 1
-'''
+"""
message3 = MIMEBase("text", "cloud-config")
message3.set_payload(blob3)
messages = [message1, message2, message3]
- paths = c_helpers.Paths({}, ds=FakeDataSource(''))
+ paths = c_helpers.Paths({}, ds=FakeDataSource(""))
cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
self.reRoot()
- cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
- None)
+ cloud_cfg.handle_part(
+ None, handlers.CONTENT_START, None, None, None, None
+ )
for i, m in enumerate(messages):
headers = dict(m)
fn = "part-%s" % (i + 1)
payload = m.get_payload(decode=True)
- cloud_cfg.handle_part(None, headers['Content-Type'],
- fn, payload, None, headers)
- cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
- None)
- contents = util.load_file(paths.get_ipath('cloud_config'))
+ cloud_cfg.handle_part(
+ None, headers["Content-Type"], fn, payload, None, headers
+ )
+ cloud_cfg.handle_part(
+ None, handlers.CONTENT_END, None, None, None, None
+ )
+ contents = util.load_file(paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
- self.assertEqual(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
- self.assertEqual(contents['a'], 'be')
- self.assertEqual(contents['e'], [1, 2, 3])
- self.assertEqual(contents['p'], 1)
+ self.assertEqual(contents["run"], ["b", "c", "stuff", "morestuff"])
+ self.assertEqual(contents["a"], "be")
+ self.assertEqual(contents["e"], [1, 2, 3])
+ self.assertEqual(contents["p"], 1)
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning."""
@@ -385,35 +414,37 @@ p: 1
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_mime_gzip_compressed(self):
"""Tests that individual message gzip encoding works."""
def gzip_part(text):
- return MIMEApplication(gzip_text(text), 'gzip')
+ return MIMEApplication(gzip_text(text), "gzip")
- base_content1 = '''
+ base_content1 = """
#cloud-config
a: 2
-'''
+"""
- base_content2 = '''
+ base_content2 = """
#cloud-config
b: 3
c: 4
-'''
+"""
- message = MIMEMultipart('test')
+ message = MIMEMultipart("test")
message.attach(gzip_part(base_content1))
message.attach(gzip_part(base_content2))
ci = stages.Init()
@@ -425,9 +456,9 @@ c: 4
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
self.assertEqual(3, len(contents))
- self.assertEqual(2, contents['a'])
- self.assertEqual(3, contents['b'])
- self.assertEqual(4, contents['c'])
+ self.assertEqual(2, contents["a"])
+ self.assertEqual(3, contents["b"])
+ self.assertEqual(4, contents["c"])
def test_mime_text_plain(self):
"""Mime message of type text/plain is ignored but shows warning."""
@@ -437,15 +468,17 @@ c: 4
message.set_payload("Just text")
ci.datasource = FakeDataSource(message.as_string().encode())
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_shellscript(self):
"""Raw text starting #!/bin/sh is treated as script."""
@@ -456,15 +489,18 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_text_x_shellscript(self):
"""Mime message of type text/x-shellscript is treated as script."""
@@ -477,15 +513,18 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_text_plain_shell(self):
"""Mime type text/plain starting #!/bin/sh is treated as script."""
@@ -498,41 +537,48 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_application_octet_stream(self):
"""Mime type application/octet-stream is ignored but shows warning."""
self.reRoot()
ci = stages.Init()
message = MIMEBase("application", "octet-stream")
- message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
+ message.set_payload(b"\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc")
encoders.encode_base64(message)
ci.datasource = FakeDataSource(message.as_string().encode())
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (application/octet-stream)",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_cloud_config_archive(self):
- non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
- data = [{'content': '#cloud-config\npassword: gocubs\n'},
- {'content': '#cloud-config\nlocale: chicago\n'},
- {'content': non_decodable}]
- message = b'#cloud-config-archive\n' + safeyaml.dumps(data).encode()
+ non_decodable = b"\x11\xc9\xb4gTH\xee\x12"
+ data = [
+ {"content": "#cloud-config\npassword: gocubs\n"},
+ {"content": "#cloud-config\nlocale: chicago\n"},
+ {"content": non_decodable},
+ ]
+ message = b"#cloud-config-archive\n" + safeyaml.dumps(data).encode()
self.reRoot()
ci = stages.Init()
@@ -545,35 +591,35 @@ c: 4
# consuming the user-data provided should write 'cloud_config' file
# which will have our yaml in it.
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
mockobj.side_effect = fsstore
ci.fetch()
ci.consume_data()
cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
- self.assertEqual(cfg.get('password'), 'gocubs')
- self.assertEqual(cfg.get('locale'), 'chicago')
+ self.assertEqual(cfg.get("password"), "gocubs")
+ self.assertEqual(cfg.get("locale"), "chicago")
- @mock.patch('cloudinit.util.read_conf_with_confd')
+ @mock.patch("cloudinit.util.read_conf_with_confd")
def test_dont_allow_user_data(self, mock_cfg):
mock_cfg.return_value = {"allow_userdata": False}
# test that user-data is ignored but vendor-data is kept
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -582,21 +628,22 @@ c: 4
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('quxA', cfg['baz'])
- self.assertEqual('quxB', cfg['bar'])
- self.assertEqual('quxC', cfg['foo'])
+ self.assertIn("vendor_data", cfg)
+ self.assertEqual("quxA", cfg["baz"])
+ self.assertEqual("quxB", cfg["bar"])
+ self.assertEqual("quxC", cfg["foo"])
class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
-
def setUp(self):
TestConsumeUserData.setUp(self)
helpers.HttprettyTestCase.setUp(self)
@@ -605,14 +652,14 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
TestConsumeUserData.tearDown(self)
helpers.HttprettyTestCase.tearDown(self)
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
def test_include(self, mock_sleep):
"""Test #include."""
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n' % included_url
+ blob = "#include\n%s\n" % included_url
self.reRoot()
ci = stages.Init()
@@ -621,20 +668,20 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
- self.assertTrue(cc.get('included'))
+ self.assertTrue(cc.get("included"))
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
def test_include_bad_url(self, mock_sleep):
"""Test #include with a bad URL."""
- bad_url = 'http://bad/forbidden'
- bad_data = '#cloud-config\nbad: true\n'
+ bad_url = "http://bad/forbidden"
+ bad_data = "#cloud-config\nbad: true\n"
httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n%s' % (bad_url, included_url)
+ blob = "#include\n%s\n%s" % (bad_url, included_url)
self.reRoot()
ci = stages.Init()
@@ -642,26 +689,26 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.fetch()
with self.assertRaises(Exception) as context:
ci.consume_data()
- self.assertIn('403', str(context.exception))
+ self.assertIn("403", str(context.exception))
with self.assertRaises(FileNotFoundError):
util.load_file(ci.paths.get_ipath("cloud_config"))
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
@mock.patch(
"cloudinit.user_data.features.ERROR_ON_USER_DATA_FAILURE", False
)
def test_include_bad_url_no_fail(self, mock_sleep):
"""Test #include with a bad URL and failure disabled"""
- bad_url = 'http://bad/forbidden'
- bad_data = '#cloud-config\nbad: true\n'
+ bad_url = "http://bad/forbidden"
+ bad_data = "#cloud-config\nbad: true\n"
httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n%s' % (bad_url, included_url)
+ blob = "#include\n%s\n%s" % (bad_url, included_url)
self.reRoot()
ci = stages.Init()
@@ -670,32 +717,33 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.fetch()
ci.consume_data()
- self.assertIn("403 Client Error: Forbidden for url: %s" % bad_url,
- log_file.getvalue())
+ self.assertIn(
+ "403 Client Error: Forbidden for url: %s" % bad_url,
+ log_file.getvalue(),
+ )
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
- self.assertIsNone(cc.get('bad'))
- self.assertTrue(cc.get('included'))
+ self.assertIsNone(cc.get("bad"))
+ self.assertTrue(cc.get("included"))
class TestUDProcess(helpers.ResourceUsingTestCase):
-
def test_bytes_in_userdata(self):
- msg = b'#cloud-config\napt_update: True\n'
+ msg = b"#cloud-config\napt_update: True\n"
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
self.assertTrue(count_messages(message) == 1)
def test_string_in_userdata(self):
- msg = '#cloud-config\napt_update: True\n'
+ msg = "#cloud-config\napt_update: True\n"
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
self.assertTrue(count_messages(message) == 1)
def test_compressed_in_userdata(self):
- msg = gzip_text('#cloud-config\napt_update: True\n')
+ msg = gzip_text("#cloud-config\napt_update: True\n")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
@@ -703,15 +751,14 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
class TestConvertString(helpers.TestCase):
-
def test_handles_binary_non_utf8_decodable(self):
"""Printable unicode (not utf8-decodable) is safely converted."""
- blob = b'#!/bin/bash\necho \xc3\x84\n'
+ blob = b"#!/bin/bash\necho \xc3\x84\n"
msg = ud.convert_string(blob)
self.assertEqual(blob, msg.get_payload(decode=True))
def test_handles_binary_utf8_decodable(self):
- blob = b'\x32\x32'
+ blob = b"\x32\x32"
msg = ud.convert_string(blob)
self.assertEqual(blob, msg.get_payload(decode=True))
@@ -731,24 +778,31 @@ class TestConvertString(helpers.TestCase):
class TestFetchBaseConfig(helpers.TestCase):
def test_only_builtin_gets_builtin(self):
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': None,
- 'util.read_conf_from_cmdline': None,
- 'read_runtime_config': {'return_value': {}}},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": None,
+ "util.read_conf_from_cmdline": None,
+ "read_runtime_config": {"return_value": {}},
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(util.get_builtin_cfg(), ret)
def test_conf_d_overrides_defaults(self):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
- test_value = 'test'
+ test_value = "test"
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd':
- {'return_value': {test_key: test_value}},
- 'util.read_conf_from_cmdline': None,
- 'read_runtime_config': {'return_value': {}}},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {
+ "return_value": {test_key: test_value}
+ },
+ "util.read_conf_from_cmdline": None,
+ "read_runtime_config": {"return_value": {}},
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
self.assertEqual(ret, builtin)
@@ -756,47 +810,64 @@ class TestFetchBaseConfig(helpers.TestCase):
def test_cmdline_overrides_defaults(self):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
- test_value = 'test'
+ test_value = "test"
cmdline = {test_key: test_value}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_from_cmdline': {'return_value': cmdline},
- 'util.read_conf_with_confd': None,
- 'read_runtime_config': None},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ "util.read_conf_with_confd": None,
+ "read_runtime_config": None,
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
self.assertEqual(ret, builtin)
def test_cmdline_overrides_confd_runtime_and_defaults(self):
- builtin = {'key1': 'value0', 'key3': 'other2'}
- conf_d = {'key1': 'value1', 'key2': 'other1'}
- cmdline = {'key3': 'other3', 'key2': 'other2'}
- runtime = {'key3': 'runtime3'}
+ builtin = {"key1": "value0", "key3": "other2"}
+ conf_d = {"key1": "value1", "key2": "other1"}
+ cmdline = {"key3": "other3", "key2": "other2"}
+ runtime = {"key3": "runtime3"}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': {'return_value': conf_d},
- 'util.get_builtin_cfg': {'return_value': builtin},
- 'read_runtime_config': {'return_value': runtime},
- 'util.read_conf_from_cmdline': {'return_value': cmdline}},
- stages.fetch_base_config)
- self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2',
- 'key3': 'other3'})
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {"return_value": conf_d},
+ "util.get_builtin_cfg": {"return_value": builtin},
+ "read_runtime_config": {"return_value": runtime},
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ },
+ stages.fetch_base_config,
+ )
+ self.assertEqual(
+ ret, {"key1": "value1", "key2": "other2", "key3": "other3"}
+ )
def test_order_precedence_is_builtin_system_runtime_cmdline(self):
- builtin = {'key1': 'builtin0', 'key3': 'builtin3'}
- conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'}
- runtime = {'key1': 'runtime1', 'key2': 'runtime2'}
- cmdline = {'key1': 'cmdline1'}
+ builtin = {"key1": "builtin0", "key3": "builtin3"}
+ conf_d = {"key1": "confd1", "key2": "confd2", "keyconfd1": "kconfd1"}
+ runtime = {"key1": "runtime1", "key2": "runtime2"}
+ cmdline = {"key1": "cmdline1"}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': {'return_value': conf_d},
- 'util.get_builtin_cfg': {'return_value': builtin},
- 'util.read_conf_from_cmdline': {'return_value': cmdline},
- 'read_runtime_config': {'return_value': runtime},
- },
- stages.fetch_base_config)
- self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2',
- 'key3': 'builtin3', 'keyconfd1': 'kconfd1'})
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {"return_value": conf_d},
+ "util.get_builtin_cfg": {"return_value": builtin},
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ "read_runtime_config": {"return_value": runtime},
+ },
+ stages.fetch_base_config,
+ )
+ self.assertEqual(
+ ret,
+ {
+ "key1": "cmdline1",
+ "key2": "runtime2",
+ "key3": "builtin3",
+ "keyconfd1": "kconfd1",
+ },
+ )
+
# vi: ts=4 expandtab
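
A side note on the precedence asserted by test_simple_jsonp_vendor_and_vendor2_and_user above: vendor-data is applied first, vendor-data2 overrides it, and user-data overrides both. The outcome reduces to plain last-writer-wins merging, illustrated below with literal dicts rather than cloud-init's JSON-patch machinery.

    vendor = {"baz": "quxA", "bar": "quxB", "foo": "quxC", "corge": "quxEE"}
    vendor2 = {"corge": "quxD", "grault": "quxFF", "foobar": "quxGG"}
    user = {"baz": "qux", "bar": "qux2", "foobar": "qux3"}

    # Later sources win, mirroring the assertions in the test above.
    merged = {**vendor, **vendor2, **user}
    assert merged == {
        "baz": "qux",
        "bar": "qux2",
        "foobar": "qux3",
        "foo": "quxC",
        "corge": "quxD",
        "grault": "quxFF",
    }
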
diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_datasource/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
deleted file mode 100644
index eb2828d5..00000000
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import functools
-import httpretty
-import os
-from unittest import mock
-
-from cloudinit import helpers
-from cloudinit.sources import DataSourceAliYun as ay
-from cloudinit.tests import helpers as test_helpers
-
-DEFAULT_METADATA = {
- 'instance-id': 'aliyun-test-vm-00',
- 'eipv4': '10.0.0.1',
- 'hostname': 'test-hostname',
- 'image-id': 'm-test',
- 'launch-index': '0',
- 'mac': '00:16:3e:00:00:00',
- 'network-type': 'vpc',
- 'private-ipv4': '192.168.0.1',
- 'serial-number': 'test-string',
- 'vpc-cidr-block': '192.168.0.0/16',
- 'vpc-id': 'test-vpc',
- 'vswitch-id': 'test-vpc',
- 'vswitch-cidr-block': '192.168.0.0/16',
- 'zone-id': 'test-zone-1',
- 'ntp-conf': {'ntp_servers': [
- 'ntp1.aliyun.com',
- 'ntp2.aliyun.com',
- 'ntp3.aliyun.com']},
- 'source-address': ['http://mirrors.aliyun.com',
- 'http://mirrors.aliyuncs.com'],
- 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
- 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
-}
-
-DEFAULT_USERDATA = """\
-#cloud-config
-
-hostname: localhost"""
-
-
-def register_mock_metaserver(base_url, data):
- def register_helper(register, base_url, body):
- if isinstance(body, str):
- register(base_url, body)
- elif isinstance(body, list):
- register(base_url.rstrip('/'), '\n'.join(body) + '\n')
- elif isinstance(body, dict):
- if not body:
- register(base_url.rstrip('/') + '/', 'not found',
- status_code=404)
- vals = []
- for k, v in body.items():
- if isinstance(v, (str, list)):
- suffix = k.rstrip('/')
- else:
- suffix = k.rstrip('/') + '/'
- vals.append(suffix)
- url = base_url.rstrip('/') + '/' + suffix
- register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
-
- register = functools.partial(httpretty.register_uri, httpretty.GET)
- register_helper(register, base_url, data)
-
-
-class TestAliYunDatasource(test_helpers.HttprettyTestCase):
- def setUp(self):
- super(TestAliYunDatasource, self).setUp()
- cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
- distro = {}
- paths = helpers.Paths({'run_dir': self.tmp_dir()})
- self.ds = ay.DataSourceAliYun(cfg, distro, paths)
- self.metadata_address = self.ds.metadata_urls[0]
-
- @property
- def default_metadata(self):
- return DEFAULT_METADATA
-
- @property
- def default_userdata(self):
- return DEFAULT_USERDATA
-
- @property
- def metadata_url(self):
- return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'meta-data') + '/'
-
- @property
- def userdata_url(self):
- return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'user-data')
-
- # EC2 provides an instance-identity document which must return 404 here
- # for this test to pass.
- @property
- def default_identity(self):
- return {}
-
- @property
- def identity_url(self):
- return os.path.join(self.metadata_address,
- self.ds.min_metadata_version,
- 'dynamic', 'instance-identity')
-
- def regist_default_server(self):
- register_mock_metaserver(self.metadata_url, self.default_metadata)
- register_mock_metaserver(self.userdata_url, self.default_userdata)
- register_mock_metaserver(self.identity_url, self.default_identity)
-
- def _test_get_data(self):
- self.assertEqual(self.ds.metadata, self.default_metadata)
- self.assertEqual(self.ds.userdata_raw,
- self.default_userdata.encode('utf8'))
-
- def _test_get_sshkey(self):
- pub_keys = [v['openssh-key'] for (_, v) in
- self.default_metadata['public-keys'].items()]
- self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
-
- def _test_get_iid(self):
- self.assertEqual(self.default_metadata['instance-id'],
- self.ds.get_instance_id())
-
- def _test_host_name(self):
- self.assertEqual(self.default_metadata['hostname'],
- self.ds.get_hostname())
-
- @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- def test_with_mock_server(self, m_is_aliyun):
- m_is_aliyun.return_value = True
- self.regist_default_server()
- ret = self.ds.get_data()
- self.assertEqual(True, ret)
- self.assertEqual(1, m_is_aliyun.call_count)
- self._test_get_data()
- self._test_get_sshkey()
- self._test_get_iid()
- self._test_host_name()
- self.assertEqual('aliyun', self.ds.cloud_name)
- self.assertEqual('ec2', self.ds.platform)
- self.assertEqual(
- 'metadata (http://100.100.100.200)', self.ds.subplatform)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
- """If is_aliyun returns false, then get_data should return False."""
- m_is_aliyun.return_value = False
- self.regist_default_server()
- ret = self.ds.get_data()
- self.assertEqual(1, m_is_aliyun.call_count)
- self.assertEqual(False, ret)
-
- def test_parse_public_keys(self):
- public_keys = {}
- self.assertEqual(ay.parse_public_keys(public_keys), [])
-
- public_keys = {'key-pair-0': 'ssh-key-0'}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']])
-
- public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
- self.assertEqual(set(ay.parse_public_keys(public_keys)),
- set([public_keys['key-pair-0'],
- public_keys['key-pair-1']]))
-
- public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0'])
-
- public_keys = {'key-pair-0': {'openssh-key': []}}
- self.assertEqual(ay.parse_public_keys(public_keys), [])
-
- public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']['openssh-key']])
-
- public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
- 'ssh-key-1']}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0']['openssh-key'])
-
-
-class TestIsAliYun(test_helpers.CiTestCase):
- ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
- read_dmi_data_expected = [mock.call('system-product-name')]
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_true_on_aliyun_product(self, m_read_dmi_data):
- """Should return true if the dmi product data has expected value."""
- m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(True, ret)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_false_on_empty_string(self, m_read_dmi_data):
- """Should return false on empty value returned."""
- m_read_dmi_data.return_value = ""
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(False, ret)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_false_on_unknown_string(self, m_read_dmi_data):
- """Should return false on an unrelated string."""
- m_read_dmi_data.return_value = "cubs win"
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(False, ret)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
deleted file mode 100644
index e363c1f9..00000000
--- a/tests/unittests/test_datasource/test_azure.py
+++ /dev/null
@@ -1,2999 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit.sources import (
- UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
-from cloudinit.util import (b64e, decode_binary, load_file, write_file,
- MountFailedError, json_dumps, load_json)
-from cloudinit.version import version_string as vs
-from cloudinit.tests.helpers import (
- HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
- ExitStack, resourceLocation)
-from cloudinit.sources.helpers import netlink
-
-import copy
-import crypt
-import httpretty
-import json
-import os
-import requests
-import stat
-import xml.etree.ElementTree as ET
-import yaml
-
-
-def construct_valid_ovf_env(data=None, pubkeys=None,
- userdata=None, platform_settings=None):
- if data is None:
- data = {'HostName': 'FOOHOST'}
- if pubkeys is None:
- pubkeys = {}
-
- content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
- <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- """
- for key, dval in data.items():
- if isinstance(dval, dict):
- val = dict(dval).get('text')
- attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
- in dict(dval).items() if k != 'text'])
- else:
- val = dval
- attrs = ""
- content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
- if userdata:
- content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
- if pubkeys:
- content += "<SSH><PublicKeys>\n"
- for fp, path, value in pubkeys:
- content += " <PublicKey>"
- if fp and path:
- content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
- (fp, path))
- if value:
- content += "<Value>%s</Value>" % value
- content += "</PublicKey>\n"
- content += "</PublicKeys></SSH>"
- content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
- <ProvisionGuestAgent>false</ProvisionGuestAgent>
- <GuestAgentPackageName i:nil="true" />"""
- if platform_settings:
- for k, v in platform_settings.items():
- content += "<%s>%s</%s>\n" % (k, v, k)
- if "PreprovisionedVMType" not in platform_settings:
- content += """<PreprovisionedVMType i:nil="true" />"""
- content += """</PlatformSettings></wa:PlatformSettingsSection>
-</Environment>"""
-
- return content
-
-
-NETWORK_METADATA = {
- "compute": {
- "location": "eastus2",
- "name": "my-hostname",
- "offer": "UbuntuServer",
- "osType": "Linux",
- "placementGroupId": "",
- "platformFaultDomain": "0",
- "platformUpdateDomain": "0",
- "publisher": "Canonical",
- "resourceGroupName": "srugroup1",
- "sku": "19.04-DAILY",
- "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
- "tags": "",
- "version": "19.04.201906190",
- "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
- "vmScaleSetName": "",
- "vmSize": "Standard_DS1_v2",
- "zone": "",
- "publicKeys": [
- {
- "keyData": "key1",
- "path": "path1"
- }
- ]
- },
- "network": {
- "interface": [
- {
- "macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
- }
- ]
- }
- }
- ]
- }
-}
-
-SECONDARY_INTERFACE = {
- "macAddress": "220D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.1.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.1.5",
- }
- ]
- }
-}
-
-IMDS_NETWORK_METADATA = {
- "interface": [
- {
- "macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
- }
- ]
- }
- }
- ]
-}
-
-MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
-
-
-class TestParseNetworkConfig(CiTestCase):
-
- maxDiff = None
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_single_ipv4_nic_configuration(self, m_driver):
- """parse_network_config emits dhcp on single nic with ipv4"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_increases_route_metric_for_non_primary_nics(self, m_driver):
- """parse_network_config increases route-metric for each nic"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
- """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp6': False,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- nic1['ipv6'] = {
- "subnet": [{"address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
- }
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- third_intf['ipv6'] = {
- "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
- }
- imds_data['network']['interface'].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
- """parse_network_config emits primary ipv4 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- nic1['ipv6'] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
- }
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
- """parse_network_config emits primary ipv6 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- # Secondary ipv6 addresses currently ignored/unconfigured
- nic1['ipv6'] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
- }
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value='hv_netvsc')
- def test_match_driver_for_netvsc(self, m_driver):
- """parse_network_config emits driver when using netvsc."""
- expected = {'ethernets': {
- 'eth0': {
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {
- 'macaddress': '00:0d:3a:04:75:98',
- 'driver': 'hv_netvsc',
- },
- 'set-name': 'eth0'
- }}, 'version': 2}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
- self, m_fallback_config, m_driver):
- """parse_network_config generates fallback network config when the
- IMDS instance metadata is corrupted/invalid, such as when
- network metadata is not present.
- """
- imds_metadata_missing_network_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_network_metadata['network']
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_network_metadata))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
- self, m_fallback_config, m_driver):
- """parse_network_config generates fallback network config when the
- IMDS instance metadata is corrupted/invalid, such as when
- network interface metadata is not present.
- """
- imds_metadata_missing_interface_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_interface_metadata['network']['interface']
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_interface_metadata))
-
-
-class TestGetMetadataFromIMDS(HttprettyTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestGetMetadataFromIMDS, self).setUp()
- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
-
- @mock.patch(MOCKPATH + 'readurl')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_does_not_dhcp_if_network_is_up(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Do not perform DHCP setup when nic is already up."""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
- self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=3))
-
- m_net_is_up.assert_called_with('eth9')
- m_dhcp.assert_not_called()
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_compute_metadata_uses_compute_url(
- self, m_net_is_up, m_dhcp, m_readurl):
-        """Make sure readurl is called with the correct url when accessing
-        compute metadata."""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.compute)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_network_metadata_uses_network_url(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- network metadata"""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.network)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance/network?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_default_metadata_uses_compute_url(
- self, m_net_is_up, m_dhcp, m_readurl):
-        """Make sure readurl is called with the default (compute) url when
-        no metadata type is specified."""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_performs_dhcp_when_network_is_down(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Perform DHCP setup when nic is not up."""
- m_net_is_up.return_value = False
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
-
- self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=2))
-
- m_net_is_up.assert_called_with('eth9')
- m_dhcp.assert_called_with(mock.ANY, 'eth9')
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- m_readurl.assert_called_with(
- self.network_md_url, exception_cb=mock.ANY,
- headers={'Metadata': 'true'}, retries=2,
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
-
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_from_imds_empty_when_no_imds_present(
- self, m_net_is_up, m_sleep):
- """Return empty dict when IMDS network metadata is absent."""
- httpretty.register_uri(
- httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
- body={}, status=404)
-
- m_net_is_up.return_value = True # skips dhcp
-
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2))
-
- m_net_is_up.assert_called_with('eth9')
- self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- @mock.patch('requests.Session.request')
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_from_imds_retries_on_timeout(
- self, m_net_is_up, m_sleep, m_request):
- """Retry IMDS network metadata on timeout errors."""
-
- self.attempt = 0
- m_request.side_effect = requests.Timeout('Fake Connection Timeout')
-
- def retry_callback(request, uri, headers):
- self.attempt += 1
- raise requests.Timeout('Fake connection timeout')
-
- httpretty.register_uri(
- httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
- body=retry_callback)
-
- m_net_is_up.return_value = True # skips dhcp
-
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3))
-
- m_net_is_up.assert_called_with('eth9')
- self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
-
-class TestAzureDataSource(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestAzureDataSource, self).setUp()
- self.tmp = self.tmp_dir()
-
- # patch cloud_dir, so our 'seed_dir' is guaranteed empty
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
-
- self.patches = ExitStack()
- self.addCleanup(self.patches.close)
-
- self.patches.enter_context(mock.patch.object(
- dsaz, '_get_random_seed', return_value='wild'))
- self.m_get_metadata_from_imds = self.patches.enter_context(
- mock.patch.object(
- dsaz, 'get_metadata_from_imds',
- mock.MagicMock(return_value=NETWORK_METADATA)))
- self.m_fallback_nic = self.patches.enter_context(
- mock.patch('cloudinit.sources.net.find_fallback_nic',
- return_value='eth9'))
- self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
- mock.patch.object(
- dsaz, 'maybe_remove_ubuntu_network_config_scripts',
- mock.MagicMock()))
- super(TestAzureDataSource, self).setUp()
-
- def apply_patches(self, patches):
- for module, name, new in patches:
- self.patches.enter_context(mock.patch.object(module, name, new))
-
- def _get_mockds(self):
- sysctl_out = "dev.storvsc.3.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.2.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.1.%pnpinfo: "\
- "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\
- "deviceid=00000000-0001-8899-0000-000000000000\n"
- camctl_devbus = """
-scbus0 on ata0 bus 0
-scbus1 on ata1 bus 0
-scbus2 on blkvsc0 bus 0
-scbus3 on blkvsc1 bus 0
-scbus4 on storvsc2 bus 0
-scbus5 on storvsc3 bus 0
-scbus-1 on xpt0 bus 0
- """
- camctl_dev = """
-<Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0)
-<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
-<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
- """
- self.apply_patches([
- (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock(
- return_value=sysctl_out)),
- (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock(
- return_value=camctl_devbus)),
- (dsaz, 'get_camcontrol_dev', mock.MagicMock(
- return_value=camctl_dev))
- ])
- return dsaz
-
- def _get_ds(self, data, agent_command=None, distro='ubuntu',
- apply_network=None):
-
- def dsdevs():
- return data.get('dsdevs', [])
-
- def _invoke_agent(cmd):
- data['agent_invoked'] = cmd
-
- def _wait_for_files(flist, _maxwait=None, _naplen=None):
- data['waited'] = flist
- return []
-
- def _pubkeys_from_crt_files(flist):
- data['pubkey_files'] = flist
- return ["pubkey_from: %s" % f for f in flist]
-
- if data.get('ovfcontent') is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': data['ovfcontent']})
-
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- self.m_is_platform_viable = mock.MagicMock(autospec=True)
- self.m_get_metadata_from_fabric = mock.MagicMock(
- return_value={'public-keys': []})
- self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
- self.m_ephemeral_dhcpv4 = mock.MagicMock()
- self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
-
- self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
-
- def _dmi_mocks(key):
- if key == 'system-uuid':
- return self.instance_id
- elif key == 'chassis-asset-tag':
- return '7783-7084-3265-9085-8269-3286-77'
-
- self.apply_patches([
- (dsaz, 'list_possible_azure_ds_devs', dsdevs),
- (dsaz, 'invoke_agent', _invoke_agent),
- (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
- (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
- (dsaz, 'get_hostname', mock.MagicMock()),
- (dsaz, 'set_hostname', mock.MagicMock()),
- (dsaz, '_is_platform_viable',
- self.m_is_platform_viable),
- (dsaz, 'get_metadata_from_fabric',
- self.m_get_metadata_from_fabric),
- (dsaz, 'report_failure_to_fabric',
- self.m_report_failure_to_fabric),
- (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4),
- (dsaz, 'EphemeralDHCPv4WithReporting',
- self.m_ephemeral_dhcpv4_with_reporting),
- (dsaz, 'get_boot_telemetry', mock.MagicMock()),
- (dsaz, 'get_system_info', mock.MagicMock()),
- (dsaz.subp, 'which', lambda x: True),
- (dsaz.dmi, 'read_dmi_data', mock.MagicMock(
- side_effect=_dmi_mocks)),
- (dsaz.util, 'wait_for_files', mock.MagicMock(
- side_effect=_wait_for_files)),
- ])
-
- if isinstance(distro, str):
- distro_cls = distros.fetch(distro)
- distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths)
- dsrc = dsaz.DataSourceAzure(
- data.get('sys_cfg', {}), distro=distro, paths=self.paths)
- if agent_command is not None:
- dsrc.ds_cfg['agent_command'] = agent_command
- if apply_network is not None:
- dsrc.ds_cfg['apply_network_config'] = apply_network
-
- return dsrc
-
- def _get_and_setup(self, dsrc):
- ret = dsrc.get_data()
- if ret:
- dsrc.setup(True)
- return ret
-
- def xml_equals(self, oxml, nxml):
- """Compare two sets of XML to make sure they are equal"""
-
- def create_tag_index(xml):
- et = ET.fromstring(xml)
- ret = {}
- for x in et.iter():
- ret[x.tag] = x
- return ret
-
- def tags_exists(x, y):
- for tag in x.keys():
- assert tag in y
- for tag in y.keys():
- assert tag in x
-
- def tags_equal(x, y):
- for x_val in x.values():
- y_val = y.get(x_val.tag)
- assert x_val.text == y_val.text
-
- old_cnt = create_tag_index(oxml)
- new_cnt = create_tag_index(nxml)
- tags_exists(old_cnt, new_cnt)
- tags_equal(old_cnt, new_cnt)
-
- def xml_notequals(self, oxml, nxml):
- try:
- self.xml_equals(oxml, nxml)
- except AssertionError:
- return
- raise AssertionError("XML is the same")
-
- def test_get_resource_disk(self):
- ds = self._get_mockds()
- dev = ds.get_resource_disk_on_freebsd(1)
- self.assertEqual("da1", dev)
-
- def test_not_is_platform_viable_seed_should_return_no_datasource(self):
-        """When _is_platform_viable is False, get_data returns no datasource."""
- # Return a non-matching asset tag value
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = False
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
-        # Assert that for non-viable platforms,
-        # there is no communication with the Azure datasource.
- self.assertEqual(
- 0,
- m_crawl_metadata.call_count)
- self.assertEqual(
- 0,
- m_report_failure.call_count)
-
- def test_platform_viable_but_no_devs_should_return_no_datasource(self):
-        """When the Azure platform is viable (indicated by a matching
-        asset tag) but no devs (candidate sources for crawling the Azure
-        datasource) are present at all, that is an unexpected fatal error:
-        report failure to Azure.
-        """
- data = {}
- dsrc = self._get_ds(data)
- with mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- self.m_is_platform_viable.return_value = True
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
- self.assertEqual(
- 1,
- m_report_failure.call_count)
-
- def test_crawl_metadata_exception_returns_no_datasource(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
-
- def test_crawl_metadata_exception_should_report_failure_with_msg(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- m_crawl_metadata.side_effect = Exception
- dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- m_report_failure.assert_called_once_with(
- description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
-
- def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
- dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- self.assertIn(
- "Could not crawl Azure metadata",
- self.logs.getvalue())
-
- def test_basic_seed_dir(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
- self.assertTrue(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
- self.assertEqual(
- 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform)
-
- def test_basic_dev_file(self):
- """When a device path is used, present that in subplatform."""
- data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
- dsrc = self._get_ds(data)
- with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
- m_mount_cb.return_value = (
- {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
- self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.userdata_raw, 'ud')
- self.assertEqual(dsrc.metadata['local-hostname'], 'me')
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
- self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform)
-
- def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
- """get_data on non-Ubuntu will not remove ubuntu net scripts."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data, distro='debian')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_not_called()
-
- def test_get_data_on_ubuntu_will_remove_network_scripts(self):
- """get_data will remove ubuntu net scripts on Ubuntu distro."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data, distro='ubuntu')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_called_once_with()
-
- def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
-        """When apply_network_config is False, do not remove scripts on Ubuntu."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data, distro='ubuntu')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_not_called()
-
- def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
- """Return all structured metadata and cache no class attributes."""
- yaml_cfg = "{agent_command: my_command}\n"
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- expected_cfg = {
- 'PreprovisionedVMType': None,
- 'PreprovisionedVm': False,
- 'datasource': {'Azure': {'agent_command': 'my_command'}},
- 'system_info': {'default_user': {'name': u'myuser'}}}
- expected_metadata = {
- 'azure_data': {
- 'configurationsettype': 'LinuxProvisioningConfiguration'},
- 'imds': NETWORK_METADATA,
- 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
- 'local-hostname': u'myhost',
- 'random_seed': 'wild'}
-
- crawled_metadata = dsrc.crawl_metadata()
-
- self.assertCountEqual(
- crawled_metadata.keys(),
- ['cfg', 'files', 'metadata', 'userdata_raw'])
- self.assertEqual(crawled_metadata['cfg'], expected_cfg)
- self.assertEqual(
- list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
- self.assertIn(
- b'<HostName>myhost</HostName>',
- crawled_metadata['files']['ovf-env.xml'])
- self.assertEqual(crawled_metadata['metadata'], expected_metadata)
- self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
- self.assertEqual(dsrc.userdata_raw, None)
- self.assertEqual(dsrc.metadata, {})
- self.assertEqual(dsrc._metadata_imds, UNSET)
- self.assertFalse(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
-
- def test_crawl_metadata_raises_invalid_metadata_on_error(self):
- """crawl_metadata raises an exception on invalid ovf-env.xml."""
- data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
- ' syntax error: line 1, column 0')
- with self.assertRaises(InvalidMetaDataException) as cm:
- dsrc.crawl_metadata()
- self.assertEqual(str(cm.exception), error_msg)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func, m_report_ready, m_write, m_dhcp
- ):
- """If reprovisioning, report ready at the end"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, m_report_ready.call_count)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
- def test_crawl_metadata_waits_for_nic_on_savable_vms(
- self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp
- ):
-        """If reprovisioning a Savable VM, wait for nics and report ready
-        at the end."""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, report_ready_func.call_count)
- self.assertEqual(1, detect_nics.call_count)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
- @mock.patch('os.path.isfile')
- def test_detect_nics_when_marker_present(
- self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write,
- m_dhcp):
- """If reprovisioning, wait for nic attach if marker present"""
-
- def is_file_ret(key):
- return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE
-
- is_file.side_effect = is_file_ret
- ovfenv = construct_valid_ovf_env()
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
-
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, report_ready_func.call_count)
- self.assertEqual(1, detect_nics.call_count)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
- def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_report_ready,
- m_media_switch, m_write
- ):
- """If reprovisioning, report ready using the obtained lease"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
-
- with mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
-
- # For this mock, net should not be up,
- # so that cached ephemeral won't be used.
- # This is so that a NEW ephemeral dhcp lease will be discovered
- # and used instead.
- m_dsrc_distro_networking_is_up.return_value = False
-
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = lease
- m_media_switch.return_value = None
-
- reprovision_ovfenv = construct_valid_ovf_env()
- m_readurl.return_value = url_helper.StringResponse(
- reprovision_ovfenv.encode('utf-8'))
-
- dsrc.crawl_metadata()
- self.assertEqual(2, m_report_ready.call_count)
- m_report_ready.assert_called_with(lease=lease)
-
- def test_waagent_d_has_0700_perms(self):
- # we expect /var/lib/waagent to be created 0700
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertTrue(os.path.isdir(self.waagent_d))
- self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
-
- def test_user_cfg_set_agent_command_plain(self):
-        # set dscfg via plaintext
-        # yaml_cfg must contain xml-friendly plaintext;
-        # not all plaintext is expected to work.
- yaml_cfg = "{agent_command: my_command}\n"
- cfg = yaml.safe_load(yaml_cfg)
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_network_config_set_from_imds(self, m_driver):
- """Datasource.network_config returns IMDS network data."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}}},
- 'version': 2}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual(expected_network_config, dsrc.network_config)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_network_config_set_from_imds_route_metric_for_secondary_nic(
- self, m_driver):
- """Datasource.network_config adds route-metric to secondary nics."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}},
- 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
-
- self.m_get_metadata_from_imds.return_value = imds_data
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual(expected_network_config, dsrc.network_config)
-
- def test_availability_zone_set_from_imds(self):
-        """Datasource.availability_zone returns IMDS platformFaultDomain."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual('0', dsrc.availability_zone)
-
- def test_region_set_from_imds(self):
- """Datasource.region returns IMDS region location."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual('eastus2', dsrc.region)
-
- def test_user_cfg_set_agent_command(self):
-        # set dscfg via base64-encoded yaml
- cfg = {'agent_command': "my_command"}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- def test_sys_cfg_set_agent_command(self):
- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], '_COMMAND')
-
- def test_sys_cfg_set_never_destroy_ntfs(self):
- sys_cfg = {'datasource': {'Azure': {
- 'never_destroy_ntfs': 'user-supplied-value'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
- 'user-supplied-value')
-
- def test_username_used(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
- "myuser")
-
- def test_password_given(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
-
-        # default user should be the updated username and not be locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertFalse(defuser['lock_passwd'])
-        # passwd is a crypt-formatted string: $id$salt$encrypted
-        # hashing the plaintext with the salt (everything up to the final
-        # '$') should reproduce the value after the '$'
- pos = defuser['passwd'].rfind("$") + 1
- self.assertEqual(defuser['passwd'],
- crypt.crypt(odata['UserPassword'],
- defuser['passwd'][0:pos]))
-
- # the same hashed value should also be present in cfg['password']
- self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
-
- def test_user_not_locked_if_password_redacted(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
-
-        # default user should be the updated username and not be locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertIn('lock_passwd', defuser)
- self.assertFalse(defuser['lock_passwd'])
-
- def test_userdata_plain(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
-
- def test_userdata_found(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
-
- def test_cfg_has_pubkeys_fingerprint(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
-
- def test_cfg_has_pubkeys_value(self):
-        # make sure that the provided key is used over the fingerprint
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_cfg_has_no_fingerprint_has_value(self):
-        # test that the value is used when no fingerprint is provided
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_default_ephemeral(self):
- # make sure the ephemeral device works
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
-
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- dsaz.RESOURCE_DISK_PATH)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
-
- def test_provide_disk_aliases(self):
-        # Make sure that the user can affect disk aliases
- dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(dscfg)),
- 'encoding': 'base64'}}
- usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
- 'ephemeral0': False}}
- userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
-
- ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
- data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
- self.assertTrue(cfg)
-
- def test_userdata_arrives(self):
- userdata = "This is my user-data"
- xml = construct_valid_ovf_env(data={}, userdata=userdata)
- data = {'ovfcontent': xml}
- dsrc = self._get_ds(data)
- dsrc.get_data()
-
- self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
-
- def test_password_redacted_in_ovf(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
-
- self.assertTrue(ret)
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
-
-        # The XML should not be the same since the user password is redacted
- on_disk_ovf = load_file(ovf_env_path)
- self.xml_notequals(data['ovfcontent'], on_disk_ovf)
-
-        # Make sure the redacted password on disk is not used by cloud-init
- self.assertNotEqual(dsrc.cfg.get('password'),
- dsaz.DEF_PASSWD_REDACTION)
-
-        # Make sure that the password was redacted in the on-disk ovf-env.xml
- et = ET.fromstring(on_disk_ovf)
- for elem in et.iter():
- if 'UserPassword' in elem.tag:
- self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
-
- def test_ovf_env_arrives_in_waagent_dir(self):
- xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
- # we expect that the ovf-env.xml file is copied there.
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
- self.assertTrue(os.path.exists(ovf_env_path))
- self.xml_equals(xml, load_file(ovf_env_path))
-
- def test_ovf_can_include_unicode(self):
- xml = construct_valid_ovf_env(data={})
- xml = u'\ufeff{0}'.format(xml)
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- def test_dsaz_report_ready_returns_true_when_report_succeeds(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.assertTrue(dsrc._report_ready(lease=mock.MagicMock()))
-
- def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.side_effect = Exception
- self.assertFalse(dsrc._report_ready(lease=mock.MagicMock()))
-
- def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure())
- self.assertEqual(
- 1,
- self.m_report_failure_to_fabric.call_count)
-
- def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # setup mocks to allow using cached ephemeral dhcp lease
- m_dsrc_distro_networking_is_up.return_value = True
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- m_ephemeral_dhcp_ctx.lease = test_lease
-
- # We expect 3 calls to report_failure_to_fabric,
- # because we try 3 different methods of calling report failure.
- # The different methods are attempted in the following order:
- # 1. Using cached ephemeral dhcp context to report failure to Azure
- # 2. Using new ephemeral dhcp to report failure to Azure
- # 3. Using fallback lease to report failure to Azure
- self.m_report_failure_to_fabric.side_effect = Exception
- self.assertFalse(dsrc._report_failure())
- self.assertEqual(
- 3,
- self.m_report_failure_to_fabric.call_count)
-
- def test_dsaz_report_failure_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- test_msg = 'Test report failure description message'
- self.assertTrue(dsrc._report_failure(description=test_msg))
- self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=test_msg)
-
- def test_dsaz_report_failure_no_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure()) # no description msg
- self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=None)
-
- def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # setup mocks to allow using cached ephemeral dhcp lease
- m_dsrc_distro_networking_is_up.return_value = True
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- m_ephemeral_dhcp_ctx.lease = test_lease
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with cached ephemeral dhcp lease option 245
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
- # ensure cached ephemeral is cleaned
- self.assertEqual(
- 1,
- m_ephemeral_dhcp_ctx.clean_network.call_count)
-
- def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # net is not up and cannot use cached ephemeral dhcp
- m_dsrc_distro_networking_is_up.return_value = False
- # setup ephemeral dhcp lease discovery mock
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = test_lease
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with the newly discovered
- # ephemeral dhcp lease option 245
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
- def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # net is not up and cannot use cached ephemeral dhcp
- m_dsrc_distro_networking_is_up.return_value = False
- # ephemeral dhcp discovery failure,
- # so cannot use a new ephemeral dhcp
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with fallback lease
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY,
- fallback_lease_file=dsrc.dhclient_lease_file)
-
- def test_exception_fetching_fabric_data_doesnt_propagate(self):
- """Errors communicating with fabric should warn, but return True."""
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.side_effect = Exception
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- def test_fabric_data_included_in_metadata(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.return_value = {'test': 'value'}
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual('value', dsrc.metadata['test'])
-
- def test_instance_id_endianness(self):
- """Return the previous iid when dmi uuid is the byteswapped iid."""
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- # byte-swapped previous
- write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
- ds.get_data()
- self.assertEqual(
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
- # not byte-swapped previous
- write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
- ds.get_data()
- self.assertEqual(
- 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
-
- def test_instance_id_from_dmidecode_used(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
- def test_instance_id_from_dmidecode_used_for_builtin(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
- @mock.patch(MOCKPATH + 'util.is_FreeBSD')
- @mock.patch(MOCKPATH + '_check_freebsd_cdrom')
- def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom,
- m_is_FreeBSD):
- """On FreeBSD, possible devs should show /dev/cd0."""
- m_is_FreeBSD.return_value = True
- m_check_fbsd_cdrom.return_value = True
- self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0'])
- self.assertEqual(
- [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_imds_network_config(self, mock_fallback, m_driver):
- """Network config is generated from IMDS network data when present."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- expected_cfg = {
- 'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}},
- 'version': 2}
-
- self.assertEqual(expected_cfg, dsrc.network_config)
- mock_fallback.assert_not_called()
-
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_imds_network_ignored_when_apply_network_config_false(
- self, mock_fallback, mock_dd, mock_devlist, mock_get_mac):
- """When apply_network_config is False, use fallback instead of IMDS."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
-
- dsrc = self._get_ds(data)
- self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.network_config, fallback_config)
-
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config', autospec=True)
- def test_fallback_network_config(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
- """On absent IMDS network data, generate network fallback config."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
-
- dsrc = self._get_ds(data)
- # Represent empty response from network imds
- self.m_get_metadata_from_imds.return_value = {}
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- netconfig = dsrc.network_config
- self.assertEqual(netconfig, fallback_config)
- mock_fallback.assert_called_with(
- blacklist_drivers=['mlx4_core', 'mlx5_core'],
- config_driver=True)
-
- @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True)
- @mock.patch(MOCKPATH + 'util.is_FreeBSD')
- def test_blacklist_through_distro(
- self, m_is_freebsd, m_net_get_interfaces):
- """Verify Azure DS updates blacklist drivers in the distro's
- networking object."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsrc = self._get_ds(data, distro=distro)
- dsrc.get_data()
- self.assertEqual(distro.networking.blacklist_drivers,
- dsaz.BLACKLIST_DRIVERS)
-
- m_is_freebsd.return_value = False
- distro.networking.get_interfaces_by_mac()
- m_net_get_interfaces.assert_called_with(
- blacklist_drivers=dsaz.BLACKLIST_DRIVERS)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_no_args(self, m_subp):
- dsaz.get_hostname()
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_string_arg(self, m_subp):
- dsaz.get_hostname(hostname_command="hostname")
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_iterable_arg(self, m_subp):
- dsaz.get_hostname(hostname_command=("hostname",))
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(
- 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
- def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
- }
- dsrc = self._get_ds(data)
- dsrc.get_data()
- dsrc.setup(True)
- ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key1'])
- self.assertEqual(m_parse_certificates.call_count, 0)
-
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- def test_get_public_ssh_keys_without_imds(
- self,
- m_get_metadata_from_imds):
- m_get_metadata_from_imds.return_value = dict()
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
- }
- dsrc = self._get_ds(data)
- dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
- dsrc.get_data()
- dsrc.setup(True)
- ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key2'])
-
-
-class TestAzureBounce(CiTestCase):
-
- with_logs = True
-
- def mock_out_azure_moving_parts(self):
- self.patches.enter_context(
- mock.patch.object(dsaz, 'invoke_agent'))
- self.patches.enter_context(
- mock.patch.object(dsaz.util, 'wait_for_files'))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
- mock.MagicMock(return_value=[])))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'get_metadata_from_fabric',
- mock.MagicMock(return_value={})))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'get_metadata_from_imds',
- mock.MagicMock(return_value={})))
- self.patches.enter_context(
- mock.patch.object(dsaz.subp, 'which', lambda x: True))
- self.patches.enter_context(mock.patch.object(
- dsaz, '_get_random_seed', return_value='wild'))
-
- def _dmi_mocks(key):
- if key == 'system-uuid':
- return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
- elif key == 'chassis-asset-tag':
- return '7783-7084-3265-9085-8269-3286-77'
- raise RuntimeError('should not get here')
-
- self.patches.enter_context(
- mock.patch.object(dsaz.dmi, 'read_dmi_data',
- mock.MagicMock(side_effect=_dmi_mocks)))
-
- def setUp(self):
- super(TestAzureBounce, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.patches = ExitStack()
- self.mock_out_azure_moving_parts()
- self.get_hostname = self.patches.enter_context(
- mock.patch.object(dsaz, 'get_hostname'))
- self.set_hostname = self.patches.enter_context(
- mock.patch.object(dsaz, 'set_hostname'))
- self.subp = self.patches.enter_context(
- mock.patch(MOCKPATH + 'subp.subp'))
- self.find_fallback_nic = self.patches.enter_context(
- mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
-
- def tearDown(self):
- self.patches.close()
- super(TestAzureBounce, self).tearDown()
-
- def _get_ds(self, ovfcontent=None, agent_command=None):
- if ovfcontent is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': ovfcontent})
- dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- if agent_command is not None:
- dsrc.ds_cfg['agent_command'] = agent_command
- return dsrc
-
- def _get_and_setup(self, dsrc):
- ret = dsrc.get_data()
- if ret:
- dsrc.setup(True)
- return ret
-
- def get_ovf_env_with_dscfg(self, hostname, cfg):
- odata = {
- 'HostName': hostname,
- 'dscfg': {
- 'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'
- }
- }
- return construct_valid_ovf_env(data=odata)
-
- def test_disabled_bounce_does_not_change_hostname(self):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
- ds.get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_disabled_bounce_does_not_perform_bounce(
- self, perform_hostname_bounce):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
- ds.get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_same_hostname_does_not_change_hostname(self):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
- ds.get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_unchanged_hostname_does_not_perform_bounce(
- self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
- ds.get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'force'}}
- dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_bounce_skipped_on_ifupdown_absent(self):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'force'}}
- dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
- agent_command=['not', '__builtin__'])
- patch_path = MOCKPATH + 'subp.which'
- with mock.patch(patch_path) as m_which:
- m_which.return_value = None
- ret = self._get_and_setup(dsrc)
- self.assertEqual([mock.call('ifup')], m_which.call_args_list)
- self.assertTrue(ret)
- self.assertIn(
- "Skipping network bounce: ifupdown utils aren't present.",
- self.logs.getvalue())
-
- def test_different_hostnames_sets_hostname(self):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(expected_hostname,
- self.set_hostname.call_args_list[0][0][0])
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_different_hostnames_performs_bounce(
- self, perform_hostname_bounce):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_different_hostnames_sets_hostname_back(self):
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_failure_in_bounce_still_resets_host_name(
- self, perform_hostname_bounce):
- perform_hostname_bounce.side_effect = Exception
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- @mock.patch.object(dsaz, 'get_boot_telemetry')
- def test_environment_correct_for_bounce_command(
- self, mock_get_boot_telemetry):
- interface = 'int0'
- hostname = 'my-new-host'
- old_hostname = 'my-old-host'
- self.get_hostname.return_value = old_hostname
- cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg(hostname, cfg)
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, self.subp.call_count)
- bounce_env = self.subp.call_args[1]['env']
- self.assertEqual(interface, bounce_env['interface'])
- self.assertEqual(hostname, bounce_env['hostname'])
- self.assertEqual(old_hostname, bounce_env['old_hostname'])
-
- @mock.patch.object(dsaz, 'get_boot_telemetry')
- def test_default_bounce_command_ifup_used_by_default(
- self, mock_get_boot_telemetry):
- cfg = {'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, self.subp.call_count)
- bounce_args = self.subp.call_args[1]['args']
- self.assertEqual(
- dsaz.BOUNCE_COMMAND_IFUP, bounce_args)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_set_hostname_option_can_disable_bounce(
- self, perform_hostname_bounce):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_set_hostname_option_can_disable_hostname_set(self):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_set_hostname_failed_disable_bounce(
- self, perform_hostname_bounce):
- cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}}
- self.get_hostname.return_value = "old-hostname"
- self.set_hostname.side_effect = Exception
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
-class TestLoadAzureDsDir(CiTestCase):
- """Tests for load_azure_ds_dir."""
-
- def setUp(self):
- self.source_dir = self.tmp_dir()
- super(TestLoadAzureDsDir, self).setUp()
-
- def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
- """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit."""
- with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
- dsaz.load_azure_ds_dir(self.source_dir)
- self.assertEqual(
- 'No ovf-env file found',
- str(context_manager.exception))
-
- def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
- """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
- ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
- with open(ovf_path, 'wb') as stream:
- stream.write(b'invalid xml')
- with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
- dsaz.load_azure_ds_dir(self.source_dir)
- self.assertEqual(
- 'Invalid ovf-env.xml: syntax error: line 1, column 0',
- str(context_manager.exception))
-
-
-class TestReadAzureOvf(CiTestCase):
-
- def test_invalid_xml_raises_non_azure_ds(self):
- invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(dsaz.BrokenAzureDataSource,
- dsaz.read_azure_ovf, invalid_xml)
-
- def test_load_with_pubkeys(self):
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- content = construct_valid_ovf_env(pubkeys=pubkeys)
- (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
- for mypk in mypklist:
- self.assertIn(mypk, cfg['_pubkeys'])
-
-
-class TestCanDevBeReformatted(CiTestCase):
- warning_file = 'dataloss_warning_readme.txt'
-
- def _domock(self, mockpath, sattr=None):
- patcher = mock.patch(mockpath)
- setattr(self, sattr, patcher.start())
- self.addCleanup(patcher.stop)
-
- def patchup(self, devs):
- bypath = {}
- for path, data in devs.items():
- bypath[path] = data
- if 'realpath' in data:
- bypath[data['realpath']] = data
- for ppath, pdata in data.get('partitions', {}).items():
- bypath[ppath] = pdata
- if 'realpath' in data:
- bypath[pdata['realpath']] = pdata
-
- def realpath(d):
- return bypath[d].get('realpath', d)
-
- def partitions_on_device(devpath):
- parts = bypath.get(devpath, {}).get('partitions', {})
- ret = []
- for path, data in parts.items():
- ret.append((data.get('num'), realpath(path)))
- # return sorted by partition number
- return sorted(ret, key=lambda d: d[0])
-
- def mount_cb(device, callback, mtype, update_env_for_mount):
- self.assertEqual('ntfs', mtype)
- self.assertEqual('C', update_env_for_mount.get('LANG'))
- p = self.tmp_dir()
- for f in bypath.get(device).get('files', []):
- write_file(os.path.join(p, f), content=f)
- return callback(p)
-
- def has_ntfs_fs(device):
- return bypath.get(device, {}).get('fs') == 'ntfs'
-
- p = MOCKPATH
- self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
- self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
- self._domock(p + "util.mount_cb", 'm_mount_cb')
- self._domock(p + "os.path.realpath", 'm_realpath')
- self._domock(p + "os.path.exists", 'm_exists')
- self._domock(p + "util.SeLinuxGuard", 'm_selguard')
-
- self.m_exists.side_effect = lambda p: p in bypath
- self.m_realpath.side_effect = realpath
- self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
- self.m_mount_cb.side_effect = mount_cb
- self.m_partitions_on_device.side_effect = partitions_on_device
- self.m_selguard.__enter__ = mock.Mock(return_value=False)
- self.m_selguard.__exit__ = mock.Mock()
-
- def test_three_partitions_is_false(self):
- """A disk with 3 partitions can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2},
- '/dev/sda3': {'num': 3},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("3 or more", msg.lower())
-
- def test_no_partitions_is_false(self):
- """A disk with no partitions can not be formatted."""
- self.patchup({'/dev/sda': {}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not partitioned", msg.lower())
-
- def test_two_partitions_not_ntfs_false(self):
- """2 partitions and 2nd not ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not ntfs", msg.lower())
-
- def test_two_partitions_ntfs_populated_false(self):
- """2 partitions and populated ntfs fs on 2nd can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs',
- 'files': ['secret.txt']},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("files on it", msg.lower())
-
- def test_two_partitions_ntfs_empty_is_true(self):
- """2 partitions and empty ntfs fs on 2nd can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_not_ntfs_false(self):
- """1 partition witih fs other than ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'zfs'},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not ntfs", msg.lower())
-
- def test_one_partition_ntfs_populated_false(self):
- """1 mountable ntfs partition with many files can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['file1.txt', 'file2.exe']},
- }}})
- with mock.patch.object(dsaz.LOG, 'warning') as warning:
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- wmsg = warning.call_args[0][0]
- self.assertIn("looks like you're using NTFS on the ephemeral disk",
- wmsg)
- self.assertFalse(value)
- self.assertIn("files on it", msg.lower())
-
- def test_one_partition_ntfs_empty_is_true(self):
- """1 mountable ntfs partition and no files can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
- """1 mountable ntfs partition and only warn file can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_through_realpath_is_true(self):
- """A symlink to a device with 1 ntfs partition can be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_three_partition_through_realpath_is_false(self):
- """A symlink to a device with 3 partitions can not be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'},
- epath + '-part2': {'num': 2, 'fs': 'ext3',
- 'realpath': '/dev/sdb2'},
- epath + '-part3': {'num': 3, 'fs': 'ext',
- 'realpath': '/dev/sdb3'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("3 or more", msg.lower())
-
- def test_ntfs_mount_errors_true(self):
- """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
-
- error_msgs = [
- "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
- "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES
- ]
-
- for err_msg in error_msgs:
- self.m_mount_cb.side_effect = MountFailedError(
- "Failed mounting %s to %s due to: \nUnexpected.\n%s" %
- ('/dev/sda', '/fake-tmp/dir', err_msg))
-
- value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn('cannot mount NTFS, assuming', msg)
-
- def test_never_destroy_ntfs_config_false(self):
- """Normally formattable situation with never_destroy_ntfs set."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=True)
- self.assertFalse(value)
- self.assertIn("config says to never destroy NTFS "
- "(datasource.Azure.never_destroy_ntfs)", msg)
-
-
-class TestClearCachedData(CiTestCase):
-
- def test_clear_cached_attrs_clears_imds(self):
- """All class attributes are reset to defaults, including imds data."""
- tmp = self.tmp_dir()
- paths = helpers.Paths(
- {'cloud_dir': tmp, 'run_dir': tmp})
- dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
- clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
- dsrc.metadata = 'md'
- dsrc.userdata = 'ud'
- dsrc._metadata_imds = 'imds'
- dsrc._dirty_cache = True
- dsrc.clear_cached_attrs()
- self.assertEqual(
- [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds],
- clean_values)
-
-
-class TestAzureNetExists(CiTestCase):
-
- def test_azure_net_must_exist_for_legacy_objpkl(self):
- """DataSourceAzureNet must exist for old obj.pkl files
- that reference it."""
- self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
-
-
-class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
-
- def test_read_azure_ovf_with_true_flag(self):
- """The read_azure_ovf method should set the PreprovisionedVM
- cfg flag if the proper setting is present."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
-
- def test_read_azure_ovf_with_false_flag(self):
- """The read_azure_ovf method should set the PreprovisionedVM
- cfg flag to false if the proper setting is false."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "False"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
-
- def test_read_azure_ovf_without_flag(self):
- """The read_azure_ovf method should not set the
- PreprovisionedVM cfg flag."""
- content = construct_valid_ovf_env()
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
- self.assertEqual(None, cfg["PreprovisionedVMType"])
-
- def test_read_azure_ovf_with_running_type(self):
- """The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Running."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Running",
- "PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Running", cfg['PreprovisionedVMType'])
-
- def test_read_azure_ovf_with_savable_type(self):
- """The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Savable."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Savable", cfg['PreprovisionedVMType'])
-
-
-@mock.patch('os.path.isfile')
-class TestPreprovisioningShouldReprovision(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningShouldReprovision, self).setUp()
- tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch(MOCKPATH + 'util.write_file')
- def test__should_reprovision_with_true_cfg(self, isfile, write_f):
- """The _should_reprovision method should return true with config
- flag present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'PreprovisionedVm': True}, None)))
-
- def test__should_reprovision_with_file_existing(self, isfile):
- """The _should_reprovision method should return True if the sentinal
- exists."""
- isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'preprovisionedvm': False}, None)))
-
- def test__should_reprovision_returns_false(self, isfile):
- """The _should_reprovision method should return False
- if config and sentinel are not present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
-
- @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
- def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
- """_reprovision will poll IMDS."""
- isfile.return_value = False
- hostname = "myhost"
- username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
- _poll_imds.return_value = construct_valid_ovf_env(data=odata)
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- dsa._reprovision()
- _poll_imds.assert_called_with()
-
-
-class TestPreprovisioningHotAttachNics(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningHotAttachNics, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
- autospec=True)
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- def test_nic_detach_writes_marker(self, m_writefile, m_detach):
- """When we detect that a nic gets detached, we write a marker for it"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- nl_sock = mock.MagicMock()
- dsa._wait_for_nic_detach(nl_sock)
- m_detach.assert_called_with(nl_sock)
- self.assertEqual(1, m_detach.call_count)
- m_writefile.assert_called_with(
- dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
-
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_reports_ready_and_waits_for_detach(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
- m_writefile):
- """Report ready first and then wait for nic detach"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(1, m_report_ready.call_count)
- self.assertEqual(1, m_detach.call_count)
- self.assertEqual(1, m_writefile.call_count)
- self.assertEqual(1, m_dhcp.call_count)
- m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
- mock.ANY)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_skips_report_ready_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
- """Skip reporting ready if we already have a marker file."""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
- def isfile(key):
- return key == dsaz.REPORTED_READY_MARKER_FILE
-
- m_isfile.side_effect = isfile
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(0, m_report_ready.call_count)
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(1, m_detach.call_count)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_skips_nic_detach_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
- """Skip wait for nic detach if it already happened."""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
- m_isfile.return_value = True
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(0, m_report_ready.call_count)
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(0, m_detach.call_count)
-
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
- def test_wait_for_nic_attach_if_no_fallback_interface(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
- """Wait for nic attach if we do not have a fallback interface"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
-
- m_isfile.return_value = True
- m_attach.return_value = "eth0"
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
- m_imds.return_value = IMDS_NETWORK_METADATA
- m_fallback_if.return_value = None
-
- dsa._wait_for_all_nics_ready()
-
- self.assertEqual(0, m_detach.call_count)
- self.assertEqual(1, m_attach.call_count)
- self.assertEqual(1, m_dhcpv4.call_count)
- self.assertEqual(1, m_imds.call_count)
- self.assertEqual(1, m_link_up.call_count)
- m_link_up.assert_called_with(mock.ANY, "eth0")
-
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
- def test_wait_for_nic_attach_multinic_attach(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
- """Wait for nic attach if we do not have a fallback interface"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- m_attach_call_count = 0
-
- def nic_attach_ret(nl_sock, nics_found):
- nonlocal m_attach_call_count
- if m_attach_call_count == 0:
- m_attach_call_count = m_attach_call_count + 1
- return "eth0"
- return "eth1"
-
- def network_metadata_ret(ifname, retries, type):
- # Simulate two NICs by adding the same one twice.
- md = IMDS_NETWORK_METADATA
- md['interface'].append(md['interface'][0])
- if ifname == "eth0":
- return md
- raise requests.Timeout('Fake connection timeout')
-
- m_isfile.return_value = True
- m_attach.side_effect = nic_attach_ret
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
- m_imds.side_effect = network_metadata_ret
- m_fallback_if.return_value = None
-
- dsa._wait_for_all_nics_ready()
-
- self.assertEqual(0, m_detach.call_count)
- self.assertEqual(2, m_attach.call_count)
- # DHCP and network metadata calls will only happen on the primary NIC.
- self.assertEqual(1, m_dhcpv4.call_count)
- self.assertEqual(1, m_imds.call_count)
- self.assertEqual(2, m_link_up.call_count)
-
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
- def test_wait_for_link_up_returns_if_already_up(
- self, m_is_link_up):
- """Waiting for link to be up should return immediately if the link is
- already up."""
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
- m_is_link_up.return_value = True
-
- dsa.wait_for_link_up("eth0")
- self.assertEqual(1, m_is_link_up.call_count)
-
- @mock.patch(MOCKPATH + 'util.write_file')
- @mock.patch('cloudinit.net.read_sys_net')
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
- def test_wait_for_link_up_writes_to_device_file(
- self, m_is_link_up, m_read_sys_net, m_writefile):
- """Waiting for link to be up should return immediately if the link is
- already up."""
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
- callcount = 0
-
- def linkup(key):
- nonlocal callcount
- if callcount == 0:
- callcount += 1
- return False
- return True
-
- m_is_link_up.side_effect = linkup
-
- dsa.wait_for_link_up("eth0")
- self.assertEqual(2, m_is_link_up.call_count)
- self.assertEqual(1, m_read_sys_net.call_count)
- self.assertEqual(2, m_writefile.call_count)
-
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'create_bound_netlink_socket')
- def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
- """Waiting for all nics should raise exception if netlink socket
- creation fails."""
-
- m_socket.side_effect = netlink.NetlinkCreateSocketError
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
- self.assertRaises(netlink.NetlinkCreateSocketError,
- dsa._wait_for_all_nics_ready)
-
-
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-class TestPreprovisioningPollIMDS(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningPollIMDS, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch('time.sleep', mock.MagicMock())
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
- m_request, m_media_switch, m_dhcp,
- m_net):
- """The poll_imds will retry DHCP on IMDS timeout."""
- report_file = self.tmp_path('report_marker', self.tmp)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- m_dhcp.return_value = [lease]
- m_media_switch.return_value = None
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
-
- self.tries = 0
-
- def fake_timeout_once(**kwargs):
- self.tries += 1
- if self.tries == 1:
- raise requests.Timeout('Fake connection timeout')
- elif self.tries in (2, 3):
- response = requests.Response()
- response.status_code = 404 if self.tries == 2 else 410
- raise requests.exceptions.HTTPError(
- "fake {}".format(response.status_code), response=response
- )
- # The fourth request should succeed and stop further retries or re-dhcp
- return mock.MagicMock(status_code=200, text="good", content="good")
-
- m_request.side_effect = fake_timeout_once
-
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 1)
- m_report_ready.assert_called_with(lease=lease)
- self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
-
- @mock.patch('os.path.isfile')
- def test_poll_imds_skips_dhcp_if_ctx_present(
- self, m_isfile, report_ready_func, fake_resp, m_media_switch,
- m_dhcp, m_net):
- """The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(0, m_media_switch.call_count)
-
- def test_does_not_poll_imds_report_ready_when_marker_file_exists(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should not call report ready when the reported ready
- marker file exists"""
- report_file = self.tmp_path('report_marker', self.tmp)
- write_file(report_file, content='dont run report_ready :)')
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 0)
-
- def test_poll_imds_report_ready_success_writes_marker_file(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should write the report_ready marker file if
- reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 1)
- self.assertTrue(os.path.exists(report_file))
-
- def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should write the report_ready marker file if
- reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- m_report_ready.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- self.assertRaises(
- InvalidMetaDataException,
- dsa._poll_imds)
- self.assertEqual(m_report_ready.call_count, 1)
- self.assertFalse(os.path.exists(report_file))
-
-
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.is_FreeBSD')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True)
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('requests.Session.request')
-class TestAzureDataSourcePreprovisioning(CiTestCase):
-
- def setUp(self):
- super(TestAzureDataSourcePreprovisioning, self).setUp()
- tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- def test_poll_imds_returns_ovf_env(self, m_request,
- m_dhcp, m_net,
- m_media_switch,
- m_is_bsd):
- """The _poll_imds method should return the ovf_env.xml."""
- m_is_bsd.return_value = False
- m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
- host = "169.254.169.254"
- full_url = url.format(host)
- m_request.return_value = mock.MagicMock(status_code=200, text="ovf",
- content="ovf")
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_request.call_args_list,
- [mock.call(allow_redirects=True,
- headers={'Metadata': 'true',
- 'User-Agent':
- 'Cloud-Init/%s' % vs()
- }, method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url)])
- self.assertEqual(m_dhcp.call_count, 2)
- m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertEqual(m_net.call_count, 2)
-
- def test__reprovision_calls__poll_imds(self, m_request,
- m_dhcp, m_net,
- m_media_switch,
- m_is_bsd):
- """The _reprovision method should call poll IMDS."""
- m_is_bsd.return_value = False
- m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
- host = "169.254.169.254"
- full_url = url.format(host)
- hostname = "myhost"
- username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
- content = construct_valid_ovf_env(data=odata)
- m_request.return_value = mock.MagicMock(status_code=200, text=content,
- content=content)
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- md, _ud, cfg, _d = dsa._reprovision()
- self.assertEqual(md['local-hostname'], hostname)
- self.assertEqual(cfg['system_info']['default_user']['name'], username)
- self.assertIn(
- mock.call(
- allow_redirects=True,
- headers={
- 'Metadata': 'true',
- 'User-Agent': 'Cloud-Init/%s' % vs()
- },
- method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url
- ),
- m_request.call_args_list)
- self.assertEqual(m_dhcp.call_count, 2)
- m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertEqual(m_net.call_count, 2)
-
-
-class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_remove_network_scripts_removes_both_files_and_directories(self):
- """Any files or directories in paths are removed when present."""
- file1 = self.tmp_path('file1', dir=self.tmp)
- subdir = self.tmp_path('sub1', dir=self.tmp)
- subfile = self.tmp_path('leaf1', dir=subdir)
- write_file(file1, 'file1content')
- write_file(subfile, 'leafcontent')
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
-
- for path in (file1, subdir, subfile):
- self.assertFalse(os.path.exists(path),
- 'Found unremoved: %s' % path)
-
- expected_logs = [
- 'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following event:'
- ' System boot.',
- 'Recursively deleting %s' % subdir,
- 'Attempting to remove %s' % file1]
- for log in expected_logs:
- self.assertIn(log, self.logs.getvalue())
-
- def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
- """Any files or directories absent are skipped without error."""
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[
- self.tmp_path('nodirhere/', dir=self.tmp),
- self.tmp_path('notfilehere', dir=self.tmp)])
- self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- def test_remove_network_scripts_default_removes_stock_scripts(self,
- m_exists):
- """Azure's stock ubuntu image scripts and artifacts are removed."""
- # Report path absent on all to avoid delete operation
- m_exists.return_value = False
- dsaz.maybe_remove_ubuntu_network_config_scripts()
- calls = m_exists.call_args_list
- for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
- self.assertIn(mock.call(path), calls)
-
-
-class TestWBIsPlatformViable(CiTestCase):
- """White box tests for _is_platform_viable."""
- with_logs = True
-
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
- def test_true_on_azure_chassis(self, m_read_dmi_data):
- """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
- self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
- def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
- """Return True if ovf-env.xml exists in known seed dirs."""
- # Non-matching Azure chassis-asset-tag
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
-
- m_exist.return_value = True
- self.assertTrue(dsaz._is_platform_viable('/some/seed/dir'))
- m_exist.called_once_with('/other/seed/dir')
-
- def test_false_on_no_matching_azure_criteria(self):
- """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
-
- Return False when the asset tag doesn't match Azure's static
- AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
- and no devices have a label starting with prefix 'rd_rdfe_'.
- """
- self.assertFalse(wrap_and_call(
- MOCKPATH,
- {'os.path.exists': False,
- # Non-matching Azure chassis-asset-tag
- 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'subp.which': None},
- dsaz._is_platform_viable, 'doesnotmatter'))
- self.assertIn(
- "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'),
- self.logs.getvalue())
-
-
-class TestRandomSeed(CiTestCase):
- """Test proper handling of random_seed"""
-
- def test_non_ascii_seed_is_serializable(self):
- """Pass if a random string from the Azure infrastructure which
- contains at least one non-Unicode character can be converted to/from
- JSON without alteration and without throwing an exception.
- """
- path = resourceLocation("azure/non_unicode_random_string")
- result = dsaz._get_random_seed(path)
-
- obj = {'seed': result}
- try:
- serialized = json_dumps(obj)
- deserialized = load_json(serialized)
- except UnicodeDecodeError:
- self.fail("Non-serializable random seed returned")
-
- self.assertEqual(deserialized['seed'], result)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
deleted file mode 100644
index 4ab5d471..00000000
--- a/tests/unittests/test_datasource/test_common.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit.sources import (
- DataSource,
- DataSourceAliYun as AliYun,
- DataSourceAltCloud as AltCloud,
- DataSourceAzure as Azure,
- DataSourceBigstep as Bigstep,
- DataSourceCloudSigma as CloudSigma,
- DataSourceCloudStack as CloudStack,
- DataSourceConfigDrive as ConfigDrive,
- DataSourceDigitalOcean as DigitalOcean,
- DataSourceEc2 as Ec2,
- DataSourceExoscale as Exoscale,
- DataSourceGCE as GCE,
- DataSourceHetzner as Hetzner,
- DataSourceIBMCloud as IBMCloud,
- DataSourceMAAS as MAAS,
- DataSourceNoCloud as NoCloud,
- DataSourceOpenNebula as OpenNebula,
- DataSourceOpenStack as OpenStack,
- DataSourceOracle as Oracle,
- DataSourceOVF as OVF,
- DataSourceRbxCloud as RbxCloud,
- DataSourceScaleway as Scaleway,
- DataSourceSmartOS as SmartOS,
-)
-from cloudinit.sources import DataSourceNone as DSNone
-
-from cloudinit.tests import helpers as test_helpers
-
-DEFAULT_LOCAL = [
- Azure.DataSourceAzure,
- CloudSigma.DataSourceCloudSigma,
- ConfigDrive.DataSourceConfigDrive,
- DigitalOcean.DataSourceDigitalOcean,
- Hetzner.DataSourceHetzner,
- IBMCloud.DataSourceIBMCloud,
- NoCloud.DataSourceNoCloud,
- OpenNebula.DataSourceOpenNebula,
- Oracle.DataSourceOracle,
- OVF.DataSourceOVF,
- SmartOS.DataSourceSmartOS,
- Ec2.DataSourceEc2Local,
- OpenStack.DataSourceOpenStackLocal,
- RbxCloud.DataSourceRbxCloud,
- Scaleway.DataSourceScaleway,
-]
-
-DEFAULT_NETWORK = [
- AliYun.DataSourceAliYun,
- AltCloud.DataSourceAltCloud,
- Bigstep.DataSourceBigstep,
- CloudStack.DataSourceCloudStack,
- DSNone.DataSourceNone,
- Ec2.DataSourceEc2,
- Exoscale.DataSourceExoscale,
- GCE.DataSourceGCE,
- MAAS.DataSourceMAAS,
- NoCloud.DataSourceNoCloudNet,
- OpenStack.DataSourceOpenStack,
- OVF.DataSourceOVFNet,
-]
-
-
-class ExpectedDataSources(test_helpers.TestCase):
- builtin_list = settings.CFG_BUILTIN['datasource_list']
- deps_local = [sources.DEP_FILESYSTEM]
- deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- pkg_list = [type_utils.obj_name(sources)]
-
- def test_expected_default_local_sources_found(self):
- found = sources.list_sources(
- self.builtin_list, self.deps_local, self.pkg_list)
- self.assertEqual(set(DEFAULT_LOCAL), set(found))
-
- def test_expected_default_network_sources_found(self):
- found = sources.list_sources(
- self.builtin_list, self.deps_network, self.pkg_list)
- self.assertEqual(set(DEFAULT_NETWORK), set(found))
-
- def test_expected_nondefault_network_sources_found(self):
- found = sources.list_sources(
- ['AliYun'], self.deps_network, self.pkg_list)
- self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
-
-
-class TestDataSourceInvariants(test_helpers.TestCase):
- def test_data_sources_have_valid_network_config_sources(self):
- for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
- for cfg_src in ds.network_config_sources:
- fail_msg = ('{} has an invalid network_config_sources entry:'
- ' {}'.format(str(ds), cfg_src))
- self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src),
- fail_msg)
-
- def test_expected_dsname_defined(self):
- for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
- fail_msg = (
- '{} has an invalid / missing dsname property: {}'.format(
- str(ds), str(ds.dsname)
- )
- )
- self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
- self.assertIsNotNone(ds.dsname)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
deleted file mode 100644
index 6f830cc6..00000000
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ /dev/null
@@ -1,837 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from copy import copy, deepcopy
-import json
-import os
-
-from cloudinit import helpers
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import settings
-from cloudinit.sources import DataSourceConfigDrive as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': 0,
- 'ami-manifest-path': 'FIXME',
- 'block-device-mapping': {
- 'ami': 'sda1',
- 'ephemeral0': 'sda2',
- 'root': '/dev/sda1',
- 'swap': 'sda3'},
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': None,
- 'placement': {'availability-zone': 'nova'},
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '',
- 'public-keys': {'0': {'openssh-key': PUBKEY}},
- 'reservation-id': 'r-iru5qm4m',
- 'security-groups': ['default']
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-NETWORK_DATA = {
- 'services': [
- {'type': 'dns', 'address': '199.204.44.24'},
- {'type': 'dns', 'address': '199.204.47.54'}
- ],
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
- 'ethernet_mac_address': 'fa:16:3e:05:30:fe',
- 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
- ],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp',
- 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5',
- 'id': 'network2'}
- ]
-}
-
-NETWORK_DATA_2 = {
- "services": [
- {"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"}],
- "networks": [
- {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
- "netmask": "255.255.255.248", "link": "eth0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}],
- "ip_address": "2.2.2.10", "id": "network0-ipv4"},
- {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
- "netmask": "255.255.255.224", "link": "eth1",
- "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
- "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
- "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
-}
-
- # This network data has 'tap' or null type for a link.
-NETWORK_DATA_3 = {
- "services": [{"type": "dns", "address": "172.16.36.11"},
- {"type": "dns", "address": "172.16.36.12"}],
- "networks": [
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18",
- "id": "network0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.17.48.1"}]},
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::",
- "link": "tap77a0dc5b-72",
- "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
- "id": "network1",
- "routes": [{"netmask": "::", "network": "::",
- "gateway": "fdb8:52d0:9d14::1"}]},
- {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13",
- "id": "network2",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.16.48.1"},
- {"netmask": "255.255.0.0", "network": "172.16.0.0",
- "gateway": "172.16.48.1"}]}],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None,
- "type": "tap", "id": "tap77a0dc5b-72",
- "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None,
- "type": None, "id": "tap7d6b7bec-93",
- "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"}
- ]
-}
-
-BOND_MAC = "fa:16:3e:b3:72:36"
-NETWORK_DATA_BOND = {
- "services": [
- {"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"},
- ],
- "networks": [
- {"id": "network2-ipv4", "ip_address": "2.2.2.13",
- "link": "vlan2", "netmask": "255.255.255.248",
- "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
- "type": "ipv4",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}]},
- {"id": "network3-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan3", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
- ],
- "links": [
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3c",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3d",
- "id": "eth1", "mtu": 1500, "type": "phy"},
- {"bond_links": ["eth0", "eth1"],
- "bond_miimon": 100, "bond_mode": "4",
- "bond_xmit_hash_policy": "layer3+4",
- "ethernet_mac_address": BOND_MAC,
- "id": "bond0", "type": "bond"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan2", "type": "vlan", "vlan_id": 602,
- "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- {"ethernet_mac_address": "fa:16:3e:66:ab:a6",
- "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0",
- "vlan_mac_address": "fa:16:3e:66:ab:a6"}
- ]
-}
-
-NETWORK_DATA_VLAN = {
- "services": [{"type": "dns", "address": "1.1.1.191"}],
- "networks": [
- {"id": "network1-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan1", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
- ],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:69:b0:58",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan1", "type": "vlan", "vlan_id": 602,
- "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- ]
-}
-
-KNOWN_MACS = {
- 'fa:16:3e:69:b0:58': 'enp0s1',
- 'fa:16:3e:d4:57:ad': 'enp0s2',
- 'fa:16:3e:dd:50:9a': 'foo1',
- 'fa:16:3e:a8:14:69': 'foo2',
- 'fa:16:3e:ed:9a:59': 'foo3',
- '0c:c4:7a:34:6e:3d': 'oeth1',
- '0c:c4:7a:34:6e:3c': 'oeth0',
-}
-
-CFG_DRIVE_FILES_V2 = {
- 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
- 'ec2/2009-04-04/user-data': USER_DATA,
- 'ec2/latest/meta-data.json': json.dumps(EC2_META),
- 'ec2/latest/user-data': USER_DATA,
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA),
- 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2015-10-15/user_data': USER_DATA,
- 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)}
-
-M_PATH = "cloudinit.sources.DataSourceConfigDrive."
-
-
-class TestConfigDriveDataSource(CiTestCase):
-
- def setUp(self):
- super(TestConfigDriveDataSource, self).setUp()
- self.add_patch(
- M_PATH + "util.find_devs_with",
- "m_find_devs_with", return_value=[])
- self.tmp = self.tmp_dir()
-
- def test_ec2_metadata(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- found = ds.read_config_drive(self.tmp)
- self.assertTrue('ec2-metadata' in found)
- ec2_md = found['ec2-metadata']
- self.assertEqual(EC2_META, ec2_md)
-
- def test_dev_os_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- cfg_ds.metadata = found['metadata']
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
- provided_name = dev_name[len('/dev/'):]
- provided_name = "s" + provided_name[1:]
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[provided_name]))
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
-
- def exists_side_effect():
- yield False
- yield True
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()))
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- find_mock.assert_called_once_with(mock.ANY)
- self.assertEqual(exists_mock.call_count, 2)
-
- def test_dev_os_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- os_md = found['metadata']
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[dev_name]))
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- return_value=True))
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- find_mock.assert_called_once_with(mock.ANY)
- exists_mock.assert_called_once_with(mock.ANY)
-
- def test_dev_ec2_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
- def exists_side_effect():
- yield False
- yield True
- with mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()):
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
- # We don't assert the call count for os.path.exists() because
- # not all of the entries in name_tests results in two calls to
- # that function. Specifically, 'root2k' doesn't seem to call
- # it at all.
-
- def test_dev_ec2_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/sda1',
- 'root': '/dev/sda1',
- 'ephemeral0': '/dev/sda2',
- 'swap': '/dev/sda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- with mock.patch.object(os.path, 'exists', return_value=True):
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- def test_dir_valid(self):
- """Verify a dir is read as such."""
-
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(USER_DATA, found['userdata'])
- self.assertEqual(expected_md, found['metadata'])
- self.assertEqual(NETWORK_DATA, found['networkdata'])
- self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
- self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
-
- def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect datasource validity."""
-
- data = copy(CFG_DRIVE_FILES_V2)
- data["myfoofile.txt"] = "myfoocontent"
- data["openstack/latest/random-file.txt"] = "random-content"
-
- populate_dir(self.tmp, data)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(expected_md, found['metadata'])
-
- def test_seed_dir_bad_json_metadata(self):
- """Verify that bad json in metadata raises BrokenConfigDriveDir."""
- data = copy(CFG_DRIVE_FILES_V2)
-
- data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
- data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
- data["openstack/latest/meta_data.json"] = "non-json garbage {}"
-
- populate_dir(self.tmp, data)
-
- self.assertRaises(openstack.BrokenMetadata,
- ds.read_config_drive, self.tmp)
-
- def test_seed_dir_no_configdrive(self):
- """Verify that no metadata raises NonConfigDriveDir."""
-
- my_d = os.path.join(self.tmp, "non-configdrive")
- data = copy(CFG_DRIVE_FILES_V2)
- data["myfoofile.txt"] = "myfoocontent"
- data["openstack/latest/random-file.txt"] = "random-content"
- data["content/foo"] = "foocontent"
-
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
-
- def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises NonConfigDriveDir."""
- my_d = os.path.join(self.tmp, "nonexistantdirectory")
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
-
- def test_find_candidates(self):
- devs_with_answers = {}
-
- def my_devs_with(*args, **kwargs):
- criteria = args[0] if len(args) else kwargs.pop('criteria', None)
- return devs_with_answers.get(criteria, [])
-
- def my_is_partition(dev):
- return dev[-1] in "0123456789" and not dev.startswith("sr")
-
- try:
- orig_find_devs_with = util.find_devs_with
- util.find_devs_with = my_devs_with
-
- orig_is_partition = util.is_partition
- util.is_partition = my_is_partition
-
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=config-2": ["/dev/vdb"]}
- self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
-
- # add a vfat item
- # zdd reverse sorts after vdb, but config-2 label is preferred
- devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
- self.assertEqual(["/dev/vdb", "/dev/zdd"],
- ds.find_candidate_devs())
-
- # verify that partitions with the correct label are considered.
- devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
- "TYPE=iso9660": [],
- "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual(["/dev/vdb3"],
- ds.find_candidate_devs())
-
- # Verify that uppercase labels are also found.
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=CONFIG-2": ["/dev/vdb"]}
- self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
-
- finally:
- util.find_devs_with = orig_find_devs_with
- util.is_partition = orig_is_partition
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_pubkeys_v2(self, on_first_boot):
- """Verify that public-keys work in config-drive-v2."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- self.assertEqual(myds.get_public_ssh_keys(),
- [OSTACK_META['public_keys']['mykey']])
- self.assertEqual('configdrive', myds.cloud_name)
- self.assertEqual('openstack', myds.platform)
- self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform)
-
- def test_subplatform_config_drive_when_starts_with_dev(self):
- """subplatform reports config-drive when source starts with /dev/."""
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs:
- with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False):
- with mock.patch(M_PATH + 'util.mount_cb'):
- with mock.patch(M_PATH + 'on_first_boot'):
- m_find_devs.return_value = ['/dev/anything']
- self.assertEqual(True, cfg_ds.get_data())
- self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform)
-
-
-class TestNetJson(CiTestCase):
- def setUp(self):
- super(TestNetJson, self).setUp()
- self.tmp = self.tmp_dir()
- self.maxDiff = None
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_network_data_is_found(self, on_first_boot):
- """Verify that network_data is present in ds in config-drive-v2."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- self.assertIsNotNone(myds.network_json)
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_network_config_is_converted(self, on_first_boot):
- """Verify that network_data is converted and present on ds object."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- network_config = openstack.convert_net_json(NETWORK_DATA,
- known_macs=KNOWN_MACS)
- self.assertEqual(myds.network_config, network_config)
-
- def test_network_config_conversion_dhcp6(self):
- """Test some ipv6 input network json and check the expected
- conversions."""
- in_data = {
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- ],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- ]
- }
- out_data = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:69:b0:58',
- 'mtu': None,
- 'name': 'enp0s1',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}],
- 'type': 'physical'},
- {'mac_address': 'fa:16:3e:d4:57:ad',
- 'mtu': None,
- 'name': 'enp0s2',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}],
- 'type': 'physical',
- 'accept-ra': True}
- ],
- }
- conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
- self.assertEqual(out_data, conv_data)
-
- def test_network_config_conversions(self):
- """Tests a bunch of input network json and checks the
- expected conversions."""
- in_datas = [
- NETWORK_DATA,
- {
- 'services': [{'type': 'dns', 'address': '172.19.0.12'}],
- 'networks': [{
- 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4',
- 'type': 'ipv4',
- 'netmask': '255.255.252.0',
- 'link': 'tap1a81968a-79',
- 'routes': [{
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '172.19.3.254',
- }],
- 'ip_address': '172.19.1.34',
- 'id': 'network0',
- }],
- 'links': [{
- 'type': 'bridge',
- 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
- 'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
- 'id': 'tap1a81968a-79',
- 'mtu': None,
- }],
- },
- ]
- out_datas = [
- {
- 'version': 1,
- 'config': [
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:69:b0:58',
- 'name': 'enp0s1',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:d4:57:ad',
- 'name': 'enp0s2',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:05:30:fe',
- 'name': 'nic0',
- 'mtu': None,
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.44.24',
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.47.54',
- }
- ],
-
- },
- {
- 'version': 1,
- 'config': [
- {
- 'name': 'foo3',
- 'mac_address': 'fa:16:3e:ed:9a:59',
- 'mtu': None,
- 'type': 'physical',
- 'subnets': [
- {
- 'address': '172.19.1.34',
- 'netmask': '255.255.252.0',
- 'type': 'static',
- 'ipv4': True,
- 'routes': [{
- 'gateway': '172.19.3.254',
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- }],
- }
- ]
- },
- {
- 'type': 'nameserver',
- 'address': '172.19.0.12',
- }
- ],
- },
- ]
- for in_data, out_data in zip(in_datas, out_datas):
- conv_data = openstack.convert_net_json(in_data,
- known_macs=KNOWN_MACS)
- self.assertEqual(out_data, conv_data)
-
-
-class TestConvertNetworkData(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestConvertNetworkData, self).setUp()
- self.tmp = self.tmp_dir()
-
- def _getnames_in_config(self, ncfg):
- return set([n['name'] for n in ncfg['config']
- if n['type'] == 'physical'])
-
- def test_conversion_fills_names(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
- expected = set(['nic0', 'enp0s1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
- macs = KNOWN_MACS.copy()
- macs.update({'fa:16:3e:05:30:fe': 'foonic1',
- 'fa:16:3e:69:b0:58': 'ens1'})
- get_interfaces_by_mac.return_value = macs
-
- ncfg = openstack.convert_net_json(NETWORK_DATA)
- expected = set(['nic0', 'ens1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- def test_convert_raises_value_error_on_missing_name(self):
- macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
- self.assertRaises(ValueError, openstack.convert_net_json,
- NETWORK_DATA, known_macs=macs)
-
- def test_conversion_with_route(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_2,
- known_macs=KNOWN_MACS)
- # not the best test, but see that we get a route in the
- # network config and that it gets rendered to an ENI file
- routes = []
- for n in ncfg['config']:
- for s in n.get('subnets', []):
- routes.extend(s.get('routes', []))
- self.assertIn(
- {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
- routes)
- eni_renderer = eni.Renderer()
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
- self.assertIn("route add default gw 2.2.2.9", eni_rendering)
-
- def test_conversion_with_tap(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_3,
- known_macs=KNOWN_MACS)
- physicals = set()
- for i in ncfg['config']:
- if i.get('type') == "physical":
- physicals.add(i['name'])
- self.assertEqual(physicals, set(('foo1', 'foo2')))
-
- def test_bond_conversion(self):
- # light testing of bond conversion and eni rendering of bond
- ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
- known_macs=KNOWN_MACS)
- eni_renderer = eni.Renderer()
-
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
-
- # Verify there are expected interfaces in the net config.
- interfaces = sorted(
- [i['name'] for i in ncfg['config']
- if i['type'] in ('vlan', 'bond', 'physical')])
- self.assertEqual(
- sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
- interfaces)
-
- words = eni_rendering.split()
- # 'eth0' and 'eth1' are the ids. Because their mac addresses
- # map to other names, we should not see them in the ENI
- self.assertNotIn('eth0', words)
- self.assertNotIn('eth1', words)
-
- # oeth0 and oeth1 are the interface names for eni.
- # bond0 will be generated for the bond. Each should be auto.
- self.assertIn("auto oeth0", eni_rendering)
- self.assertIn("auto oeth1", eni_rendering)
- self.assertIn("auto bond0", eni_rendering)
- # The bond should have the given mac address
- pos = eni_rendering.find("auto bond0")
- self.assertIn(BOND_MAC, eni_rendering[pos:])
-
- def test_vlan(self):
- # light testing of vlan config conversion and eni rendering
- ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN,
- known_macs=KNOWN_MACS)
- eni_renderer = eni.Renderer()
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
-
- self.assertIn("iface enp0s1", eni_rendering)
- self.assertIn("address 10.0.1.5", eni_rendering)
- self.assertIn("auto enp0s1.602", eni_rendering)
-
- def test_mac_addrs_can_be_upper_case(self):
- # input mac addresses on rackspace may be upper case
- my_netdata = deepcopy(NETWORK_DATA)
- for link in my_netdata['links']:
- link['ethernet_mac_address'] = link['ethernet_mac_address'].upper()
-
- ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
- config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
- self.assertEqual(expected, config_name2mac)
-
- def test_unknown_device_types_accepted(self):
- # If we don't recognise a link, we should treat it as physical for a
- # best-effort boot
- my_netdata = deepcopy(NETWORK_DATA)
- my_netdata['links'][0]['type'] = 'my-special-link-type'
-
- ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
- config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
- self.assertEqual(expected, config_name2mac)
-
- # We should, however, warn the user that we don't recognise the type
- self.assertIn('Unknown network_data link type (my-special-link-type)',
- self.logs.getvalue())
-
-
-def cfg_ds_from_dir(base_d, files=None):
- run = os.path.join(base_d, "run")
- os.mkdir(run)
- cfg_ds = ds.DataSourceConfigDrive(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run}))
- cfg_ds.seed_dir = os.path.join(base_d, "seed")
- if files:
- populate_dir(cfg_ds.seed_dir, files)
- cfg_ds.known_macs = KNOWN_MACS.copy()
- if not cfg_ds.get_data():
- raise RuntimeError("Data source did not extract itself from"
- " seed directory %s" % cfg_ds.seed_dir)
- return cfg_ds
-
-
-def populate_ds_from_read_config(cfg_ds, source, results):
- """Patch the DataSourceConfigDrive from the results of
- read_config_drive_dir hopefully in line with what it would have
- if cfg_ds.get_data had been successfully called"""
- cfg_ds.source = source
- cfg_ds.metadata = results.get('metadata')
- cfg_ds.ec2_metadata = results.get('ec2-metadata')
- cfg_ds.userdata_raw = results.get('userdata')
- cfg_ds.version = results.get('version')
- cfg_ds.network_json = results.get('networkdata')
- cfg_ds._network_config = openstack.convert_net_json(
- cfg_ds.network_json, known_macs=KNOWN_MACS)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
deleted file mode 100644
index 3127014b..00000000
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright (C) 2014 Neal Shrader
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-# Author: Ben Howard <bh@digitalocean.com>
-# Author: Scott Moser <smoser@ubuntu.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import json
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceDigitalOcean
-from cloudinit.sources.helpers import digitalocean
-
-from cloudinit.tests.helpers import mock, CiTestCase
-
-DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
- "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
-DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
-
- # the following JSON was taken from a droplet (that's why it's a string)
-DO_META = json.loads("""
-{
- "droplet_id": "22532410",
- "hostname": "utl-96268",
- "vendor_data": "vendordata goes here",
- "user_data": "userdata goes here",
- "public_keys": "",
- "auth_key": "authorization_key",
- "region": "nyc3",
- "interfaces": {
- "private": [
- {
- "ipv4": {
- "ip_address": "10.132.6.205",
- "netmask": "255.255.0.0",
- "gateway": "10.132.0.1"
- },
- "mac": "04:01:57:d1:9e:02",
- "type": "private"
- }
- ],
- "public": [
- {
- "ipv4": {
- "ip_address": "192.0.0.20",
- "netmask": "255.255.255.0",
- "gateway": "104.236.0.1"
- },
- "ipv6": {
- "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
- "cidr": 64,
- "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
- },
- "anchor_ipv4": {
- "ip_address": "10.0.0.5",
- "netmask": "255.255.0.0",
- "gateway": "10.0.0.1"
- },
- "mac": "04:01:57:d1:9e:01",
- "type": "public"
- }
- ]
- },
- "floating_ip": {
- "ipv4": {
- "active": false
- }
- },
- "dns": {
- "nameservers": [
- "2001:4860:4860::8844",
- "2001:4860:4860::8888",
- "8.8.8.8"
- ]
- }
-}
-""")
-
-# This has no private interface
-DO_META_2 = {
- "droplet_id": 27223699,
- "hostname": "smtest1",
- "vendor_data": "\n".join([
- ('"Content-Type: multipart/mixed; '
- 'boundary=\"===============8645434374073493512==\"'),
- 'MIME-Version: 1.0',
- '',
- '--===============8645434374073493512==',
- 'MIME-Version: 1.0'
- 'Content-Type: text/cloud-config; charset="us-ascii"'
- 'Content-Transfer-Encoding: 7bit'
- 'Content-Disposition: attachment; filename="cloud-config"'
- '',
- '#cloud-config',
- 'disable_root: false',
- 'manage_etc_hosts: true',
- '',
- '',
- '--===============8645434374073493512=='
- ]),
- "public_keys": [
- "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
- ],
- "auth_key": "88888888888888888888888888888888",
- "region": "nyc3",
- "interfaces": {
- "public": [{
- "ipv4": {
- "ip_address": "45.55.249.133",
- "netmask": "255.255.192.0",
- "gateway": "45.55.192.1"
- },
- "anchor_ipv4": {
- "ip_address": "10.17.0.5",
- "netmask": "255.255.0.0",
- "gateway": "10.17.0.1"
- },
- "mac": "ae:cc:08:7c:88:00",
- "type": "public"
- }]
- },
- "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
- "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
- "tags": None,
-}
-
-DO_META['public_keys'] = DO_SINGLE_KEY
-
-MD_URL = 'http://169.254.169.254/metadata/v1.json'
-
-
-def _mock_dmi():
- return (True, DO_META.get('id'))
-
-
-class TestDataSourceDigitalOcean(CiTestCase):
- """
- Test reading the meta-data
- """
- def setUp(self):
- super(TestDataSourceDigitalOcean, self).setUp()
- self.tmp = self.tmp_dir()
-
- def get_ds(self, get_sysinfo=_mock_dmi):
- ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds.use_ip4LL = False
- if get_sysinfo is not None:
- ds._get_sysinfo = get_sysinfo
- return ds
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
- def test_returns_false_not_on_docean(self, m_read_sysinfo):
- m_read_sysinfo.return_value = (False, None)
- ds = self.get_ds(get_sysinfo=None)
- self.assertEqual(False, ds.get_data())
- self.assertTrue(m_read_sysinfo.called)
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
- def test_metadata(self, mock_readmd):
- mock_readmd.return_value = DO_META.copy()
-
- ds = self.get_ds()
- ret = ds.get_data()
- self.assertTrue(ret)
-
- self.assertTrue(mock_readmd.called)
-
- self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
- self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
- self.assertEqual(DO_META.get('region'), ds.availability_zone)
- self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
- self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
-
- # Single key
- self.assertEqual([DO_META.get('public_keys')],
- ds.get_public_ssh_keys())
-
- self.assertIsInstance(ds.get_public_ssh_keys(), list)
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
- def test_multiple_ssh_keys(self, mock_readmd):
- metadata = DO_META.copy()
- metadata['public_keys'] = DO_MULTIPLE_KEYS
- mock_readmd.return_value = metadata.copy()
-
- ds = self.get_ds()
- ret = ds.get_data()
- self.assertTrue(ret)
-
- self.assertTrue(mock_readmd.called)
-
- # Multiple keys
- self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
- self.assertIsInstance(ds.get_public_ssh_keys(), list)
-
-
-class TestNetworkConvert(CiTestCase):
-
- def _get_networking(self):
- self.m_get_by_mac.return_value = {
- '04:01:57:d1:9e:01': 'ens1',
- '04:01:57:d1:9e:02': 'ens2',
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
- netcfg = digitalocean.convert_network_configuration(
- DO_META['interfaces'], DO_META['dns']['nameservers'])
- self.assertIn('config', netcfg)
- return netcfg
-
- def setUp(self):
- super(TestNetworkConvert, self).setUp()
- self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac')
-
- def test_networking_defined(self):
- netcfg = self._get_networking()
- self.assertIsNotNone(netcfg)
- dns_defined = False
-
- for part in netcfg.get('config'):
- n_type = part.get('type')
- print("testing part ", n_type, "\n", json.dumps(part, indent=3))
-
- if n_type == 'nameserver':
- n_address = part.get('address')
- self.assertIsNotNone(n_address)
- self.assertEqual(len(n_address), 3)
-
- dns_resolvers = DO_META["dns"]["nameservers"]
- for x in n_address:
- self.assertIn(x, dns_resolvers)
- dns_defined = True
-
- else:
- n_subnets = part.get('type')
- n_name = part.get('name')
- n_mac = part.get('mac_address')
-
- self.assertIsNotNone(n_type)
- self.assertIsNotNone(n_subnets)
- self.assertIsNotNone(n_name)
- self.assertIsNotNone(n_mac)
-
- self.assertTrue(dns_defined)
-
- def _get_nic_definition(self, int_type, expected_name):
- """helper function to return if_type (i.e. public) and the expected
- name used by cloud-init (i.e eth0)"""
- netcfg = self._get_networking()
- meta_def = (DO_META.get('interfaces')).get(int_type)[0]
-
- self.assertEqual(int_type, meta_def.get('type'))
-
- for nic_def in netcfg.get('config'):
- print(nic_def)
- if nic_def.get('name') == expected_name:
- return nic_def, meta_def
-
- def _get_match_subn(self, subnets, ip_addr):
- """get the matching subnet definition based on ip address"""
- for subn in subnets:
- address = subn.get('address')
- self.assertIsNotNone(address)
-
- # a plain equality check won't work because ipv6 addresses are
- # in cidr notation, i.e. fe00::1/64
- if ip_addr in address:
- print(json.dumps(subn, indent=3))
- return subn
-
- def test_correct_gateways_defined(self):
- """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
- netcfg = self._get_networking()
- gateways = []
- for nic_def in netcfg.get('config'):
- if nic_def.get('type') != 'physical':
- continue
- for subn in nic_def.get('subnets'):
- if 'gateway' in subn:
- gateways.append(subn.get('gateway'))
-
- # we should have two gateways, one ipv4 and one ipv6
- self.assertEqual(len(gateways), 2)
-
- # make sure the ipv4 gateway is there
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
- self.assertIn(ipv4_def.get('gateway'), gateways)
-
- # make sure the ipv6 gateway is there
- ipv6_def = meta_def.get('ipv6')
- self.assertIn(ipv6_def.get('gateway'), gateways)
-
- def test_public_interface_defined(self):
- """test that the public interface is defined as eth0"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- self.assertEqual('eth0', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
-
- def test_private_interface_defined(self):
- """test that the private interface is defined as eth1"""
- (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
- self.assertEqual('eth1', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
-
- def test_public_interface_ipv6(self):
- """test public ipv6 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv6_def = meta_def.get('ipv6')
- self.assertIsNotNone(ipv6_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv6_def.get('ip_address'))
-
- cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
- ipv6_def.get('cidr'))
-
- self.assertEqual(cidr_notated_address, subn_def.get('address'))
- self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
-
- def test_public_interface_ipv4(self):
- """test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
- self.assertIsNotNone(ipv4_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
-
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
-
- def test_public_interface_anchor_ipv4(self):
- """test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('anchor_ipv4')
- self.assertIsNotNone(ipv4_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
-
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertNotIn('gateway', subn_def)
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- def test_convert_without_private(self, m_get_by_mac):
- m_get_by_mac.return_value = {
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
- netcfg = digitalocean.convert_network_configuration(
- DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
-
- # print(netcfg)
- byname = {}
- for i in netcfg['config']:
- if 'name' in i:
- if i['name'] in byname:
- raise ValueError("name '%s' in config twice: %s" %
- (i['name'], netcfg))
- byname[i['name']] = i
- self.assertTrue('eth0' in byname)
- self.assertTrue('subnets' in byname['eth0'])
- eth0 = byname['eth0']
- self.assertEqual(
- sorted(['45.55.249.133', '10.17.0.5']),
- sorted([i['address'] for i in eth0['subnets']]))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py
deleted file mode 100644
index f0061199..00000000
--- a/tests/unittests/test_datasource/test_exoscale.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
-# Author: Christopher Glass <christopher.glass@exoscale.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit.sources.DataSourceExoscale import (
- API_VERSION,
- DataSourceExoscale,
- METADATA_URL,
- get_password,
- PASSWORD_SERVER_PORT,
- read_metadata)
-from cloudinit.tests.helpers import HttprettyTestCase, mock
-from cloudinit import util
-
-import httpretty
-import os
-import requests
-
-
-TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL,
- PASSWORD_SERVER_PORT,
- API_VERSION)
-
-TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL,
- API_VERSION)
-
-TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL,
- API_VERSION)
-
-
-@httpretty.activate
-class TestDatasourceExoscale(HttprettyTestCase):
-
- def setUp(self):
- super(TestDatasourceExoscale, self).setUp()
- self.tmp = self.tmp_dir()
- self.password_url = TEST_PASSWORD_URL
- self.metadata_url = TEST_METADATA_URL
- self.userdata_url = TEST_USERDATA_URL
-
- def test_password_saved(self):
- """The password is not set when it is not found
- in the metadata service."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="saved_password")
- self.assertFalse(get_password())
-
- def test_password_empty(self):
- """No password is set if the metadata service returns
- an empty string."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="")
- self.assertFalse(get_password())
-
- def test_password(self):
- """The password is set to what is found in the metadata
- service."""
- expected_password = "p@ssw0rd"
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
- password = get_password()
- self.assertEqual(expected_password, password)
-
- def test_activate_removes_set_passwords_semaphore(self):
- """Allow set_passwords to run every boot by removing the semaphore."""
- path = helpers.Paths({'cloud_dir': self.tmp})
- sem_dir = self.tmp_path('instance/sem', dir=self.tmp)
- util.ensure_dir(sem_dir)
- sem_file = os.path.join(sem_dir, 'config_set_passwords')
- with open(sem_file, 'w') as stream:
- stream.write('')
- ds = DataSourceExoscale({}, None, path)
- ds.activate(None, None)
- self.assertFalse(os.path.exists(sem_file))
-
- def test_get_data(self):
- """The datasource conforms to expected behavior when supplied
- full test data."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_password = "p@ssw0rd"
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(),
- {'ssh_pwauth': True,
- 'password': expected_password,
- 'chpasswd': {
- 'expire': False,
- }})
-
- def test_get_data_saved_password(self):
- """The datasource conforms to expected behavior when saved_password is
- returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_answer = "saved_password"
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(), {})
-
- def test_get_data_no_password(self):
- """The datasource conforms to expected behavior when no password is
- returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_answer = ""
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(), {})
-
- @mock.patch('cloudinit.sources.DataSourceExoscale.get_password')
- def test_read_metadata_when_password_server_unreachable(self, m_password):
- """The read_metadata function returns partial results in case the
- password server (only) is unreachable."""
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
-
- m_password.side_effect = requests.Timeout('Fake Connection Timeout')
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
-
- result = read_metadata()
-
- self.assertIsNone(result.get("password"))
- self.assertEqual(result.get("user-data").decode("utf-8"),
- expected_userdata)
-
- def test_non_viable_platform(self):
- """The datasource fails fast when the platform is not viable."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: False
- self.assertFalse(ds._get_data())
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
deleted file mode 100644
index 01f4cbd1..00000000
--- a/tests/unittests/test_datasource/test_gce.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright (C) 2014 Vaidas Jablonskis
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import datetime
-import httpretty
-import json
-import re
-from unittest import mock
-from urllib.parse import urlparse
-
-from base64 import b64encode, b64decode
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceGCE
-
-from cloudinit.tests import helpers as test_helpers
-
-
-GCE_META = {
- 'instance/id': '123',
- 'instance/zone': 'foo/bar',
- 'instance/hostname': 'server.project-foo.local',
-}
-
-GCE_META_PARTIAL = {
- 'instance/id': '1234',
- 'instance/hostname': 'server.project-bar.local',
- 'instance/zone': 'bar/baz',
-}
-
-GCE_META_ENCODING = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'),
- 'user-data-encoding': 'base64',
- }
-}
-
-GCE_USER_DATA_TEXT = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n',
- }
-}
-
-HEADERS = {'Metadata-Flavor': 'Google'}
-MD_URL_RE = re.compile(
- r'http://metadata.google.internal/computeMetadata/v1/.*')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes/hostkeys/')
-
-
-def _set_mock_metadata(gce_meta=None):
- if gce_meta is None:
- gce_meta = GCE_META
-
- def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith('/computeMetadata/v1/'):
- path = url_path.split('/computeMetadata/v1/')[1:][0]
- recursive = path.endswith('/')
- path = path.rstrip('/')
- else:
- path = None
- if path in gce_meta:
- response = gce_meta.get(path)
- if recursive:
- response = json.dumps(response)
- return (200, headers, response)
- else:
- return (404, headers, '')
-
- # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
- httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
-
-
-@httpretty.activate
-class TestDataSourceGCE(test_helpers.HttprettyTestCase):
-
- def _make_distro(self, dtype, def_user=None):
- cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
- distro_cls = distros.fetch(dtype)
- if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
- return distro
-
- def setUp(self):
- tmp = self.tmp_dir()
- self.ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, None,
- helpers.Paths({'run_dir': tmp}))
- ppatch = self.m_platform_reports_gce = mock.patch(
- 'cloudinit.sources.DataSourceGCE.platform_reports_gce')
- self.m_platform_reports_gce = ppatch.start()
- self.m_platform_reports_gce.return_value = True
- self.addCleanup(ppatch.stop)
- super(TestDataSourceGCE, self).setUp()
-
- def test_connection(self):
- _set_mock_metadata()
- success = self.ds.get_data()
- self.assertTrue(success)
-
- req_header = httpretty.last_request().headers
- for header_name, expected_value in HEADERS.items():
- self.assertEqual(expected_value, req_header.get(header_name))
-
- def test_metadata(self):
- # UnicodeDecodeError if set to ds.userdata instead of userdata_raw
- meta = GCE_META.copy()
- meta['instance/attributes/user-data'] = b'/bin/echo \xff\n'
-
- _set_mock_metadata()
- self.ds.get_data()
-
- shostname = GCE_META.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname,
- self.ds.get_hostname())
-
- self.assertEqual(GCE_META.get('instance/id'),
- self.ds.get_instance_id())
-
- self.assertEqual(GCE_META.get('instance/attributes/user-data'),
- self.ds.get_userdata_raw())
-
- # test partial metadata (missing user-data in particular)
- def test_metadata_partial(self):
- _set_mock_metadata(GCE_META_PARTIAL)
- self.ds.get_data()
-
- self.assertEqual(GCE_META_PARTIAL.get('instance/id'),
- self.ds.get_instance_id())
-
- shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname, self.ds.get_hostname())
-
- def test_userdata_no_encoding(self):
- """check that user-data is read."""
- _set_mock_metadata(GCE_USER_DATA_TEXT)
- self.ds.get_data()
- self.assertEqual(
- GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(),
- self.ds.get_userdata_raw())
-
- def test_metadata_encoding(self):
- """user-data is base64 encoded if user-data-encoding is 'base64'."""
- _set_mock_metadata(GCE_META_ENCODING)
- self.ds.get_data()
-
- instance_data = GCE_META_ENCODING.get('instance/attributes')
- decoded = b64decode(instance_data.get('user-data'))
- self.assertEqual(decoded, self.ds.get_userdata_raw())
-
- def test_missing_required_keys_return_false(self):
- for required_key in ['instance/id', 'instance/zone',
- 'instance/hostname']:
- meta = GCE_META_PARTIAL.copy()
- del meta[required_key]
- _set_mock_metadata(meta)
- self.assertEqual(False, self.ds.get_data())
- httpretty.reset()
-
- def test_no_ssh_keys_metadata(self):
- _set_mock_metadata()
- self.ds.get_data()
- self.assertEqual([], self.ds.get_public_ssh_keys())
-
- def test_cloudinit_ssh_keys(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
- }
- instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(key) for key in range(3)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
- def test_default_user_ssh_keys(self, mock_ug_util):
- mock_ug_util.normalize_users_groups.return_value = None, None
- mock_ug_util.extract_default.return_value = 'ubuntu', None
- ubuntu_ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, self._make_distro('ubuntu'),
- helpers.Paths({'run_dir': self.tmp_dir()}))
-
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
- }
- instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- ubuntu_ds.get_data()
-
- expected = [valid_key.format(key) for key in range(3)]
- self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
-
- def test_instance_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
- }
- instance_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(key) for key in range(2)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- def test_block_project_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
- }
- instance_attributes = {
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'block-project-ssh-keys': 'True',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(0)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- def test_only_last_part_of_zone_used_for_availability_zone(self):
- _set_mock_metadata()
- r = self.ds.get_data()
- self.assertEqual(True, r)
- self.assertEqual('bar', self.ds.availability_zone)
-
- @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
- def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
- self.m_platform_reports_gce.return_value = False
- ret = self.ds.get_data()
- self.assertEqual(False, ret)
- m_fetcher.assert_not_called()
-
- def test_has_expired(self):
-
- def _get_timestamp(days):
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
- today = datetime.datetime.now()
- timestamp = today + datetime.timedelta(days=days)
- return timestamp.strftime(format_str)
-
- past = _get_timestamp(-1)
- future = _get_timestamp(1)
- ssh_keys = {
- None: False,
- '': False,
- 'Invalid': False,
- 'user:ssh-rsa key user@domain.com': False,
- 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
- 'user:ssh-rsa key google-ssh': False,
- 'user:ssh-rsa key google-ssh {invalid:json}': False,
- 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
- 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
- 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
- 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
- }
-
- for key, expired in ssh_keys.items():
- self.assertEqual(DataSourceGCE._has_expired(key), expired)
-
- def test_parse_public_keys_non_ascii(self):
- public_key_data = [
- 'cloudinit:rsa ssh-ke%s invalid' % chr(165),
- 'use%sname:rsa ssh-key' % chr(174),
- 'cloudinit:test 1',
- 'default:test 2',
- 'user:test 3',
- ]
- expected = ['test 1', 'test 2']
- found = DataSourceGCE._parse_public_keys(
- public_key_data, default_user='default')
- self.assertEqual(sorted(found), sorted(expected))
-
- @mock.patch("cloudinit.url_helper.readurl")
- def test_publish_host_keys(self, m_readurl):
- hostkeys = [('ssh-rsa', 'asdfasdf'),
- ('ssh-ed25519', 'qwerqwer')]
- readurl_expected_calls = [
- mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
- mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
- ]
- self.ds.publish_host_keys(hostkeys)
- m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
deleted file mode 100644
index 415755aa..00000000
--- a/tests/unittests/test_datasource/test_openstack.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import httpretty as hp
-import json
-import re
-from io import StringIO
-from urllib.parse import urlparse
-
-from cloudinit.tests import helpers as test_helpers
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
-from cloudinit.sources import DataSourceOpenStack as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-BASE_URL = "http://169.254.169.254"
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': '0',
- 'ami-manifest-path': 'FIXME',
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': '0.0.0.0',
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '0.0.0.1',
- 'reservation-id': 'r-iru5qm4m',
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-VENDOR_DATA = {
- 'magic': '',
-}
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-}
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-OS_FILES = {
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/network_data.json': json.dumps(
- {'links': [], 'networks': [], 'services': []}),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
-}
-EC2_FILES = {
- 'latest/user-data': USER_DATA,
-}
-EC2_VERSIONS = [
- 'latest',
-]
-
-MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
-
-
-# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
-def _register_uris(version, ec2_files, ec2_meta, os_files):
- """Registers a set of url patterns into httpretty that will mimic the
- same data returned by the openstack metadata service (and ec2 service)."""
-
- def match_ec2_url(uri, headers):
- path = uri.path.strip("/")
- if len(path) == 0:
- return (200, headers, "\n".join(EC2_VERSIONS))
- path = uri.path.lstrip("/")
- if path in ec2_files:
- return (200, headers, ec2_files.get(path))
- if path == 'latest/meta-data/':
- buf = StringIO()
- for (k, v) in ec2_meta.items():
- if isinstance(v, (list, tuple)):
- buf.write("%s/" % (k))
- else:
- buf.write("%s" % (k))
- buf.write("\n")
- return (200, headers, buf.getvalue())
- if path.startswith('latest/meta-data/'):
- value = None
- pieces = path.split("/")
- if path.endswith("/"):
- pieces = pieces[2:-1]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- else:
- pieces = pieces[2:]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- if value is not None:
- return (200, headers, str(value))
- return (404, headers, '')
-
- def match_os_uri(uri, headers):
- path = uri.path.strip("/")
- if path == 'openstack':
- return (200, headers, "\n".join([openstack.OS_LATEST]))
- path = uri.path.lstrip("/")
- if path in os_files:
- return (200, headers, os_files.get(path))
- return (404, headers, '')
-
- def get_request_callback(method, uri, headers):
- uri = urlparse(uri)
- path = uri.path.lstrip("/").split("/")
- if path[0] == 'openstack':
- return match_os_uri(uri, headers)
- return match_ec2_url(uri, headers)
-
- hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
- body=get_request_callback)
-
-
-def _read_metadata_service():
- return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
-
-
-class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
-
- with_logs = True
- VERSION = 'latest'
-
- def setUp(self):
- super(TestOpenStackDataSource, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_successful(self):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(2, len(f['files']))
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual(EC2_META, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
- metadata = f['metadata']
- self.assertEqual('nova', metadata.get('availability_zone'))
- self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname'))
- self.assertEqual('sm-foo-test.novalocal',
- metadata.get('local-hostname'))
- self.assertEqual('sm-foo-test', metadata.get('name'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('uuid'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('instance-id'))
-
- def test_no_ec2(self):
- _register_uris(self.VERSION, {}, {}, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual({}, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
-
- def test_bad_metadata(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.NonReadable, _read_metadata_service)
-
- def test_bad_uuid(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta.pop('uuid')
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- def test_userdata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('user_data'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('userdata'))
-
- def test_vendordata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('vendordata'))
-
- def test_vendordata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- def test_metadata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_datasource(self, m_dhcp):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertTrue(found)
- self.assertEqual(2, ds_os.version)
- md = dict(ds_os.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(EC2_META, ds_os.ec2_metadata)
- self.assertEqual(USER_DATA, ds_os.userdata_raw)
- self.assertEqual(2, len(ds_os.files))
- self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
- self.assertIsNone(ds_os.vendordata_raw)
- m_dhcp.assert_not_called()
-
- @hp.activate
- @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_local_datasource(self, m_dhcp, m_net):
- """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os_local = ds.DataSourceOpenStackLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
-
- self.assertIsNone(ds_os_local.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os_local.get_data()
- self.assertTrue(found)
- self.assertEqual(2, ds_os_local.version)
- md = dict(ds_os_local.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
- self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
- self.assertEqual(2, len(ds_os_local.files))
- self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
- self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with('eth9', None)
-
- def test_bad_datasource_meta(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
- self.assertIn(
- 'InvalidMetaDataException: Broken metadata address'
- ' http://169.254.169.25',
- self.logs.getvalue())
-
- def test_no_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- def test_network_config_disabled_by_datasource_config(self):
- """The network_config can be disabled from datasource config."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {'apply_network_config': False}
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
- ds_os.network_json = sample_json # Ignore this content from metadata
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- self.assertIsNone(ds_os.network_config)
- m_convert_json.assert_not_called()
-
- def test_network_config_from_network_json(self):
- """The datasource gets network_config from network_data.json."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
- ds_os.network_json = sample_json
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- m_convert_json.return_value = example_cfg
- self.assertEqual(example_cfg, ds_os.network_config)
- self.assertIn(
- 'network config provided via network_json', self.logs.getvalue())
- m_convert_json.assert_called_with(sample_json, known_macs=None)
-
- def test_network_config_cached(self):
- """The datasource caches the network_config property."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os._network_config = example_cfg
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- self.assertEqual(example_cfg, ds_os.network_config)
- m_convert_json.assert_not_called()
-
- def test_disabled_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta['meta'] = {
- 'dsmode': 'disabled',
- }
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- @hp.activate
- def test_wb__crawl_metadata_does_not_persist(self):
- """_crawl_metadata returns current metadata and does not cache."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- crawled_data = ds_os._crawl_metadata()
- self.assertEqual(UNSET, ds_os.ec2_metadata)
- self.assertIsNone(ds_os.userdata_raw)
- self.assertEqual(0, len(ds_os.files))
- self.assertIsNone(ds_os.vendordata_raw)
- self.assertEqual(
- ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
- 'userdata', 'vendordata', 'version'],
- sorted(crawled_data.keys()))
- self.assertEqual('local', crawled_data['dsmode'])
- self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
- self.assertEqual(2, len(crawled_data['files']))
- md = copy.deepcopy(crawled_data['metadata'])
- md.pop('instance-id')
- md.pop('local-hostname')
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(
- json.loads(OS_FILES['openstack/latest/network_data.json']),
- crawled_data['networkdata'])
- self.assertEqual(USER_DATA, crawled_data['userdata'])
- self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
- self.assertEqual(2, crawled_data['version'])
-
-
-class TestVendorDataLoading(test_helpers.TestCase):
- def cvj(self, data):
- return convert_vendordata(data)
-
- def test_vd_load_none(self):
-        # non-existent vendor-data should return None
- self.assertIsNone(self.cvj(None))
-
- def test_vd_load_string(self):
- self.assertEqual(self.cvj("foobar"), "foobar")
-
- def test_vd_load_list(self):
- data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
- self.assertEqual(self.cvj(data), data)
-
- def test_vd_load_dict_no_ci(self):
- self.assertIsNone(self.cvj({'foo': 'bar'}))
-
- def test_vd_load_dict_ci_dict(self):
- self.assertRaises(ValueError, self.cvj,
- {'foo': 'bar', 'cloud-init': {'x': 1}})
-
- def test_vd_load_dict_ci_string(self):
- data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
- self.assertEqual(self.cvj(data), data['cloud-init'])
-
- def test_vd_load_dict_ci_list(self):
- data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
- self.assertEqual(self.cvj(data), data['cloud-init'])
-
-
-@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
-class TestDetectOpenStack(test_helpers.CiTestCase):
-
- def test_detect_openstack_non_intel_x86(self, m_is_x86):
- """Return True on non-intel platforms because dmi isn't conclusive."""
- m_is_x86.return_value = False
- self.assertTrue(
- ds.detect_openstack(), 'Expected detect_openstack == True')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
- m_is_x86):
- """Return False on EC2 platforms."""
- m_is_x86.return_value = True
- # No product_name in proc/1/environ
- m_proc_env.return_value = {'HOME': '/'}
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on EC2
- if dmi_key == 'chassis-asset-tag':
- return '' # Empty string on EC2
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertFalse(
- ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
- m_proc_env.assert_called_with(1)
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_intel_product_name_compute(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack compute and nova instances."""
- m_is_x86.return_value = True
- openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
-
- for product_name in openstack_product_names:
- m_dmi.return_value = product_name
- self.assertTrue(
- ds.detect_openstack(), 'Failed to detect_openstack')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud
- if dmi_key == 'chassis-asset-tag':
- return 'OpenTelekomCloud'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on OpenTelekomCloud')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'VMware Virtual Platform' # SAP CCloud uses VMware
- if dmi_key == 'chassis-asset-tag':
- return 'SAP CCloud VM'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on SAP CCloud VM')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting Oracle cloud asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Standard PC (i440FX + PIIX, 1996)' # No match
- if dmi_key == 'chassis-asset-tag':
- return 'OracleCloud.com'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(accept_oracle=True),
- 'Expected detect_openstack == True on OracleCloud.com')
- self.assertFalse(
- ds.detect_openstack(accept_oracle=False),
- 'Expected detect_openstack == False.')
-
- def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86,
- chassis_tag):
- """Return True on OpenStack reporting generic asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Generic OpenStack Platform'
- if dmi_key == 'chassis-asset-tag':
- return chassis_tag
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on Generic OpenStack Platform')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Nova')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Compute')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
- m_is_x86):
- """Return True when nova product_name specified in /proc/1/environ."""
- m_is_x86.return_value = True
- # Nova product_name in proc/1/environ
- m_proc_env.return_value = {
- 'HOME': '/', 'product_name': 'OpenStack Nova'}
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish'
- if dmi_key == 'chassis-asset-tag':
-                return ''  # Nothing 'openstackish'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
-            'Expected detect_openstack == True from /proc/1/environ')
- m_proc_env.assert_called_with(1)
-
-
-class TestMetadataReader(test_helpers.HttprettyTestCase):
- """Test the MetadataReader."""
- burl = 'http://169.254.169.254/'
- md_base = {
- 'availability_zone': 'myaz1',
- 'hostname': 'sm-foo-test.novalocal',
- "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
- 'launch_index': 0,
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'project_id': '6a103f813b774b9fb15a4fcd36e1c056',
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
- def register(self, path, body=None, status=200):
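-        # Encode str bodies to bytes; leave bytes or None untouched.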
- content = body if not isinstance(body, str) else body.encode('utf-8')
- hp.register_uri(
- hp.GET, self.burl + "openstack" + path, status=status,
- body=content)
-
- def register_versions(self, versions):
- self.register("", '\n'.join(versions))
- self.register("/", '\n'.join(versions))
-
- def register_version(self, version, data):
- content = '\n'.join(sorted(data.keys()))
- self.register(version, content)
- self.register(version + "/", content)
- for path, content in data.items():
- self.register("/%s/%s" % (version, path), content)
- self.register("/%s/%s" % (version, path), content)
- if 'user_data' not in data:
- self.register("/%s/user_data" % version, "nodata", status=404)
-
- def test__find_working_version(self):
- """Test a working version ignores unsupported."""
- unsup = "2016-11-09"
- self.register_versions(
- [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup,
- openstack.OS_LATEST])
- self.assertEqual(
- openstack.OS_LIBERTY,
- openstack.MetadataReader(self.burl)._find_working_version())
-
- def test__find_working_version_uses_latest(self):
- """'latest' should be used if no supported versions."""
- unsup1, unsup2 = ("2016-11-09", '2017-06-06')
- self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
- self.assertEqual(
- openstack.OS_LATEST,
- openstack.MetadataReader(self.burl)._find_working_version())
-
- def test_read_v2_os_ocata(self):
- """Validate return value of read_v2 for os_ocata data."""
- md = copy.deepcopy(self.md_base)
- md['devices'] = []
- network_data = {'links': [], 'networks': [], 'services': []}
- vendor_data = {}
- vendor_data2 = {"static": {}}
-
- data = {
- 'meta_data.json': json.dumps(md),
- 'network_data.json': json.dumps(network_data),
- 'vendor_data.json': json.dumps(vendor_data),
- 'vendor_data2.json': json.dumps(vendor_data2),
- }
-
- self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
- self.register_version(openstack.OS_OCATA, data)
-
- mock_read_ec2 = test_helpers.mock.MagicMock(
- return_value={'instance-id': 'unused-ec2'})
- expected_md = copy.deepcopy(md)
- expected_md.update(
- {'instance-id': md['uuid'], 'local-hostname': md['hostname']})
- expected = {
- 'userdata': '', # Annoying, no user-data results in empty string.
- 'version': 2,
- 'metadata': expected_md,
- 'vendordata': vendor_data,
- 'networkdata': network_data,
- 'ec2-metadata': mock_read_ec2.return_value,
- 'files': {},
- }
- reader = openstack.MetadataReader(self.burl)
- reader._read_ec2_metadata = mock_read_ec2
- self.assertEqual(expected, reader.read_v2())
- self.assertEqual(1, mock_read_ec2.call_count)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
deleted file mode 100644
index 16773de5..00000000
--- a/tests/unittests/test_datasource/test_ovf.py
+++ /dev/null
@@ -1,544 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import base64
-import os
-
-from collections import OrderedDict
-from textwrap import dedent
-
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceOVF as dsovf
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
- CustomScriptNotFound)
-
-MPATH = 'cloudinit.sources.DataSourceOVF.'
-
-NOT_FOUND = None
-
-OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
- oe:id="WebTier">
- <!-- Information about hypervisor platform -->
- <oe:PlatformSection>
- <Kind>ESX Server</Kind>
- <Version>3.0.1</Version>
- <Vendor>VMware, Inc.</Vendor>
- <Locale>en_US</Locale>
- </oe:PlatformSection>
- <!--- Properties defined for this virtual machine -->
- <PropertySection>
-{properties}
- </PropertySection>
-</Environment>
-"""
-
-
-def fill_properties(props, template=OVF_ENV_CONTENT):
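-    # Render an ovf-env.xml document with one <Property> line per item.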
- lines = []
- prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
- for key, val in props.items():
- lines.append(prop_tmpl.format(key=key, val=val))
- indent = " "
- properties = ''.join([indent + line + "\n" for line in lines])
- return template.format(properties=properties)
-
-
-class TestReadOvfEnv(CiTestCase):
- def test_with_b64_userdata(self):
- user_data = "#!/bin/sh\necho hello world\n"
- user_data_b64 = base64.b64encode(user_data.encode()).decode()
- props = {"user-data": user_data_b64, "password": "passw0rd",
- "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual(user_data.encode(), ud)
- self.assertEqual({'password': "passw0rd"}, cfg)
-
- def test_with_non_b64_userdata(self):
- user_data = "my-user-data"
- props = {"user-data": user_data, "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual(user_data.encode(), ud)
- self.assertEqual({}, cfg)
-
- def test_with_no_userdata(self):
- props = {"password": "passw0rd", "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual({'password': "passw0rd"}, cfg)
- self.assertIsNone(ud)
-
-
-class TestMarkerFiles(CiTestCase):
-
- def setUp(self):
- super(TestMarkerFiles, self).setUp()
- self.tdir = self.tmp_dir()
-
- def test_false_when_markerid_none(self):
- """Return False when markerid provided is None."""
- self.assertFalse(
- dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir))
-
- def test_markerid_file_exist(self):
- """Return False when markerid file path does not exist,
- True otherwise."""
- self.assertFalse(
- dsovf.check_marker_exists('123', self.tdir))
-
- marker_file = self.tmp_path('.markerfile-123.txt', self.tdir)
- util.write_file(marker_file, '')
- self.assertTrue(
- dsovf.check_marker_exists('123', self.tdir)
- )
-
- def test_marker_file_setup(self):
- """Test creation of marker files."""
- markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir)
- self.assertFalse(os.path.exists(markerfilepath))
- dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir)
- self.assertTrue(os.path.exists(markerfilepath))
-
-
-class TestDatasourceOVF(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestDatasourceOVF, self).setUp()
- self.datasource = dsovf.DataSourceOVF
- self.tdir = self.tmp_dir()
-
- def test_get_data_false_on_none_dmi_data(self):
- """When dmi for system-product-name is None, get_data returns False."""
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': None,
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
- self.assertIn(
- 'DEBUG: No system-product-name found', self.logs.getvalue())
-
- def test_get_data_no_vmware_customization_disabled(self):
- """When vmware customization is disabled via sys_cfg log a message."""
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
- retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
- self.assertIn(
- 'DEBUG: Customization for VMware platform is disabled.',
- self.logs.getvalue())
-
- def test_get_data_vmware_customization_disabled(self):
- """When cloud-init workflow for vmware is enabled via sys_cfg log a
- message.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345345
- """)
- util.write_file(conf_file, conf_content)
- with mock.patch(MPATH + 'get_tools_config', return_value='true'):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_cust_script_disabled(self):
- """If custom script is disabled by VMware tools configuration,
- raise a RuntimeError.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """)
- util.write_file(conf_file, conf_content)
-        # Prepare the custom script
- customscript = self.tmp_path('test-script', self.tdir)
- util.write_file(customscript, "This is the post cust script")
-
- with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(RuntimeError) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- self.assertIn('Custom script is disabled by VM Administrator',
- str(context.exception))
-
- def test_get_data_cust_script_enabled(self):
- """If custom script is enabled by VMware tools configuration,
- execute the script.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """)
- util.write_file(conf_file, conf_content)
-
- # Mock custom script is enabled by return true when calling
- # get_tools_config
- with mock.patch(MPATH + 'get_tools_config', return_value="true"):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- # Verify custom script is trying to be executed
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_force_run_post_script_is_yes(self):
- """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
- enable-custom-scripts is not defined in VM Tools configuration
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
- # default value is TRUE
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- DEFAULT-RUN-POST-CUST-SCRIPT = yes
- """)
- util.write_file(conf_file, conf_content)
-
- # Mock get_tools_config(section, key, defaultVal) to return
- # defaultVal
- def my_get_tools_config(*args, **kwargs):
- return args[2]
-
- with mock.patch(MPATH + 'get_tools_config',
- side_effect=my_get_tools_config):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- # Verify custom script still runs although it is
- # disabled by VMware Tools
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_non_vmware_seed_platform_info(self):
- """Platform info properly reports when on non-vmware platforms."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
- # Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
- util.write_file(ovf_env, OVF_ENV_CONTENT)
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- 'ovf (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
-
- def test_get_data_vmware_seed_platform_info(self):
- """Platform info properly reports when on VMware platform."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
- # Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
- util.write_file(ovf_env, OVF_ENV_CONTENT)
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- 'vmware (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
-
-
-class TestTransportIso9660(CiTestCase):
-
- def setUp(self):
- super(TestTransportIso9660, self).setUp()
- self.add_patch('cloudinit.util.find_devs_with',
- 'm_find_devs_with')
- self.add_patch('cloudinit.util.mounts', 'm_mounts')
- self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb')
- self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env',
- 'm_get_ovf_env')
- self.m_get_ovf_env.return_value = ('myfile', 'mycontent')
-
- def test_find_already_mounted(self):
- """Check we call get_ovf_env from on matching mounted devices"""
- mounts = {
- '/dev/sr9': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- self.m_mounts.return_value = mounts
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
-
- def test_find_already_mounted_skips_non_iso9660(self):
- """Check we call get_ovf_env ignoring non iso9660"""
- mounts = {
- '/dev/xvdb': {
- 'fstype': 'vfat',
- 'mountpoint': 'wark/foobar',
- 'opts': 'defaults,noatime',
- },
- '/dev/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- # We use an OrderedDict here to ensure we check xvdb before xvdc
- # as we're not mocking the regex matching, however, if we place
- # an entry in the results then we can be reasonably sure that
- # we're skipping an entry which fails to match.
- self.m_mounts.return_value = (
- OrderedDict(sorted(mounts.items(), key=lambda t: t[0])))
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
-
- def test_find_already_mounted_matches_kname(self):
- """Check we dont regex match on basename of the device"""
- mounts = {
- '/dev/foo/bar/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- # we're skipping an entry which fails to match.
- self.m_mounts.return_value = mounts
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
-
- def test_mount_cb_called_on_blkdevs_with_iso9660(self):
- """Check we call mount_cb on blockdevs with iso9660 only"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
- """Check we call mount_cb on blockdevs with iso9660 and match regex"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = [
- '/dev/abc', '/dev/my-cdrom', '/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_not_called_no_matches(self):
- """Check we don't call mount_cb if nothing matches"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/vg/myovf']
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
- self.assertEqual(0, self.m_mount_cb.call_count)
-
- def test_mount_cb_called_require_iso_false(self):
- """Check we call mount_cb on blockdevs with require_iso=False"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/xvdz']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual(
- "mycontent", dsovf.transport_iso9660(require_iso=False))
-
- self.m_mount_cb.assert_called_with(
- "/dev/xvdz", dsovf.get_ovf_env, mtype=None)
-
- def test_maybe_cdrom_device_none(self):
- """Test maybe_cdrom_device returns False for none/empty input"""
- self.assertFalse(dsovf.maybe_cdrom_device(None))
- self.assertFalse(dsovf.maybe_cdrom_device(''))
-
- def test_maybe_cdrom_device_non_string_exception(self):
- """Test maybe_cdrom_device raises ValueError on non-string types"""
- with self.assertRaises(ValueError):
- dsovf.maybe_cdrom_device({'a': 'eleven'})
-
- def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
- """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
- self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
-
- def test_maybe_cdrom_device_true_on_hd_partitions(self):
- """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('hdz9'))
-
- def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
- """Test maybe_cdrom_device normalizes paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9'))
- self.assertTrue(dsovf.maybe_cdrom_device('///sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('/sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda'))
-
- def test_maybe_cdrom_device_true_on_xvd_partitions(self):
- """Test maybe_cdrom_device returns true on xvd*"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda'))
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
-
-
-@mock.patch(MPATH + "subp.which")
-@mock.patch(MPATH + "subp.subp")
-class TestTransportVmwareGuestinfo(CiTestCase):
- """Test the com.vmware.guestInfo transport implemented in
- transport_vmware_guestinfo."""
-
- rpctool = 'vmware-rpctool'
- with_logs = True
- rpctool_path = '/not/important/vmware-rpctool'
-
- def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
- m_which.return_value = None
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(0, m_subp.call_count,
- "subp should not be called if no rpctool in path.")
-
- def test_notfound_on_exit_code_1(self, m_subp, m_which):
- """If vmware-rpctool exits 1, then must return not found."""
- m_which.return_value = self.rpctool_path
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
- self.assertNotIn("WARNING", self.logs.getvalue(),
- "exit code of 1 by rpctool should not cause warning.")
-
- def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
- """If vmware-rpctool exited 0 with no stdout is normal not-found.
-
-        This isn't actually a case I've seen. Normally, on "not found",
-        rpctool exits 1 with 'No value found' on stderr. But cover the
-        case where it exits 0 and just writes nothing to stdout.
- """
- m_which.return_value = self.rpctool_path
- m_subp.return_value = ('', '')
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
-
- def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
- """If vmware-rpctool exits non zero or 1, warnings should be logged."""
- m_which.return_value = self.rpctool_path
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout=None, stderr="No value found", exit_code=2, cmd=["unused"])
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
- self.assertIn("WARNING", self.logs.getvalue(),
- "exit code of 2 by rpctool should log WARNING.")
-
- def test_found_when_guestinfo_present(self, m_subp, m_which):
- """When there is a ovf info, transport should return it."""
- m_which.return_value = self.rpctool_path
- content = fill_properties({})
- m_subp.return_value = (content, '')
- self.assertEqual(content, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py
deleted file mode 100644
index d017510e..00000000
--- a/tests/unittests/test_datasource/test_rbx.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import json
-
-from cloudinit import helpers
-from cloudinit import distros
-from cloudinit.sources import DataSourceRbxCloud as ds
-from cloudinit.tests.helpers import mock, CiTestCase, populate_dir
-from cloudinit import subp
-
-DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
-
-CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \
- "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \
- "tToyGP41.s1"
-
-CLOUD_METADATA = {
- "vm": {
- "memory": 4,
- "cpu": 2,
- "name": "vm-image-builder",
- "_id": "5beab44f680cffd11f0e60fc"
- },
- "additionalMetadata": {
- "username": "guru",
- "sshKeys": ["ssh-rsa ..."],
- "password": {
- "sha512": CRYPTO_PASS
- }
- },
- "disk": [
- {"size": 10, "type": "ssd",
- "name": "vm-image-builder-os",
- "_id": "5beab450680cffd11f0e60fe"},
- {"size": 2, "type": "ssd",
- "name": "ubuntu-1804-bionic",
- "_id": "5bef002c680cffd11f107590"}
- ],
- "netadp": [
- {
- "ip": [{"address": "62.181.8.174"}],
- "network": {
- "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
- "routing": [],
- "gateway": "62.181.8.1",
- "netmask": "255.255.248.0",
- "name": "public",
- "type": "public",
- "_id": "5784e97be2627505227b578c"
- },
- "speed": 1000,
- "type": "hv",
- "macaddress": "00:15:5D:FF:0F:03",
- "_id": "5beab450680cffd11f0e6102"
- },
- {
- "ip": [{"address": "10.209.78.11"}],
- "network": {
- "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]},
- "routing": [],
- "gateway": "10.209.78.1",
- "netmask": "255.255.255.0",
- "name": "network-determined-bardeen",
- "type": "private",
- "_id": "5beaec64680cffd11f0e7c31"
- },
- "speed": 1000,
- "type": "hv",
- "macaddress": "00:15:5D:FF:0F:24",
- "_id": "5bec18c6680cffd11f0f0d8b"
- }
- ],
- "dvddrive": [{"iso": {}}]
-}
-
-
-class TestRbxDataSource(CiTestCase):
- parsed_user = None
- allowed_subp = ['bash']
-
- def _fetch_distro(self, kind):
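-        # Build a distro instance (e.g. 'ubuntu') with empty config.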
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def setUp(self):
- super(TestRbxDataSource, self).setUp()
- self.tmp = self.tmp_dir()
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp}
- )
-
- # defaults for few tests
- self.ds = ds.DataSourceRbxCloud
- self.seed_dir = self.paths.seed_dir
- self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}}
-
- def test_seed_read_user_data_callback_empty_file(self):
- populate_user_metadata(self.seed_dir, '')
- populate_cloud_metadata(self.seed_dir, {})
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertIsNone(results)
-
- def test_seed_read_user_data_callback_valid_disk(self):
- populate_user_metadata(self.seed_dir, '')
- populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertTrue('metadata' in results)
- self.assertTrue('cfg' in results)
-
- def test_seed_read_user_data_callback_userdata(self):
- userdata = "#!/bin/sh\nexit 1"
- populate_user_metadata(self.seed_dir, userdata)
- populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
-
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertEqual(results['userdata'], userdata)
-
- def test_generate_network_config(self):
- expected = {
- 'version': 1,
- 'config': [
- {
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
- 'netmask': '255.255.248.0',
- 'address': '62.181.8.174',
- 'type': 'static', 'gateway': '62.181.8.1'}
- ],
- 'type': 'physical',
- 'name': 'eth0',
- 'mac_address': '00:15:5d:ff:0f:03'
- },
- {
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
- 'netmask': '255.255.255.0',
- 'address': '10.209.78.11',
- 'type': 'static',
- 'gateway': '10.209.78.1'}
- ],
- 'type': 'physical',
- 'name': 'eth1',
- 'mac_address': '00:15:5d:ff:0f:24'
- }
- ]
- }
-        self.assertEqual(
-            expected,
-            ds.generate_network_config(CLOUD_METADATA['netadp'])
-        )
-
- @mock.patch(DS_PATH + '.subp.subp')
- def test_gratuitous_arp_run_standard_arping(self, m_subp):
- """Test handle run arping & parameters."""
- items = [
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
- },
- ]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
- )
-
- @mock.patch(DS_PATH + '.subp.subp')
- def test_handle_rhel_like_arping(self, m_subp):
- """Test handle on RHEL-like distros."""
- items = [
- {
- 'source': '172.16.6.104',
- 'destination': '172.17.0.2',
- }
- ]
- ds.gratuitous_arp(items, self._fetch_distro('fedora'))
- self.assertEqual([
- mock.call(
- ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
- )],
- m_subp.call_args_list
- )
-
- @mock.patch(
- DS_PATH + '.subp.subp',
- side_effect=subp.ProcessExecutionError()
- )
- def test_continue_on_arping_error(self, m_subp):
- """Continue when command error"""
- items = [
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
- },
- ]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
- )
-
-
-def populate_cloud_metadata(path, data):
- populate_dir(path, {'cloud.json': json.dumps(data)})
-
-
-def populate_user_metadata(path, data):
- populate_dir(path, {'user.data': data})
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
deleted file mode 100644
index 32f3274a..00000000
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ /dev/null
@@ -1,473 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import json
-
-import httpretty
-import requests
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit.sources import DataSourceScaleway
-
-from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
-
-
-class DataResponses(object):
- """
- Possible responses of the API endpoint
- 169.254.42.42/user_data/cloud-init and
- 169.254.42.42/vendor_data/cloud-init.
- """
-
- FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
-
- @staticmethod
- def rate_limited(method, uri, headers):
- return 429, headers, ''
-
- @staticmethod
- def api_error(method, uri, headers):
- return 500, headers, ''
-
- @classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, cls.FAKE_USER_DATA
-
- @staticmethod
- def empty(method, uri, headers):
- """
- No user data for this server.
- """
- return 404, headers, ''
-
-
-class MetadataResponses(object):
- """
- Possible responses of the metadata API.
- """
-
- FAKE_METADATA = {
- 'id': '00000000-0000-0000-0000-000000000000',
- 'hostname': 'scaleway.host',
- 'tags': [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- ],
- 'ssh_public_keys': [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- }
-
- @classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, json.dumps(cls.FAKE_METADATA)
-
-
-class TestOnScaleway(CiTestCase):
-
- def setUp(self):
- super(TestOnScaleway, self).setUp()
- self.tmp = self.tmp_dir()
-
- def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
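-        # Each fake_* argument is a (mock, enabled) pair: the mock to set up
-        # and whether that Scaleway detection signal should report positive.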
- mock, faked = fake_dmi
- mock.return_value = 'Scaleway' if faked else 'Whatever'
-
- mock, faked = fake_file_exists
- mock.return_value = faked
-
- mock, faked = fake_cmdline
- mock.return_value = \
- 'initrd=initrd showopts scaleway nousb' if faked \
- else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic'
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertFalse(DataSourceScaleway.on_scaleway())
-
- # When not on Scaleway, get_data() returns False.
- datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})
- )
- self.assertFalse(datasource.get_data())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- dmidecode returns "Scaleway".
- """
- # dmidecode returns "Scaleway"
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, True),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- /var/run/scaleway exists.
- """
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, True),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- "scaleway" in /proc/cmdline.
- """
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, True)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
-
-def get_source_address_adapter(*args, **kwargs):
- """
-    The Scaleway user/vendor data API must be called from a privileged port.
-
- If the unittests are run as non-root, the user doesn't have the permission
- to bind on ports below 1024.
-
-    This adapter drops the privileged source-address binding; the HTTP call
-    is mocked by httpretty anyway.
- """
- kwargs.pop('source_address')
- return requests.adapters.HTTPAdapter(*args, **kwargs)
-
-
-class TestDataSourceScaleway(HttprettyTestCase):
-
- def setUp(self):
- tmp = self.tmp_dir()
- self.datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp})
- )
- super(TestDataSourceScaleway, self).setUp()
-
- self.metadata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url']
- self.userdata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url']
- self.vendordata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
-
- self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway',
- '_m_on_scaleway', return_value=True)
- self.add_patch(
- 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic',
- '_m_find_fallback_nic', return_value='scalewaynic0')
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, user data and vendor data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- # Make user data API return a valid response
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.get_ok)
- self.datasource.get_data()
-
- self.assertEqual(self.datasource.get_instance_id(),
- MetadataResponses.FAKE_METADATA['id'])
-        self.assertEqual(
-            sorted(self.datasource.get_public_ssh_keys()),
-            sorted([
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
-            ]))
- self.assertEqual(self.datasource.get_hostname(),
- MetadataResponses.FAKE_METADATA['hostname'])
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(self.datasource.get_vendordata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertIsNone(self.datasource.availability_zone)
- self.assertIsNone(self.datasource.region)
- self.assertEqual(sleep.call_count, 0)
-
- def test_ssh_keys_empty(self):
- """
-        get_public_ssh_keys() should return an empty list if no ssh keys
-        are available
- """
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = []
- self.assertEqual(self.datasource.get_public_ssh_keys(), [])
-
- def test_ssh_keys_only_tags(self):
- """
- get_public_ssh_keys() should return list of keys available in tags
- """
- self.datasource.metadata['tags'] = [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
- ]
- self.datasource.metadata['ssh_public_keys'] = []
-        self.assertEqual(
-            sorted(self.datasource.get_public_ssh_keys()),
-            sorted([
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
-            ]))
-
- def test_ssh_keys_only_conf(self):
- """
- get_public_ssh_keys() should return list of keys available in
- ssh_public_keys field
- """
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
-        self.assertEqual(
-            sorted(self.datasource.get_public_ssh_keys()),
-            sorted([
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
-            ]))
-
- def test_ssh_keys_both(self):
- """
- get_public_ssh_keys() should return a merge of keys available
- in ssh_public_keys and tags
- """
- self.datasource.metadata['tags'] = [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- ]
-
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
-        self.assertEqual(
-            sorted(self.datasource.get_public_ssh_keys()),
-            sorted([
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
-                u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
-            ]))
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, but no user data nor vendor data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- # Make user and vendor data APIs return HTTP/404, which means there is
- # no user / vendor data for the server.
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.empty)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
- self.datasource.get_data()
- self.assertIsNone(self.datasource.get_userdata_raw())
- self.assertIsNone(self.datasource.get_vendordata_raw())
- self.assertEqual(sleep.call_count, 0)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() is rate limited two times by the metadata API when fetching
- user data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
-
- httpretty.register_uri(
- httpretty.GET, self.userdata_url,
- responses=[
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.get_ok),
- ]
- )
- self.datasource.get_data()
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(sleep.call_count, 2)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_ok(self, m_get_cmdline, fallback_nic):
- """
- network_config will only generate IPv4 config if no ipv6 data is
- available in the metadata
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
-
- netcfg = self.datasource.network_config
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
- """
- network_config will only generate IPv4/v6 configs if ipv6 data is
- available in the metadata
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = {
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
-
- netcfg = self.datasource.network_config
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [
- {
- 'type': 'dhcp4'
- },
- {
- 'type': 'static',
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
- ]
- }
- ]
- }
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_existing(self, m_get_cmdline, fallback_nic):
- """
- network_config() should return the same data if a network config
- already exists
- """
- m_get_cmdline.return_value = 'scaleway'
- self.datasource._network_config = '0xdeadbeef'
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, '0xdeadbeef')
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_unset(self, m_get_cmdline, fallback_nic):
- """
- _network_config will be set to sources.UNSET after the first boot.
-        Make sure it behaves correctly.
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
- self.datasource._network_config = sources.UNSET
-
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning')
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_cached_none(self, m_get_cmdline, fallback_nic,
- logwarning):
- """
- network_config() should return config data if cached data is None
- rather than sources.UNSET
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
- self.datasource._network_config = None
-
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, resp)
- logwarning.assert_called_with('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
diff --git a/cloudinit/tests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py
index eadae81c..7e5b54c0 100644
--- a/cloudinit/tests/test_dhclient_hook.py
+++ b/tests/unittests/test_dhclient_hook.py
@@ -2,47 +2,48 @@
"""Tests for cloudinit.dhclient_hook."""
-from cloudinit import dhclient_hook as dhc
-from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir
-
import argparse
import json
import os
from unittest import mock
+from cloudinit import dhclient_hook as dhc
+from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir
+
class TestDhclientHook(CiTestCase):
ex_env = {
- 'interface': 'eth0',
- 'new_dhcp_lease_time': '3600',
- 'new_host_name': 'x1',
- 'new_ip_address': '10.145.210.163',
- 'new_subnet_mask': '255.255.255.0',
- 'old_host_name': 'x1',
- 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin',
- 'pid': '614',
- 'reason': 'BOUND',
+ "interface": "eth0",
+ "new_dhcp_lease_time": "3600",
+ "new_host_name": "x1",
+ "new_ip_address": "10.145.210.163",
+ "new_subnet_mask": "255.255.255.0",
+ "old_host_name": "x1",
+ "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
+ "pid": "614",
+ "reason": "BOUND",
}
# some older versions of dhclient put the same content,
# but in upper case with DHCP4_ instead of new_
ex_env_dhcp4 = {
- 'REASON': 'BOUND',
- 'DHCP4_dhcp_lease_time': '3600',
- 'DHCP4_host_name': 'x1',
- 'DHCP4_ip_address': '10.145.210.163',
- 'DHCP4_subnet_mask': '255.255.255.0',
- 'INTERFACE': 'eth0',
- 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin',
- 'pid': '614',
+ "REASON": "BOUND",
+ "DHCP4_dhcp_lease_time": "3600",
+ "DHCP4_host_name": "x1",
+ "DHCP4_ip_address": "10.145.210.163",
+ "DHCP4_subnet_mask": "255.255.255.0",
+ "INTERFACE": "eth0",
+ "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
+ "pid": "614",
}
expected = {
- 'dhcp_lease_time': '3600',
- 'host_name': 'x1',
- 'ip_address': '10.145.210.163',
- 'subnet_mask': '255.255.255.0'}
+ "dhcp_lease_time": "3600",
+ "host_name": "x1",
+ "ip_address": "10.145.210.163",
+ "subnet_mask": "255.255.255.0",
+ }
def setUp(self):
super(TestDhclientHook, self).setUp()
@@ -50,7 +51,7 @@ class TestDhclientHook(CiTestCase):
def test_handle_args(self):
"""quick test of call to handle_args."""
- nic = 'eth0'
+ nic = "eth0"
args = argparse.Namespace(event=dhc.UP, interface=nic)
with mock.patch.dict("os.environ", clear=True, values=self.ex_env):
dhc.handle_args(dhc.NAME, args, data_d=self.tmp)
@@ -61,45 +62,51 @@ class TestDhclientHook(CiTestCase):
def test_run_hook_up_creates_dir(self):
"""If dir does not exist, run_hook should create it."""
subd = self.tmp_path("subdir", self.tmp)
- nic = 'eth1'
- dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env)
+ nic = "eth1"
+ dhc.run_hook(nic, "up", data_d=subd, env=self.ex_env)
self.assertEqual(
- set([nic + ".json"]), set(dir2dict(subd + os.path.sep)))
+ set([nic + ".json"]), set(dir2dict(subd + os.path.sep))
+ )
def test_run_hook_up(self):
"""Test expected use of run_hook_up."""
- nic = 'eth0'
- dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env)
+ nic = "eth0"
+ dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env)
found = dir2dict(self.tmp + os.path.sep)
self.assertEqual([nic + ".json"], list(found.keys()))
self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
def test_run_hook_up_dhcp4_prefix(self):
"""Test run_hook filters correctly with older DHCP4_ data."""
- nic = 'eth0'
- dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4)
+ nic = "eth0"
+ dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env_dhcp4)
found = dir2dict(self.tmp + os.path.sep)
self.assertEqual([nic + ".json"], list(found.keys()))
self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
def test_run_hook_down_deletes(self):
"""down should delete the created json file."""
- nic = 'eth1'
+ nic = "eth1"
populate_dir(
- self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'})
- dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'})
+ self.tmp, {nic + ".json": "{'abcd'}", "myfile.txt": "text"}
+ )
+ dhc.run_hook(nic, "down", data_d=self.tmp, env={"old_host_name": "x1"})
self.assertEqual(
- set(['myfile.txt']),
- set(dir2dict(self.tmp + os.path.sep)))
+ set(["myfile.txt"]), set(dir2dict(self.tmp + os.path.sep))
+ )
def test_get_parser(self):
"""Smoke test creation of get_parser."""
# cloud-init main uses 'action'.
- event, interface = (dhc.UP, 'mynic0')
+ event, interface = (dhc.UP, "mynic0")
self.assertEqual(
- argparse.Namespace(event=event, interface=interface,
- action=(dhc.NAME, dhc.handle_args)),
- dhc.get_parser().parse_args([event, interface]))
+ argparse.Namespace(
+ event=event,
+ interface=interface,
+ action=(dhc.NAME, dhc.handle_args),
+ ),
+ dhc.get_parser().parse_args([event, interface]),
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_arch.py b/tests/unittests/test_distros/test_arch.py
deleted file mode 100644
index a95ba3b5..00000000
--- a/tests/unittests/test_distros/test_arch.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.distros.arch import _render_network
-from cloudinit import util
-
-from cloudinit.tests.helpers import (CiTestCase, dir2dict)
-
-from . import _get_distro
-
-
-class TestArch(CiTestCase):
-
- def test_get_distro(self):
- distro = _get_distro("arch")
- hostname = "myhostname"
- hostfile = self.tmp_path("hostfile")
- distro._write_hostname(hostname, hostfile)
- self.assertEqual(hostname + "\n", util.load_file(hostfile))
-
-
-class TestRenderNetwork(CiTestCase):
- def test_basic_static(self):
- """Just the most basic static config.
-
- note 'lo' should not be rendered as an interface."""
- entries = {'eth0': {'auto': True,
- 'dns-nameservers': ['8.8.8.8'],
- 'bootproto': 'static',
- 'address': '10.0.0.2',
- 'gateway': '10.0.0.1',
- 'netmask': '255.255.255.0'},
- 'lo': {'auto': True}}
- target = self.tmp_dir()
- devs = _render_network(entries, target=target)
- files = dir2dict(target, prefix=target)
- self.assertEqual(['eth0'], devs)
- self.assertEqual(
- {'/etc/netctl/eth0': '\n'.join([
- "Address=10.0.0.2/255.255.255.0",
- "Connection=ethernet",
- "DNS=('8.8.8.8')",
- "Gateway=10.0.0.1",
- "IP=static",
- "Interface=eth0", ""]),
- '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files)
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
deleted file mode 100644
index 3a68f2a9..00000000
--- a/tests/unittests/test_distros/test_bsd_utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import cloudinit.distros.bsd_utils as bsd_utils
-
-from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock)
-
-RC_FILE = """
-if something; then
- do something here
-fi
-hostname={hostname}
-"""
-
-
-class TestBsdUtils(CiTestCase):
-
- def setUp(self):
- super().setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.load_file = patches.enter_context(
- mock.patch.object(bsd_utils.util, 'load_file'))
-
- self.write_file = patches.enter_context(
- mock.patch.object(bsd_utils.util, 'write_file'))
-
- def test_get_rc_config_value(self):
- self.load_file.return_value = 'hostname=foo\n'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
- self.load_file.assert_called_with('/etc/rc.conf')
-
- self.load_file.return_value = 'hostname=foo'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = 'hostname="foo"'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = "hostname='foo'"
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = 'hostname=\'foo"'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"")
-
- self.load_file.return_value = ''
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None)
-
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo")
-
- def test_set_rc_config_value_unchanged(self):
- # bsd_utils.set_rc_config_value('hostname', 'foo')
- # self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
-
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- self.write_file.assert_not_called()
-
- def test_set_rc_config_value(self):
- bsd_utils.set_rc_config_value('hostname', 'foo')
- self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
-
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- bsd_utils.set_rc_config_value('hostname', 'bar')
- self.write_file.assert_called_with(
- '/etc/rc.conf',
- RC_FILE.format(hostname='bar')
- )
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
deleted file mode 100644
index 94ab052d..00000000
--- a/tests/unittests/test_distros/test_create_users.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import re
-
-from cloudinit import distros
-from cloudinit import ssh_util
-from cloudinit.tests.helpers import (CiTestCase, mock)
-
-
-class MyBaseDistro(distros.Distro):
- # MyBaseDistro is here to test base Distro class implementations
-
- def __init__(self, name="basedistro", cfg=None, paths=None):
- if not cfg:
- cfg = {}
- if not paths:
- paths = {}
- super(MyBaseDistro, self).__init__(name, cfg, paths)
-
- def install_packages(self, pkglist):
- raise NotImplementedError()
-
- def _write_network(self, settings):
- raise NotImplementedError()
-
- def package_command(self, cmd, args=None, pkgs=None):
- raise NotImplementedError()
-
- def update_package_sources(self):
- raise NotImplementedError()
-
- def apply_locale(self, locale, out_fn=None):
- raise NotImplementedError()
-
- def set_timezone(self, tz):
- raise NotImplementedError()
-
- def _read_hostname(self, filename, default=None):
- raise NotImplementedError()
-
- def _write_hostname(self, hostname, filename):
- raise NotImplementedError()
-
- def _read_system_hostname(self):
- raise NotImplementedError()
-
-
-@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
-@mock.patch("cloudinit.distros.subp.subp")
-class TestCreateUser(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestCreateUser, self).setUp()
- self.dist = MyBaseDistro()
-
- def _useradd2call(self, args):
- # return a mock call for the useradd command in args
- # with expected 'logstring'.
- args = ['useradd'] + args
- logcmd = [a for a in args]
- for i in range(len(args)):
- if args[i] in ('--password',):
- logcmd[i + 1] = 'REDACTED'
- return mock.call(args, logstring=logcmd)
-
- def test_basic(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- def test_no_home(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, no_create_home=True)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-M']),
- mock.call(['passwd', '-l', user])])
-
- def test_system_user(self, m_subp, m_is_snappy):
- # system user should have no home and get --system
- user = 'foouser'
- self.dist.create_user(user, system=True)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '--system', '-M']),
- mock.call(['passwd', '-l', user])])
-
- def test_explicit_no_home_false(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, no_create_home=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- def test_unlocked(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, lock_passwd=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m'])])
-
- def test_set_password(self, m_subp, m_is_snappy):
- user = 'foouser'
- password = 'passfoo'
- self.dist.create_user(user, passwd=password)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '--password', password, '-m']),
- mock.call(['passwd', '-l', user])])
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_group_added(self, m_is_group, m_subp, m_is_snappy):
- m_is_group.return_value = False
- user = 'foouser'
- self.dist.create_user(user, groups=['group1'])
- expected = [
- mock.call(['groupadd', 'group1']),
- self._useradd2call([user, '--groups', 'group1', '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
- ex_groups = ['existing_group']
- groups = ['group1', ex_groups[0]]
- m_is_group.side_effect = lambda m: m in ex_groups
- user = 'foouser'
- self.dist.create_user(user, groups=groups)
- expected = [
- mock.call(['groupadd', 'group1']),
- self._useradd2call([user, '--groups', ','.join(groups), '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_create_groups_with_whitespace_string(
- self, m_is_group, m_subp, m_is_snappy):
- # groups supported as a comma delimited string even with white space
- m_is_group.return_value = False
- user = 'foouser'
- self.dist.create_user(user, groups='group1, group2')
- expected = [
- mock.call(['groupadd', 'group1']),
- mock.call(['groupadd', 'group2']),
- self._useradd2call([user, '--groups', 'group1,group2', '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- def test_explicit_sudo_false(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, sudo=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_string(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys allows string and calls setup_user_keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys='mykey')
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
- m_setup_user_keys.assert_called_once_with(set(['mykey']), user)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_list(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys allows lists and calls setup_user_keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2'])
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
- m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_integer(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys warns on non-iterable/string type."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys=-1)
- m_setup_user_keys.assert_called_once_with(set([]), user)
- match = re.match(
- r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for'
- ' \'ssh_authorized_keys\'.*',
- self.logs.getvalue(),
- re.DOTALL)
- self.assertIsNotNone(
- match, 'Missing ssh_authorized_keys invalid type warning')
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_no_cloud_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Log a warning when trying to redirect a user no cloud ssh keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_redirect_user='someuser')
- self.assertIn(
- 'WARNING: Unable to disable SSH logins for foouser given '
- 'ssh_redirect_user: someuser. No cloud public-keys present.\n',
- self.logs.getvalue())
- m_setup_user_keys.assert_not_called()
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_with_cloud_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Disable ssh when ssh_redirect_user and cloud ssh keys are set."""
- user = 'foouser'
- self.dist.create_user(
- user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1'])
- disable_prefix = ssh_util.DISABLE_USER_OPTS
- disable_prefix = disable_prefix.replace('$USER', 'someuser')
- disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
- m_setup_user_keys.assert_called_once_with(
- set(['key1']), 'foouser', options=disable_prefix)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Do not disable ssh_authorized_keys when ssh_redirect_user is set."""
- user = 'foouser'
- self.dist.create_user(
- user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser',
- cloud_public_ssh_keys=['key1'])
- disable_prefix = ssh_util.DISABLE_USER_OPTS
- disable_prefix = disable_prefix.replace('$USER', 'someuser')
- disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
- self.assertEqual(
- m_setup_user_keys.call_args_list,
- [mock.call(set(['auth1']), user), # not disabled
- mock.call(set(['key1']), 'foouser', options=disable_prefix)])
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp,
- m_is_snappy):
- """Lock uses usermod --lock if no 'passwd' cmd available."""
- m_which.side_effect = lambda m: m in ('usermod',)
- self.dist.lock_passwd("bob")
- self.assertEqual(
- [mock.call(['usermod', '--lock', 'bob'])],
- m_subp.call_args_list)
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_with_passwd_if_available(self, m_which, m_subp,
- m_is_snappy):
- """Lock with only passwd will use passwd."""
- m_which.side_effect = lambda m: m in ('passwd',)
- self.dist.lock_passwd("bob")
- self.assertEqual(
- [mock.call(['passwd', '-l', 'bob'])],
- m_subp.call_args_list)
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp,
- m_is_snappy):
- """Lock with no commands available raises RuntimeError."""
- m_which.return_value = None
- with self.assertRaises(RuntimeError):
- self.dist.lock_passwd("bob")
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py
deleted file mode 100644
index 7ff8240b..00000000
--- a/tests/unittests/test_distros/test_debian.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import util
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-
-@mock.patch("cloudinit.distros.debian.subp.subp")
-class TestDebianApplyLocale(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestDebianApplyLocale, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
- self.spath = self.tmp_path('etc/default/locale', self.new_root)
- cls = distros.fetch("debian")
- self.distro = cls("debian", {}, None)
-
- def test_no_rerun(self, m_subp):
- """If system has defined locale, no re-run is expected."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- m_subp.assert_not_called()
-
- def test_no_regen_on_c_utf8(self, m_subp):
- """If locale is set to C.UTF8, do not attempt to call locale-gen"""
- m_subp.return_value = (None, None)
- locale = 'C.UTF-8'
- util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_if_different(self, m_subp):
- """If system has different locale, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_if_no_file(self, m_subp):
- """If system has no locale file, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_on_unset_system_locale(self, m_subp):
- """If system has unset locale, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_on_mismatched_keys(self, m_subp):
- """If key is LC_ALL and system has only LANG, rerun is expected."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL')
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LC_ALL=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_falseish_locale_raises_valueerror(self, m_subp):
- """locale as None or "" is invalid and should raise ValueError."""
-
- with self.assertRaises(ValueError) as ctext_m:
- self.distro.apply_locale(None)
- m_subp.assert_not_called()
-
- self.assertEqual(
- 'Failed to provide locale value.', str(ctext_m.exception))
-
- with self.assertRaises(ValueError) as ctext_m:
- self.distro.apply_locale("")
- m_subp.assert_not_called()
- self.assertEqual(
- 'Failed to provide locale value.', str(ctext_m.exception))
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
deleted file mode 100644
index 44607489..00000000
--- a/tests/unittests/test_distros/test_generic.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import util
-
-from cloudinit.tests import helpers
-
-import os
-import pytest
-import shutil
-import tempfile
-from unittest import mock
-
-unknown_arch_info = {
- 'arches': ['default'],
- 'failsafe': {'primary': 'http://fs-primary-default',
- 'security': 'http://fs-security-default'}
-}
-
-package_mirrors = [
- {'arches': ['i386', 'amd64'],
- 'failsafe': {'primary': 'http://fs-primary-intel',
- 'security': 'http://fs-security-intel'},
- 'search': {
- 'primary': ['http://%(ec2_region)s.ec2/',
- 'http://%(availability_zone)s.clouds/'],
- 'security': ['http://security-mirror1-intel',
- 'http://security-mirror2-intel']}},
- {'arches': ['armhf', 'armel'],
- 'failsafe': {'primary': 'http://fs-primary-arm',
- 'security': 'http://fs-security-arm'}},
- unknown_arch_info
-]
-
-gpmi = distros._get_package_mirror_info
-gapmi = distros._get_arch_package_mirror_info
-
-
-class TestGenericDistro(helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestGenericDistro, self).setUp()
- # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _write_load_sudoers(self, _user, rules):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- os.makedirs(os.path.join(self.tmp, "etc"))
- os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d'))
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.write_sudo_rules("harlowja", rules)
- contents = util.load_file(d.ci_sudoers_fn)
- return contents
-
- def _count_in(self, lines_look_for, text_content):
- found_amount = 0
- for e in lines_look_for:
- for line in text_content.splitlines():
- line = line.strip()
- if line == e:
- found_amount += 1
- return found_amount
-
- def test_sudoers_ensure_rules(self):
- rules = 'ALL=(ALL:ALL) ALL'
- contents = self._write_load_sudoers('harlowja', rules)
- expected = ['harlowja ALL=(ALL:ALL) ALL']
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_rules_list(self):
- rules = [
- 'ALL=(ALL:ALL) ALL',
- 'B-ALL=(ALL:ALL) ALL',
- 'C-ALL=(ALL:ALL) ALL',
- ]
- contents = self._write_load_sudoers('harlowja', rules)
- expected = [
- 'harlowja ALL=(ALL:ALL) ALL',
- 'harlowja B-ALL=(ALL:ALL) ALL',
- 'harlowja C-ALL=(ALL:ALL) ALL',
- ]
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_new(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
-
- def test_sudoers_ensure_append(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- util.write_file("/etc/sudoers", "josh, josh\n")
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
- self.assertIn("josh", contents)
- self.assertEqual(2, contents.count("josh"))
-
- def test_arch_package_mirror_info_unknown(self):
- """for an unknown arch, we should get back that with arch 'default'."""
- arch_mirrors = gapmi(package_mirrors, arch="unknown")
- self.assertEqual(unknown_arch_info, arch_mirrors)
-
- def test_arch_package_mirror_info_known(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- self.assertEqual(package_mirrors[0], arch_mirrors)
-
- def test_systemd_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd/system')
- self.assertTrue(d.uses_systemd())
-
- def test_systemd_not_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- self.assertFalse(d.uses_systemd())
-
- def test_systemd_symlink(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd')
- os.symlink('/', '/run/systemd/system')
- self.assertFalse(d.uses_systemd())
-
- @mock.patch('cloudinit.distros.debian.read_system_locale')
- def test_get_locale_ubuntu(self, m_locale):
- """Test ubuntu distro returns locale set to C.UTF-8"""
- m_locale.return_value = 'C.UTF-8'
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- locale = d.get_locale()
- self.assertEqual('C.UTF-8', locale)
-
- def test_get_locale_rhel(self):
- """Test rhel distro returns NotImplementedError exception"""
- cls = distros.fetch("rhel")
- d = cls("rhel", {}, None)
- with self.assertRaises(NotImplementedError):
- d.get_locale()
-
- def test_expire_passwd_uses_chpasswd(self):
- """Test ubuntu.expire_passwd uses the passwd command."""
- for d_name in ("ubuntu", "rhel"):
- cls = distros.fetch(d_name)
- d = cls(d_name, {}, None)
- with mock.patch("cloudinit.subp.subp") as m_subp:
- d.expire_passwd("myuser")
- m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
-
- def test_expire_passwd_freebsd_uses_pw_command(self):
- """Test FreeBSD.expire_passwd uses the pw command."""
- cls = distros.fetch("freebsd")
- d = cls("freebsd", {}, None)
- with mock.patch("cloudinit.subp.subp") as m_subp:
- d.expire_passwd("myuser")
- m_subp.assert_called_once_with(
- ["pw", "usermod", "myuser", "-p", "01-Jan-1970"])
-
-
-class TestGetPackageMirrors:
-
- def return_first(self, mlist):
- if not mlist:
- return None
- return mlist[0]
-
- def return_second(self, mlist):
- if not mlist:
- return None
-
- return mlist[1] if len(mlist) > 1 else None
-
- def return_none(self, _mlist):
- return None
-
- def return_last(self, mlist):
- if not mlist:
- return None
- return(mlist[-1])
-
- @pytest.mark.parametrize(
- "allow_ec2_mirror, platform_type, mirrors",
- [
- (True, "ec2", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (True, "other", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (False, "ec2", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (False, "other", [
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'}
- ])
- ])
- def test_get_package_mirror_info_az_ec2(self,
- allow_ec2_mirror,
- platform_type,
- mirrors):
- flag_path = "cloudinit.distros." \
- "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
- with mock.patch(flag_path, allow_ec2_mirror):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(
- availability_zone="us-east-1a",
- platform_type=platform_type)
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == mirrors[0])
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_second)
- assert(results == mirrors[1])
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_none)
- assert(results == package_mirrors[0]['failsafe'])
-
- def test_get_package_mirror_info_az_non_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == {
- 'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror1-intel'}
- )
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- assert(results == {
- 'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror2-intel'}
- )
-
- def test_get_package_mirror_info_none(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone=None)
-
- # because both search entries here use replacement based on
- # availability-zone, the filter will be called with an empty list and
- # failsafe should be taken.
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == {
- 'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror1-intel'}
- )
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- assert(results == {
- 'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'}
- )
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py
deleted file mode 100644
index 8aaa6e48..00000000
--- a/tests/unittests/test_distros/test_hosts.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import unittest
-
-from cloudinit.distros.parsers import hosts
-
-
-BASE_ETC = '''
-# Example
-127.0.0.1 localhost
-192.168.1.10 foo.mydomain.org foo
-192.168.1.10 bar.mydomain.org bar
-146.82.138.7 master.debian.org master
-209.237.226.90 www.opensource.org
-'''
-BASE_ETC = BASE_ETC.strip()
-
-
-class TestHostsHelper(unittest.TestCase):
- def test_parse(self):
- eh = hosts.HostsConf(BASE_ETC)
- self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']])
- self.assertEqual(eh.get_entry('192.168.1.10'),
- [['foo.mydomain.org', 'foo'],
- ['bar.mydomain.org', 'bar']])
- eh = str(eh)
- self.assertTrue(eh.startswith('# Example'))
-
- def test_add(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
- eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3')
- self.assertEqual(eh.get_entry('127.0.0.3'),
- [['blah', 'blah2', 'blah3']])
-
- def test_del(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
-
- eh.del_entries('127.0.0.0')
- self.assertEqual(eh.get_entry('127.0.0.0'), [])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
deleted file mode 100644
index fa48410a..00000000
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit import distros
-from cloudinit.distros import ug_util
-from cloudinit import helpers
-from cloudinit import settings
-
-from cloudinit.tests.helpers import TestCase
-
-
-bcfg = {
- 'name': 'bob',
- 'plain_text_passwd': 'ubuntu',
- 'home': "/home/ubuntu",
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "Ubuntu",
- 'groups': ["foo"]
-}
-
-
-class TestUGNormalize(TestCase):
-
- def setUp(self):
- super(TestUGNormalize, self).setUp()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
-
- def _make_distro(self, dtype, def_user=None):
- cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
- distro_cls = distros.fetch(dtype)
- if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
- return distro
-
- def _norm(self, cfg, distro):
- return ug_util.normalize_users_groups(cfg, distro)
-
- def test_group_dict(self):
- distro = self._make_distro('ubuntu')
- g = {'groups':
- [{'ubuntu': ['foo', 'bar'],
- 'bob': 'users'},
- 'cloud-users',
- {'bob': 'users2'}]}
- (_users, groups) = self._norm(g, distro)
- self.assertIn('ubuntu', groups)
- ub_members = groups['ubuntu']
- self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members))
- self.assertIn('bob', groups)
- b_members = groups['bob']
- self.assertEqual(sorted(['users', 'users2']),
- sorted(b_members))
-
- def test_basic_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob'],
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual({}, users)
-
- def test_csv_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': 'bob,joe,steve',
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_more_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob', 'joe', 'steve']
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_member_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': {
- 'bob': ['s'],
- 'joe': [],
- 'steve': [],
- }
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual(['s'], groups['bob'])
- self.assertEqual([], groups['joe'])
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_users_simple_dict(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': 'yes',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': '1',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
-
- def test_users_simple_dict_no(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': False,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- ug_cfg = {
- 'users': {
- 'default': 'no',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
-
- def test_users_simple_csv(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': 'joe,bob',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_simple(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- 'joe',
- 'bob'
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_old_user(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default, joe'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': ['bob', 'joe']
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- 'users': {
- 'bob': True,
- 'joe': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('zetta', users)
- ug_cfg = {}
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- self.assertEqual({}, groups)
-
- def test_users_dict_default_additional(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- {'name': 'default', 'blah': True}
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['blah'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_extract(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- (name, config) = ug_util.extract_default(users)
- self.assertEqual(name, 'bob')
- expected_config = {}
- def_config = None
- try:
- def_config = distro.get_default_user()
- except NotImplementedError:
- pass
- if not def_config:
- def_config = {}
- expected_config.update(def_config)
-
- # Ignore these for now
- expected_config.pop('name', None)
- expected_config.pop('groups', None)
- config.pop('groups', None)
- self.assertEqual(config, expected_config)
-
- def test_users_dict_default(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_trans(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe',
- 'tr-me': True},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'tr_me': True, 'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_dict(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe'},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- @mock.patch('cloudinit.subp.subp')
- def test_create_snap_user(self, mock_subp):
- mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
- '')]
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'snapuser': 'joe@joe.com'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- username = distro.create_user(user, **config)
-
- snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com']
- mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
- self.assertEqual(username, 'joe')
-
- @mock.patch('cloudinit.subp.subp')
- def test_create_snap_user_known(self, mock_subp):
- mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
- '')]
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- username = distro.create_user(user, **config)
-
- snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known',
- 'joe@joe.com']
- mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
- self.assertEqual(username, 'joe')
-
- @mock.patch('cloudinit.util.system_is_snappy')
- @mock.patch('cloudinit.util.is_group')
- @mock.patch('cloudinit.subp.subp')
- def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp,
- mock_snappy):
- mock_isgrp.return_value = False
- mock_subp.return_value = True
- mock_snappy.return_value = True
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'groups': 'users', 'create_groups': True},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- distro.add_user(user, **config)
-
- groupcmd = ['groupadd', 'users', '--extrausers']
- addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m']
-
- mock_subp.assert_any_call(groupcmd)
- mock_subp.assert_any_call(addcmd, logstring=addcmd)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_dmi.py b/tests/unittests/test_dmi.py
index 78a72122..6c28724a 100644
--- a/cloudinit/tests/test_dmi.py
+++ b/tests/unittests/test_dmi.py
@@ -1,16 +1,13 @@
-from cloudinit.tests import helpers
-from cloudinit import dmi
-from cloudinit import util
-from cloudinit import subp
-
import os
-import tempfile
import shutil
+import tempfile
from unittest import mock
+from cloudinit import dmi, subp, util
+from tests.unittests import helpers
-class TestReadDMIData(helpers.FilesystemMockingTestCase):
+class TestReadDMIData(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestReadDMIData, self).setUp()
self.new_root = tempfile.mkdtemp()
@@ -24,7 +21,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
self._m_is_FreeBSD = p.start()
def _create_sysfs_parent_directory(self):
- util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
+ util.ensure_dir(os.path.join("sys", "class", "dmi", "id"))
def _create_sysfs_file(self, key, content):
"""Mocks the sys path found on Linux systems."""
@@ -37,92 +34,109 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
In order to test a missing sys path and call outs to dmidecode, this
function fakes the results of dmidecode to test the results.
"""
+
def _dmidecode_subp(cmd):
if cmd[-1] != key:
raise subp.ProcessExecutionError()
return (content, error)
self.patched_funcs.enter_context(
- mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True))
+ mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True)
+ )
self.patched_funcs.enter_context(
- mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp))
+ mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp)
+ )
def _configure_kenv_return(self, key, content, error=None):
"""
In order to test a FreeBSD system call outs to kenv, this
function fakes the results of kenv to test the results.
"""
+
def _kenv_subp(cmd):
if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd:
raise subp.ProcessExecutionError()
return (content, error)
self.patched_funcs.enter_context(
- mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp))
+ mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp)
+ )
def patch_mapping(self, new_mapping):
self.patched_funcs.enter_context(
- mock.patch('cloudinit.dmi.DMIDECODE_TO_KERNEL',
- new_mapping))
+ mock.patch("cloudinit.dmi.DMIDECODE_TO_KERNEL", new_mapping)
+ )
def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
- self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)})
- expected_dmi_value = 'sys-used-correctly'
- self._create_sysfs_file('mapped-value', expected_dmi_value)
- self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
- self.assertEqual(expected_dmi_value, dmi.read_dmi_data('mapped-key'))
+ self.patch_mapping({"mapped-key": dmi.kdmi("mapped-value", None)})
+ expected_dmi_value = "sys-used-correctly"
+ self._create_sysfs_file("mapped-value", expected_dmi_value)
+ self._configure_dmidecode_return("mapped-key", "wrong-wrong-wrong")
+ self.assertEqual(expected_dmi_value, dmi.read_dmi_data("mapped-key"))
def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
self.patch_mapping({})
self._create_sysfs_parent_directory()
- expected_dmi_value = 'dmidecode-used'
- self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
+ expected_dmi_value = "dmidecode-used"
+ self._configure_dmidecode_return("use-dmidecode", expected_dmi_value)
with mock.patch("cloudinit.util.os.uname") as m_uname:
- m_uname.return_value = ('x-sysname', 'x-nodename',
- 'x-release', 'x-version', 'x86_64')
- self.assertEqual(expected_dmi_value,
- dmi.read_dmi_data('use-dmidecode'))
+ m_uname.return_value = (
+ "x-sysname",
+ "x-nodename",
+ "x-release",
+ "x-version",
+ "x86_64",
+ )
+ self.assertEqual(
+ expected_dmi_value, dmi.read_dmi_data("use-dmidecode")
+ )
def test_dmidecode_not_used_on_arm(self):
self.patch_mapping({})
print("current =%s", subp)
self._create_sysfs_parent_directory()
- dmi_val = 'from-dmidecode'
- dmi_name = 'use-dmidecode'
+ dmi_val = "from-dmidecode"
+ dmi_name = "use-dmidecode"
self._configure_dmidecode_return(dmi_name, dmi_val)
print("now =%s", subp)
- expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
+ expected = {"armel": None, "aarch64": dmi_val, "x86_64": dmi_val}
found = {}
# we do not run the 'dmi-decode' binary on some arches
# verify that anything requested that is not in the sysfs dir
# will return None on those arches.
with mock.patch("cloudinit.util.os.uname") as m_uname:
for arch in expected:
- m_uname.return_value = ('x-sysname', 'x-nodename',
- 'x-release', 'x-version', arch)
+ m_uname.return_value = (
+ "x-sysname",
+ "x-nodename",
+ "x-release",
+ "x-version",
+ arch,
+ )
print("now2 =%s", subp)
found[arch] = dmi.read_dmi_data(dmi_name)
self.assertEqual(expected, found)
def test_none_returned_if_neither_source_has_data(self):
self.patch_mapping({})
- self._configure_dmidecode_return('key', 'value')
- self.assertIsNone(dmi.read_dmi_data('expect-fail'))
+ self._configure_dmidecode_return("key", "value")
+ self.assertIsNone(dmi.read_dmi_data("expect-fail"))
def test_none_returned_if_dmidecode_not_in_path(self):
self.patched_funcs.enter_context(
- mock.patch.object(subp, 'which', lambda _: False))
+ mock.patch.object(subp, "which", lambda _: False)
+ )
self.patch_mapping({})
- self.assertIsNone(dmi.read_dmi_data('expect-fail'))
+ self.assertIsNone(dmi.read_dmi_data("expect-fail"))
def test_empty_string_returned_instead_of_foxfox(self):
# uninitialized dmi values show as \xff, return empty string
my_len = 32
- dmi_value = b'\xff' * my_len + b'\n'
+ dmi_value = b"\xff" * my_len + b"\n"
expected = ""
- dmi_key = 'system-product-name'
- sysfs_key = 'product_name'
+ dmi_key = "system-product-name"
+ sysfs_key = "product_name"
self._create_sysfs_file(sysfs_key, dmi_value)
self.assertEqual(expected, dmi.read_dmi_data(dmi_key))
@@ -132,7 +146,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
# first verify we get the value if not in container
self._m_is_container.return_value = False
key, val = ("system-product-name", "my_product")
- self._create_sysfs_file('product_name', val)
+ self._create_sysfs_file("product_name", val)
self.assertEqual(val, dmi.read_dmi_data(key))
# then verify in container returns None
@@ -142,7 +156,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
def test_container_returns_none_on_unknown(self):
"""In a container even bogus keys return None."""
self._m_is_container.return_value = True
- self._create_sysfs_file('product_name', "should-be-ignored")
+ self._create_sysfs_file("product_name", "should-be-ignored")
self.assertIsNone(dmi.read_dmi_data("bogus"))
self.assertIsNone(dmi.read_dmi_data("system-product-name"))
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 1d8aaf18..0b0de395 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -1,27 +1,34 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from collections import namedtuple
import copy
import os
+from collections import namedtuple
from uuid import uuid4
-from cloudinit import safeyaml
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.tests.helpers import (
- CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
-
+from cloudinit import safeyaml, subp, util
from cloudinit.sources import DataSourceIBMCloud as ds_ibm
-from cloudinit.sources import DataSourceSmartOS as ds_smartos
from cloudinit.sources import DataSourceOracle as ds_oracle
-
-UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
- "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux")
-UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP "
- "Mon Jun 26 17:53:54 UTC 2017 "
- "ppc64le ppc64le ppc64le GNU/Linux")
-UNAME_FREEBSD = ("FreeBSD fbsd12-1 12.1-RELEASE-p10 "
- "FreeBSD 12.1-RELEASE-p10 GENERIC amd64")
+from cloudinit.sources import DataSourceSmartOS as ds_smartos
+from tests.unittests.helpers import (
+ CiTestCase,
+ cloud_init_project_dir,
+ dir2dict,
+ populate_dir,
+ populate_dir_with_ts,
+)
+
+UNAME_MYSYS = (
+ "Linux bart 4.4.0-62-generic #83-Ubuntu "
+ "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux"
+)
+UNAME_PPC64EL = (
+ "Linux diamond 4.4.0-83-generic #106-Ubuntu SMP "
+ "Mon Jun 26 17:53:54 UTC 2017 "
+ "ppc64le ppc64le ppc64le GNU/Linux"
+)
+UNAME_FREEBSD = (
+ "FreeBSD fbsd12-1 12.1-RELEASE-p10 FreeBSD 12.1-RELEASE-p10 GENERIC amd64"
+)
BLKID_EFI_ROOT = """
DEVNAME=/dev/sda1
@@ -37,10 +44,16 @@ PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc
# this is a Ubuntu 18.04 disk.img output (dual uefi and bios bootable)
BLKID_UEFI_UBUNTU = [
- {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()},
- {'DEVNAME': 'vda14', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(),
- 'UUID': '5F55-129B'}]
+ {"DEVNAME": "vda1", "TYPE": "ext4", "PARTUUID": uuid4(), "UUID": uuid4()},
+ {"DEVNAME": "vda14", "PARTUUID": uuid4()},
+ {
+ "DEVNAME": "vda15",
+ "TYPE": "vfat",
+ "LABEL": "UEFI",
+ "PARTUUID": uuid4(),
+ "UUID": "5F55-129B",
+ },
+]
POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled"
@@ -48,7 +61,7 @@ POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled"
DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled"
DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled"
DI_EC2_STRICT_ID_DEFAULT = "true"
-OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1'
+OVF_MATCH_STRING = "http://schemas.dmtf.org/ovf/environment/1"
SHELL_MOCK_TMPL = """\
%(name)s() {
@@ -62,7 +75,7 @@ SHELL_MOCK_TMPL = """\
RC_FOUND = 0
RC_NOT_FOUND = 1
-DS_NONE = 'None'
+DS_NONE = "None"
P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag"
P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
@@ -74,31 +87,45 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg"
IBM_CONFIG_UUID = "9796-932E"
-MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt',
- 'RET': 'container-other', 'ret': 0}
-MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
-MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
+MOCK_VIRT_IS_CONTAINER_OTHER = {
+ "name": "detect_virt",
+ "RET": "container-other",
+ "ret": 0,
+}
+MOCK_NOT_LXD_DATASOURCE = {"name": "dscheck_LXD", "ret": 1}
+MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0}
+MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0}
# currently SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt.
-MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0}
-MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0}
-MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}
-MOCK_UNAME_IS_FREEBSD = {'name': 'uname', 'out': UNAME_FREEBSD, 'ret': 0}
+MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0}
+MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0}
+MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0}
+MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0}
+
+DEFAULT_MOCKS = [MOCK_NOT_LXD_DATASOURCE]
shell_true = 0
shell_false = 1
-CallReturn = namedtuple('CallReturn',
- ['rc', 'stdout', 'stderr', 'cfg', 'files'])
+CallReturn = namedtuple(
+ "CallReturn", ["rc", "stdout", "stderr", "cfg", "files"]
+)
class DsIdentifyBase(CiTestCase):
- dsid_path = os.path.realpath('tools/ds-identify')
- allowed_subp = ['sh']
-
- def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
- policy_dmi=DI_DEFAULT_POLICY,
- policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
- ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT):
+ dsid_path = cloud_init_project_dir("tools/ds-identify")
+ allowed_subp = ["sh"]
+
+ def call(
+ self,
+ rootd=None,
+ mocks=None,
+ func="main",
+ args=None,
+ files=None,
+ policy_dmi=DI_DEFAULT_POLICY,
+ policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
+ ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT,
+ ):
if args is None:
args = []
if mocks is None:
@@ -110,7 +137,7 @@ class DsIdentifyBase(CiTestCase):
if rootd is None:
rootd = self.tmp_dir()
- unset = '_unset'
+ unset = "_unset"
wrap = self.tmp_path(path="_shwrap", dir=rootd)
populate_dir(rootd, files)
@@ -126,11 +153,11 @@ class DsIdentifyBase(CiTestCase):
'DI_DEFAULT_POLICY="%s"' % policy_dmi,
'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_no_dmi,
'DI_EC2_STRICT_ID_DEFAULT="%s"' % ec2_strict_id,
- ""
+ "",
]
def write_mock(data):
- ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None}
+ ddata = {"out": None, "err": None, "ret": 0, "RET": None}
ddata.update(data)
for k in ddata:
if ddata[k] is None:
@@ -139,68 +166,88 @@ class DsIdentifyBase(CiTestCase):
mocklines = []
defaults = [
- {'name': 'detect_virt', 'RET': 'none', 'ret': 1},
- {'name': 'uname', 'out': UNAME_MYSYS},
- {'name': 'blkid', 'out': BLKID_EFI_ROOT},
- {'name': 'ovf_vmware_transport_guestinfo',
- 'out': 'No value found', 'ret': 1},
- {'name': 'dmi_decode', 'ret': 1,
- 'err': 'No dmidecode program. ERROR.'},
- {'name': 'get_kenv_field', 'ret': 1,
- 'err': 'No kenv program. ERROR.'},
+ {"name": "detect_virt", "RET": "none", "ret": 1},
+ {"name": "uname", "out": UNAME_MYSYS},
+ {"name": "blkid", "out": BLKID_EFI_ROOT},
+ {
+ "name": "ovf_vmware_transport_guestinfo",
+ "out": "No value found",
+ "ret": 1,
+ },
+ {
+ "name": "dmi_decode",
+ "ret": 1,
+ "err": "No dmidecode program. ERROR.",
+ },
+ {
+ "name": "get_kenv_field",
+ "ret": 1,
+ "err": "No kenv program. ERROR.",
+ },
]
- written = [d['name'] for d in mocks]
+ written = [d["name"] for d in mocks]
for data in mocks:
mocklines.append(write_mock(data))
for d in defaults:
- if d['name'] not in written:
+ if d["name"] not in written:
mocklines.append(write_mock(d))
- endlines = [
- func + ' ' + ' '.join(['"%s"' % s for s in args])
- ]
+ endlines = [func + " " + " ".join(['"%s"' % s for s in args])]
with open(wrap, "w") as fp:
- fp.write('\n'.join(head + mocklines + endlines) + "\n")
+ fp.write("\n".join(head + mocklines + endlines) + "\n")
rc = 0
try:
- out, err = subp.subp(['sh', '-c', '. %s' % wrap], capture=True)
+ out, err = subp.subp(["sh", "-c", ". %s" % wrap], capture=True)
except subp.ProcessExecutionError as e:
rc = e.exit_code
out = e.stdout
err = e.stderr
cfg = None
- cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg')
+ cfg_out = os.path.join(rootd, "run/cloud-init/cloud.cfg")
if os.path.exists(cfg_out):
contents = util.load_file(cfg_out)
try:
cfg = safeyaml.load(contents)
except Exception as e:
- cfg = {"_INVALID_YAML": contents,
- "_EXCEPTION": str(e)}
+ cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)}
return CallReturn(rc, out, err, cfg, dir2dict(rootd))
def _call_via_dict(self, data, rootd=None, **kwargs):
# return output of self.call with a dict input like VALID_CFG[item]
- xwargs = {'rootd': rootd}
- passthrough = ('mocks', 'func', 'args', 'policy_dmi',
- 'policy_no_dmi', 'files')
+ xwargs = {"rootd": rootd}
+ passthrough = (
+ "mocks",
+ "func",
+ "args",
+ "policy_dmi",
+ "policy_no_dmi",
+ "files",
+ )
for k in passthrough:
if k in data:
xwargs[k] = data[k]
if k in kwargs:
xwargs[k] = kwargs[k]
+ if "mocks" not in xwargs:
+ xwargs["mocks"] = DEFAULT_MOCKS
+ else:
+ mocked_funcs = [m["name"] for m in xwargs["mocks"]]
+ for default_mock in DEFAULT_MOCKS:
+ if default_mock["name"] not in mocked_funcs:
+ xwargs["mocks"].append(default_mock)
return self.call(**xwargs)
def _test_ds_found(self, name):
data = copy.deepcopy(VALID_CFG[name])
return self._check_via_dict(
- data, RC_FOUND, dslist=[data.get('ds'), DS_NONE])
+ data, RC_FOUND, dslist=[data.get("ds"), DS_NONE]
+ )
def _test_ds_not_found(self, name):
data = copy.deepcopy(VALID_CFG[name])
@@ -212,87 +259,104 @@ class DsIdentifyBase(CiTestCase):
try:
self.assertEqual(rc, ret.rc)
if dslist is not None:
- self.assertEqual(dslist, ret.cfg['datasource_list'])
+ self.assertEqual(dslist, ret.cfg["datasource_list"])
good = True
finally:
if not good:
- _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg,
- ret.files)
+ _print_run_output(
+ ret.rc, ret.stdout, ret.stderr, ret.cfg, ret.files
+ )
return ret
class TestDsIdentify(DsIdentifyBase):
def test_wb_print_variables(self):
"""_print_info reports an array of discovered variables to stderr."""
- data = VALID_CFG['Azure-dmi-detection']
+ data = VALID_CFG["Azure-dmi-detection"]
_, _, err, _, _ = self._call_via_dict(data)
expected_vars = [
- 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL',
- 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG',
- 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME',
- 'UNAME_KERNEL_RELEASE', 'UNAME_KERNEL_VERSION', 'UNAME_MACHINE',
- 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST',
- 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND']
+ "DMI_PRODUCT_NAME",
+ "DMI_SYS_VENDOR",
+ "DMI_PRODUCT_SERIAL",
+ "DMI_PRODUCT_UUID",
+ "PID_1_PRODUCT_NAME",
+ "DMI_CHASSIS_ASSET_TAG",
+ "FS_LABELS",
+ "KERNEL_CMDLINE",
+ "VIRT",
+ "UNAME_KERNEL_NAME",
+ "UNAME_KERNEL_RELEASE",
+ "UNAME_KERNEL_VERSION",
+ "UNAME_MACHINE",
+ "UNAME_NODENAME",
+ "UNAME_OPERATING_SYSTEM",
+ "DSNAME",
+ "DSLIST",
+ "MODE",
+ "ON_FOUND",
+ "ON_MAYBE",
+ "ON_NOTFOUND",
+ ]
for var in expected_vars:
- self.assertIn('{0}='.format(var), err)
+ self.assertIn("{0}=".format(var), err)
def test_azure_dmi_detection_from_chassis_asset_tag(self):
"""Azure datasource is detected from DMI chassis-asset-tag"""
- self._test_ds_found('Azure-dmi-detection')
+ self._test_ds_found("Azure-dmi-detection")
def test_azure_seed_file_detection(self):
"""Azure datasource is detected due to presence of a seed file.
The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml."""
- self._test_ds_found('Azure-seed-detection')
+ self._test_ds_found("Azure-seed-detection")
def test_aws_ec2_hvm(self):
"""EC2: hvm instances use dmi serial and uuid starting with 'ec2'."""
- self._test_ds_found('Ec2-hvm')
+ self._test_ds_found("Ec2-hvm")
def test_aws_ec2_xen(self):
"""EC2: sys/hypervisor/uuid starts with ec2."""
- self._test_ds_found('Ec2-xen')
+ self._test_ds_found("Ec2-xen")
def test_brightbox_is_ec2(self):
"""EC2: product_serial ends with '.brightbox.com'"""
- self._test_ds_found('Ec2-brightbox')
+ self._test_ds_found("Ec2-brightbox")
def test_bobrightbox_is_not_brightbox(self):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
- self._test_ds_not_found('Ec2-brightbox-negative')
+ self._test_ds_not_found("Ec2-brightbox-negative")
def test_freebsd_nocloud(self):
"""NoCloud identified on FreeBSD via label by geom."""
- self._test_ds_found('NoCloud-fbsd')
+ self._test_ds_found("NoCloud-fbsd")
def test_gce_by_product_name(self):
"""GCE identifies itself with product_name."""
- self._test_ds_found('GCE')
+ self._test_ds_found("GCE")
def test_gce_by_serial(self):
"""Older gce compute instances must be identified by serial."""
- self._test_ds_found('GCE-serial')
+ self._test_ds_found("GCE-serial")
def test_config_drive(self):
"""ConfigDrive datasource has a disk with LABEL=config-2."""
- self._test_ds_found('ConfigDrive')
+ self._test_ds_found("ConfigDrive")
def test_rbx_cloud(self):
"""Rbx datasource has a disk with LABEL=CLOUDMD."""
- self._test_ds_found('RbxCloud')
+ self._test_ds_found("RbxCloud")
def test_rbx_cloud_lower(self):
"""Rbx datasource has a disk with LABEL=cloudmd."""
- self._test_ds_found('RbxCloudLower')
+ self._test_ds_found("RbxCloudLower")
def test_config_drive_upper(self):
"""ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
- self._test_ds_found('ConfigDriveUpper')
+ self._test_ds_found("ConfigDriveUpper")
def test_config_drive_seed(self):
"""Config Drive seed directory."""
- self._test_ds_found('ConfigDrive-seed')
+ self._test_ds_found("ConfigDrive-seed")
def test_config_drive_interacts_with_ibmcloud_config_disk(self):
"""Verify ConfigDrive interaction with IBMCloud.
@@ -300,34 +364,35 @@ class TestDsIdentify(DsIdentifyBase):
If ConfigDrive is enabled and not IBMCloud, then ConfigDrive
should claim the ibmcloud 'config-2' disk.
If IBMCloud is enabled, then ConfigDrive should skip."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg'
+ data["files"] = files
+ cfgpath = "etc/cloud/cloud.cfg.d/99_networklayer_common.cfg"
# with list including IBMCloud, config drive should be not found.
- files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n'
+ files[cfgpath] = "datasource_list: [ ConfigDrive, IBMCloud ]\n"
ret = self._check_via_dict(data, shell_true)
- self.assertEqual(
- ret.cfg.get('datasource_list'), ['IBMCloud', 'None'])
+ self.assertEqual(ret.cfg.get("datasource_list"), ["IBMCloud", "None"])
# But if IBMCloud is not enabled, config drive should claim this.
- files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n'
+ files[cfgpath] = "datasource_list: [ ConfigDrive, NoCloud ]\n"
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ret.cfg.get('datasource_list'), ['ConfigDrive', 'None'])
+ ret.cfg.get("datasource_list"), ["ConfigDrive", "None"]
+ )
def test_ibmcloud_template_userdata_in_provisioning(self):
"""Template provisioned with user-data during provisioning stage.
Template provisioning with user-data has METADATA disk,
datasource should return not found."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-metadata'])
+ data = copy.deepcopy(VALID_CFG["IBMCloud-metadata"])
# change the 'is_ibm_provisioning' mock to return shell_true (0): provisioning active
- isprov_m = [m for m in data['mocks']
- if m["name"] == "is_ibm_provisioning"][0]
- isprov_m['ret'] = shell_true
+ isprov_m = [
+ m for m in data["mocks"] if m["name"] == "is_ibm_provisioning"
+ ][0]
+ isprov_m["ret"] = shell_true
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_userdata(self):
@@ -335,58 +400,61 @@ class TestDsIdentify(DsIdentifyBase):
Template provisioning with user-data has METADATA disk.
datasource should return found."""
- self._test_ds_found('IBMCloud-metadata')
+ self._test_ds_found("IBMCloud-metadata")
def test_ibmcloud_template_no_userdata_in_provisioning(self):
"""Template provisioned with no user-data during provisioning.
no disks attached. Datasource should return not found."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks'])
- data['mocks'].append(
- {'name': 'is_ibm_provisioning', 'ret': shell_true})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-nodisks"])
+ data["mocks"].append(
+ {"name": "is_ibm_provisioning", "ret": shell_true}
+ )
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_no_userdata(self):
"""Template provisioned with no user-data first boot.
no disks attached. Datasource should return not found."""
- self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND)
+ self._check_via_dict(VALID_CFG["IBMCloud-nodisks"], RC_NOT_FOUND)
def test_ibmcloud_os_code(self):
"""Launched by os code always has config-2 disk."""
- self._test_ds_found('IBMCloud-config-2')
+ self._test_ds_found("IBMCloud-config-2")
def test_ibmcloud_os_code_different_uuid(self):
"""IBM cloud config-2 disks must be explicit match on UUID.
If the UUID is not 9796-932E then we actually expect ConfigDrive."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
offset = None
- for m, d in enumerate(data['mocks']):
- if d.get('name') == "blkid":
+ for m, d in enumerate(data["mocks"]):
+ if d.get("name") == "blkid":
offset = m
break
if not offset:
raise ValueError("Expected to find 'blkid' mock, but did not.")
- data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID,
- "DEAD-BEEF")
+ data["mocks"][offset]["out"] = d["out"].replace(
+ ds_ibm.IBM_CONFIG_UUID, "DEAD-BEEF"
+ )
self._check_via_dict(
- data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE])
+ data, rc=RC_FOUND, dslist=["ConfigDrive", DS_NONE]
+ )
def test_ibmcloud_with_nocloud_seed(self):
"""NoCloud seed should be preferred over IBMCloud.
A nocloud seed should be preferred over IBMCloud even if enabled.
Ubuntu 16.04 images have <vlc>/seed/nocloud-net. LP: #1766401."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- files.update(VALID_CFG['NoCloud-seed']['files'])
+ data["files"] = files
+ files.update(VALID_CFG["NoCloud-seed"]["files"])
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ['NoCloud', 'IBMCloud', 'None'],
- ret.cfg.get('datasource_list'))
+ ["NoCloud", "IBMCloud", "None"], ret.cfg.get("datasource_list")
+ )
def test_ibmcloud_with_configdrive_seed(self):
"""ConfigDrive seed should be preferred over IBMCloud.
@@ -394,28 +462,28 @@ class TestDsIdentify(DsIdentifyBase):
A ConfigDrive seed should be preferred over IBMCloud even if enabled.
Ubuntu 16.04 images have a fstab entry that mounts the
METADATA disk into <vlc>/seed/config_drive. LP: #1766401."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- files.update(VALID_CFG['ConfigDrive-seed']['files'])
+ data["files"] = files
+ files.update(VALID_CFG["ConfigDrive-seed"]["files"])
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ['ConfigDrive', 'IBMCloud', 'None'],
- ret.cfg.get('datasource_list'))
+ ["ConfigDrive", "IBMCloud", "None"], ret.cfg.get("datasource_list")
+ )
def test_policy_disabled(self):
"""A Builtin policy of 'disabled' should return not found.
Even though a search would find something, the builtin policy of
disabled should cause the return of not found."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
self._check_via_dict(mydata, rc=RC_NOT_FOUND, policy_dmi="disabled")
def test_policy_config_disable_overrides_builtin(self):
"""explicit policy: disabled in config file should cause not found."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
- mydata['files'][P_DSID_CFG] = '\n'.join(['policy: disabled', ''])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
+ mydata["files"][P_DSID_CFG] = "\n".join(["policy: disabled", ""])
self._check_via_dict(mydata, rc=RC_NOT_FOUND)
def test_single_entry_defines_datasource(self):
@@ -424,54 +492,55 @@ class TestDsIdentify(DsIdentifyBase):
Test the valid Ec2-hvm, but provide a config file that specifies
a single entry in datasource_list. The configured value should
be used."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
- cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg'
- mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n'
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
+ cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg"
+ mydata["files"][cfgpath] = 'datasource_list: ["NoCloud"]\n'
+ self._check_via_dict(mydata, rc=RC_FOUND, dslist=["NoCloud", DS_NONE])
def test_configured_list_with_none(self):
"""When datasource_list already contains None, None is not added.
The explicitly configured datasource_list has 'None' in it. That
should not have None automatically added."""
- mydata = copy.deepcopy(VALID_CFG['GCE'])
- cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg'
- mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE])
+ mydata = copy.deepcopy(VALID_CFG["GCE"])
+ cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg"
+ mydata["files"][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
+ self._check_via_dict(mydata, rc=RC_FOUND, dslist=["Ec2", DS_NONE])
def test_aliyun_identified(self):
"""Test that Aliyun cloud is identified by product id."""
- self._test_ds_found('AliYun')
+ self._test_ds_found("AliYun")
def test_aliyun_over_ec2(self):
"""Even if all other factors identified Ec2, AliYun should be used."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-xen'])
- self._test_ds_found('AliYun')
- prod_name = VALID_CFG['AliYun']['files'][P_PRODUCT_NAME]
- mydata['files'][P_PRODUCT_NAME] = prod_name
+ mydata = copy.deepcopy(VALID_CFG["Ec2-xen"])
+ self._test_ds_found("AliYun")
+ prod_name = VALID_CFG["AliYun"]["files"][P_PRODUCT_NAME]
+ mydata["files"][P_PRODUCT_NAME] = prod_name
policy = "search,found=first,maybe=none,notfound=disabled"
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['AliYun', DS_NONE],
- policy_dmi=policy)
+ self._check_via_dict(
+ mydata, rc=RC_FOUND, dslist=["AliYun", DS_NONE], policy_dmi=policy
+ )
def test_default_openstack_intel_is_found(self):
"""On Intel, openstack must be identified."""
- self._test_ds_found('OpenStack')
+ self._test_ds_found("OpenStack")
def test_openstack_open_telekom_cloud(self):
"""Open Telecom identification."""
- self._test_ds_found('OpenStack-OpenTelekom')
+ self._test_ds_found("OpenStack-OpenTelekom")
def test_openstack_sap_ccloud(self):
"""SAP Converged Cloud identification"""
- self._test_ds_found('OpenStack-SAPCCloud')
+ self._test_ds_found("OpenStack-SAPCCloud")
def test_openstack_asset_tag_nova(self):
"""OpenStack identification via asset tag OpenStack Nova."""
- self._test_ds_found('OpenStack-AssetTag-Nova')
+ self._test_ds_found("OpenStack-AssetTag-Nova")
def test_openstack_asset_tag_compute(self):
"""OpenStack identification via asset tag OpenStack Compute."""
- self._test_ds_found('OpenStack-AssetTag-Compute')
+ self._test_ds_found("OpenStack-AssetTag-Compute")
def test_openstack_on_non_intel_is_maybe(self):
"""On non-Intel, openstack without dmi info is maybe.
@@ -479,175 +548,282 @@ class TestDsIdentify(DsIdentifyBase):
nova does not identify itself on platforms other than intel.
https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova"""
- data = copy.deepcopy(VALID_CFG['OpenStack'])
- del data['files'][P_PRODUCT_NAME]
- data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE,
- 'policy_no_dmi': POLICY_FOUND_OR_MAYBE})
+ data = copy.deepcopy(VALID_CFG["OpenStack"])
+ del data["files"][P_PRODUCT_NAME]
+ data.update(
+ {
+ "policy_dmi": POLICY_FOUND_OR_MAYBE,
+ "policy_no_dmi": POLICY_FOUND_OR_MAYBE,
+ }
+ )
# this should show not found as default uname in tests is intel.
# and intel openstack requires positive identification.
self._check_via_dict(data, RC_NOT_FOUND, dslist=None)
# updating the uname to ppc64 though should get a maybe.
- data.update({'mocks': [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]})
+ data.update({"mocks": [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]})
(_, _, err, _, _) = self._check_via_dict(
- data, RC_FOUND, dslist=['OpenStack', 'None'])
+ data, RC_FOUND, dslist=["OpenStack", "None"]
+ )
self.assertIn("check for 'OpenStack' returned maybe", err)
def test_default_ovf_is_found(self):
"""OVF is identified found when ovf/ovf-env.xml seed file exists."""
- self._test_ds_found('OVF-seed')
+ self._test_ds_found("OVF-seed")
def test_default_ovf_with_detect_virt_none_not_found(self):
"""OVF identifies not found when detect_virt returns "none"."""
self._check_via_dict(
- {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ {"ds": "OVF"}, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
def test_default_ovf_returns_not_found_on_azure(self):
"""OVF datasource won't be found as false positive on Azure."""
- ovfonazure = copy.deepcopy(VALID_CFG['OVF'])
+ ovfonazure = copy.deepcopy(VALID_CFG["OVF"])
# Set azure asset tag to assert OVF content not found
- ovfonazure['files'][P_CHASSIS_ASSET_TAG] = (
- '7783-7084-3265-9085-8269-3286-77\n')
- self._check_via_dict(
- ovfonazure, RC_FOUND, dslist=['Azure', DS_NONE])
+ ovfonazure["files"][
+ P_CHASSIS_ASSET_TAG
+ ] = "7783-7084-3265-9085-8269-3286-77\n"
+ self._check_via_dict(ovfonazure, RC_FOUND, dslist=["Azure", DS_NONE])
def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self):
"""OVF is identified when iso9660 cdrom path contains ovf schema."""
- self._test_ds_found('OVF')
+ self._test_ds_found("OVF")
def test_ovf_on_vmware_guestinfo_found(self):
"""OVF guest info is found on vmware."""
- self._test_ds_found('OVF-guestinfo')
+ self._test_ds_found("OVF-guestinfo")
def test_ovf_on_vmware_iso_found_when_vmware_customization(self):
"""OVF is identified when vmware customization is enabled."""
- self._test_ds_found('OVF-vmware-customization')
+ self._test_ds_found("OVF-vmware-customization")
def test_ovf_on_vmware_iso_found_open_vm_tools_64(self):
"""OVF is identified when open-vm-tools installed in /usr/lib64."""
- cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization'])
- p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so'
- open64 = 'usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so'
- cust64['files'][open64] = cust64['files'][p32]
- del cust64['files'][p32]
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ cust64["files"][open64] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/x86_64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ x86 = (
+ "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][x86] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/aarch64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ aarch64 = (
+ "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][aarch64] = cust64["files"][p32]
+ del cust64["files"][p32]
return self._check_via_dict(
- cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE])
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
"""OVF is identified by well-known iso9660 labels."""
- ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF'])
+ ovf_cdrom_by_label = copy.deepcopy(VALID_CFG["OVF"])
# Unset matching cdrom ovf schema content
- ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match'
+ ovf_cdrom_by_label["files"]["dev/sr0"] = "No content match"
self._check_via_dict(
- ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Add recognized labels
- valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT',
- "OVFENV", "ovfenv", "OVF ENV", "ovf env"]
+ valid_ovf_labels = [
+ "ovf-transport",
+ "OVF-TRANSPORT",
+ "OVFENV",
+ "ovfenv",
+ "OVF ENV",
+ "ovf env",
+ ]
for valid_ovf_label in valid_ovf_labels:
- ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([
- {'DEVNAME': 'sda1', 'TYPE': 'ext4', 'LABEL': 'rootfs'},
- {'DEVNAME': 'sr0', 'TYPE': 'iso9660',
- 'LABEL': valid_ovf_label},
- {'DEVNAME': 'vda1', 'TYPE': 'ntfs', 'LABEL': 'data'}])
+ ovf_cdrom_by_label["mocks"][0]["out"] = blkid_out(
+ [
+ {"DEVNAME": "sda1", "TYPE": "ext4", "LABEL": "rootfs"},
+ {
+ "DEVNAME": "sr0",
+ "TYPE": "iso9660",
+ "LABEL": valid_ovf_label,
+ },
+ {"DEVNAME": "vda1", "TYPE": "ntfs", "LABEL": "data"},
+ ]
+ )
self._check_via_dict(
- ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_by_label, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self):
"""OVF is identified by well-known iso9660 labels."""
- ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF'])
+ ovf_cdrom_with_size = copy.deepcopy(VALID_CFG["OVF"])
# Set cdrom size to 20480 (10MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "20480\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Set cdrom size to 204800 (100MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "204800\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Set cdrom size to 18432 (9MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "18432\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
# Set cdrom size to 2048 (1MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "2048\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
def test_default_nocloud_as_vdb_iso9660(self):
"""NoCloud is found with iso9660 filesystem on non-cdrom disk."""
- self._test_ds_found('NoCloud')
+ self._test_ds_found("NoCloud")
def test_nocloud_upper(self):
"""NoCloud is found with uppercase filesystem label."""
- self._test_ds_found('NoCloudUpper')
+ self._test_ds_found("NoCloudUpper")
def test_nocloud_fatboot(self):
"""NoCloud fatboot label - LP: #184166."""
- self._test_ds_found('NoCloud-fatboot')
+ self._test_ds_found("NoCloud-fatboot")
def test_nocloud_seed(self):
"""Nocloud seed directory."""
- self._test_ds_found('NoCloud-seed')
+ self._test_ds_found("NoCloud-seed")
def test_nocloud_seed_ubuntu_core_writable(self):
"""Nocloud seed directory ubuntu core writable"""
- self._test_ds_found('NoCloud-seed-ubuntu-core')
+ self._test_ds_found("NoCloud-seed-ubuntu-core")
def test_hetzner_found(self):
"""Hetzner cloud is identified in sys_vendor."""
- self._test_ds_found('Hetzner')
+ self._test_ds_found("Hetzner")
def test_smartos_bhyve(self):
"""SmartOS cloud identified by SmartDC in dmi."""
- self._test_ds_found('SmartOS-bhyve')
+ self._test_ds_found("SmartOS-bhyve")
def test_smartos_lxbrand(self):
"""SmartOS cloud identified on lxbrand container."""
- self._test_ds_found('SmartOS-lxbrand')
+ self._test_ds_found("SmartOS-lxbrand")
def test_smartos_lxbrand_requires_socket(self):
"""SmartOS cloud should not be identified if no socket file."""
- mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand'])
- del mycfg['files'][ds_smartos.METADATA_SOCKFILE]
+ mycfg = copy.deepcopy(VALID_CFG["SmartOS-lxbrand"])
+ del mycfg["files"][ds_smartos.METADATA_SOCKFILE]
self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled")
def test_path_env_gets_set_from_main(self):
"""PATH environment should always have some tokens when main is run.
We explicitly call main as we want to ensure it updates PATH."""
- cust = copy.deepcopy(VALID_CFG['NoCloud'])
+ cust = copy.deepcopy(VALID_CFG["NoCloud"])
rootd = self.tmp_dir()
- mpp = 'main-printpath'
+ mpp = "main-printpath"
pre = "MYPATH="
- cust['files'][mpp] = (
- 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;')
+ cust["files"][mpp] = (
+ 'PATH="/mycust/path"; main; r=$?; echo ' + pre + "$PATH; exit $r;"
+ )
ret = self._check_via_dict(
- cust, RC_FOUND,
- func=".", args=[os.path.join(rootd, mpp)], rootd=rootd)
+ cust,
+ RC_FOUND,
+ func=".",
+ args=[os.path.join(rootd, mpp)],
+ rootd=rootd,
+ )
match = [
line for line in ret.stdout.splitlines() if line.startswith(pre)
][0]
toks = match.replace(pre, "").split(":")
expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"]
- self.assertEqual(expected, [p for p in expected if p in toks],
- "path did not have expected tokens")
+ self.assertEqual(
+ expected,
+ [p for p in expected if p in toks],
+ "path did not have expected tokens",
+ )
def test_zstack_is_ec2(self):
"""EC2: chassis asset tag ends with 'zstack.io'"""
- self._test_ds_found('Ec2-ZStack')
+ self._test_ds_found("Ec2-ZStack")
def test_e24cloud_is_ec2(self):
"""EC2: e24cloud identified by sys_vendor"""
- self._test_ds_found('Ec2-E24Cloud')
+ self._test_ds_found("Ec2-E24Cloud")
def test_e24cloud_not_active(self):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
- self._test_ds_not_found('Ec2-E24Cloud-negative')
+ self._test_ds_not_found("Ec2-E24Cloud-negative")
+
+ def test_vmware_no_valid_transports(self):
+ """VMware: no valid transports"""
+ self._test_ds_not_found("VMware-NoValidTransports")
+
+ def test_vmware_envvar_no_data(self):
+ """VMware: envvar transport no data"""
+ self._test_ds_not_found("VMware-EnvVar-NoData")
+
+ def test_vmware_envvar_no_virt_id(self):
+ """VMware: envvar transport success if no virt id"""
+ self._test_ds_found("VMware-EnvVar-NoVirtID")
+
+ def test_vmware_envvar_activated_by_metadata(self):
+ """VMware: envvar transport activated by metadata"""
+ self._test_ds_found("VMware-EnvVar-Metadata")
+
+ def test_vmware_envvar_activated_by_userdata(self):
+ """VMware: envvar transport activated by userdata"""
+ self._test_ds_found("VMware-EnvVar-Userdata")
+
+ def test_vmware_envvar_activated_by_vendordata(self):
+ """VMware: envvar transport activated by vendordata"""
+ self._test_ds_found("VMware-EnvVar-Vendordata")
+
+ def test_vmware_guestinfo_no_data(self):
+ """VMware: guestinfo transport no data"""
+ self._test_ds_not_found("VMware-GuestInfo-NoData")
+
+ def test_vmware_guestinfo_no_virt_id(self):
+ """VMware: guestinfo transport fails if no virt id"""
+ self._test_ds_not_found("VMware-GuestInfo-NoVirtID")
+
+ def test_vmware_guestinfo_activated_by_metadata(self):
+ """VMware: guestinfo transport activated by metadata"""
+ self._test_ds_found("VMware-GuestInfo-Metadata")
+
+ def test_vmware_guestinfo_activated_by_userdata(self):
+ """VMware: guestinfo transport activated by userdata"""
+ self._test_ds_found("VMware-GuestInfo-Userdata")
+
+ def test_vmware_guestinfo_activated_by_vendordata(self):
+ """VMware: guestinfo transport activated by vendordata"""
+ self._test_ds_found("VMware-GuestInfo-Vendordata")
class TestBSDNoSys(DsIdentifyBase):
@@ -663,14 +839,14 @@ class TestBSDNoSys(DsIdentifyBase):
This will be used on FreeBSD systems.
"""
- self._test_ds_found('Hetzner-kenv')
+ self._test_ds_found("Hetzner-kenv")
def test_dmi_dmidecode(self):
"""Test that dmidecode(8) works on systems which don't have /sys
This will be used on all other BSD systems.
"""
- self._test_ds_found('Hetzner-dmidecode')
+ self._test_ds_found("Hetzner-dmidecode")
class TestIsIBMProvisioning(DsIdentifyBase):
@@ -694,9 +870,11 @@ class TestIsIBMProvisioning(DsIdentifyBase):
def test_config_with_old_log(self):
"""A config with a log from previous boot is not provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", -30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
populate_dir_with_ts(rootd, data)
ret = self.call(rootd=rootd, func=self.funcname)
self.assertEqual(shell_false, ret.rc)
@@ -705,9 +883,11 @@ class TestIsIBMProvisioning(DsIdentifyBase):
def test_config_with_new_log(self):
"""A config with a log from this boot is provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", 30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
populate_dir_with_ts(rootd, data)
ret = self.call(rootd=rootd, func=self.funcname)
self.assertEqual(shell_true, ret.rc)
@@ -717,12 +897,12 @@ class TestIsIBMProvisioning(DsIdentifyBase):
class TestOracle(DsIdentifyBase):
def test_found_by_chassis(self):
"""Simple positive test of Oracle by chassis id."""
- self._test_ds_found('Oracle')
+ self._test_ds_found("Oracle")
def test_not_found(self):
"""Simple negative test of Oracle."""
- mycfg = copy.deepcopy(VALID_CFG['Oracle'])
- mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle"
+ mycfg = copy.deepcopy(VALID_CFG["Oracle"])
+ mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle"
self._check_via_dict(mycfg, rc=RC_NOT_FOUND)
@@ -739,7 +919,7 @@ def blkid_out(disks=None):
for key in [d for d in disk if d != "DEVNAME"]:
lines.append("%s=%s" % (key, disk[key]))
lines.append("")
- return '\n'.join(lines)
+ return "\n".join(lines)
def geom_out(disks=None):
@@ -756,387 +936,813 @@ def geom_out(disks=None):
disks = []
lines = []
for disk in disks:
- lines.append("%s/%s N/A %s" % (
- disk["TYPE"], disk["LABEL"], disk["DEVNAME"]))
+ lines.append(
+ "%s/%s N/A %s" % (disk["TYPE"], disk["LABEL"], disk["DEVNAME"])
+ )
lines.append("")
- return '\n'.join(lines)
+ return "\n".join(lines)
def _print_run_output(rc, out, err, cfg, files):
"""A helper to print return of TestDsIdentify.
- _print_run_output(self.call())"""
- print('\n'.join([
- '-- rc = %s --' % rc,
- '-- out --', str(out),
- '-- err --', str(err),
- '-- cfg --', util.json_dumps(cfg)]))
- print('-- files --')
+ _print_run_output(self.call())"""
+ print(
+ "\n".join(
+ [
+ "-- rc = %s --" % rc,
+ "-- out --",
+ str(out),
+ "-- err --",
+ str(err),
+ "-- cfg --",
+ util.json_dumps(cfg),
+ ]
+ )
+ )
+ print("-- files --")
for k, v in files.items():
if "/_shwrap" in k:
continue
- print(' === %s ===' % k)
+ print(" === %s ===" % k)
for line in v.splitlines():
print(" " + line)
VALID_CFG = {
- 'AliYun': {
- 'ds': 'AliYun',
- 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'},
+ "AliYun": {
+ "ds": "AliYun",
+ "files": {P_PRODUCT_NAME: "Alibaba Cloud ECS\n"},
},
- 'Azure-dmi-detection': {
- 'ds': 'Azure',
- 'files': {
- P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n',
- }
+ "Azure-dmi-detection": {
+ "ds": "Azure",
+ "files": {
+ P_CHASSIS_ASSET_TAG: "7783-7084-3265-9085-8269-3286-77\n",
+ },
},
- 'Azure-seed-detection': {
- 'ds': 'Azure',
- 'files': {
- P_CHASSIS_ASSET_TAG: 'No-match\n',
- os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n',
- }
+ "Azure-seed-detection": {
+ "ds": "Azure",
+ "files": {
+ P_CHASSIS_ASSET_TAG: "No-match\n",
+ os.path.join(P_SEED_DIR, "azure", "ovf-env.xml"): "present\n",
+ },
},
- 'Ec2-hvm': {
- 'ds': 'Ec2',
- 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}],
- 'files': {
- P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n',
- P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n',
- }
+ "Ec2-hvm": {
+ "ds": "Ec2",
+ "mocks": [{"name": "detect_virt", "RET": "kvm", "ret": 0}],
+ "files": {
+ P_PRODUCT_SERIAL: "ec23aef5-54be-4843-8d24-8c819f88453e\n",
+ P_PRODUCT_UUID: "EC23AEF5-54BE-4843-8D24-8C819F88453E\n",
+ },
},
- 'Ec2-xen': {
- 'ds': 'Ec2',
- 'mocks': [MOCK_VIRT_IS_XEN],
- 'files': {
- 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n'
+ "Ec2-xen": {
+ "ds": "Ec2",
+ "mocks": [MOCK_VIRT_IS_XEN],
+ "files": {
+ "sys/hypervisor/uuid": "ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n"
},
},
- 'Ec2-brightbox': {
- 'ds': 'Ec2',
- 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'},
+ "Ec2-brightbox": {
+ "ds": "Ec2",
+ "files": {P_PRODUCT_SERIAL: "srv-otuxg.gb1.brightbox.com\n"},
},
- 'Ec2-brightbox-negative': {
- 'ds': 'Ec2',
- 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'},
+ "Ec2-brightbox-negative": {
+ "ds": "Ec2",
+ "files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"},
},
- 'GCE': {
- 'ds': 'GCE',
- 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
+ "GCE": {
+ "ds": "GCE",
+ "files": {P_PRODUCT_NAME: "Google Compute Engine\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
- 'GCE-serial': {
- 'ds': 'GCE',
- 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
+ "GCE-serial": {
+ "ds": "GCE",
+ "files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
- 'NoCloud': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "iso9660",
+ "LABEL": "cidata",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-fbsd': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud-fbsd": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
MOCK_UNAME_IS_FREEBSD,
- {'name': 'geom', 'ret': 0,
- 'out': geom_out(
- [{'DEVNAME': 'vtbd', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+ {
+ "name": "geom",
+ "ret": 0,
+ "out": geom_out(
+ [{"DEVNAME": "vtbd", "TYPE": "iso9660", "LABEL": "cidata"}]
+ ),
+ },
],
- 'files': {
- '/dev/vtdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "/dev/vtdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloudUpper': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloudUpper": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "iso9660",
+ "LABEL": "CIDATA",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-fatboot': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud-fatboot": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'SEC_TYPE': 'msdos',
- 'UUID': '355a-4FC2', 'LABEL_FATBOOT': 'cidata'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "355a-4FC2",
+ "LABEL_FATBOOT": "cidata",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-seed': {
- 'ds': 'NoCloud',
- 'files': {
- os.path.join(P_SEED_DIR, 'nocloud', 'user-data'): 'ud\n',
- os.path.join(P_SEED_DIR, 'nocloud', 'meta-data'): 'md\n',
- }
+ "NoCloud-seed": {
+ "ds": "NoCloud",
+ "files": {
+ os.path.join(P_SEED_DIR, "nocloud", "user-data"): "ud\n",
+ os.path.join(P_SEED_DIR, "nocloud", "meta-data"): "md\n",
+ },
},
- 'NoCloud-seed-ubuntu-core': {
- 'ds': 'NoCloud',
- 'files': {
- os.path.join('writable/system-data', P_SEED_DIR,
- 'nocloud-net', 'user-data'): 'ud\n',
- os.path.join('writable/system-data', P_SEED_DIR,
- 'nocloud-net', 'meta-data'): 'md\n',
- }
+ "NoCloud-seed-ubuntu-core": {
+ "ds": "NoCloud",
+ "files": {
+ os.path.join(
+ "writable/system-data", P_SEED_DIR, "nocloud-net", "user-data"
+ ): "ud\n",
+ os.path.join(
+ "writable/system-data", P_SEED_DIR, "nocloud-net", "meta-data"
+ ): "md\n",
+ },
},
- 'OpenStack': {
- 'ds': 'OpenStack',
- 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
- 'policy_dmi': POLICY_FOUND_ONLY,
- 'policy_no_dmi': POLICY_FOUND_ONLY,
+ "OpenStack": {
+ "ds": "OpenStack",
+ "files": {P_PRODUCT_NAME: "OpenStack Nova\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
+ "policy_dmi": POLICY_FOUND_ONLY,
+ "policy_no_dmi": POLICY_FOUND_ONLY,
},
- 'OpenStack-OpenTelekom': {
+ "OpenStack-OpenTelekom": {
# OTC gen1 (Xen) hosts use OpenStack datasource, LP: #1756471
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenTelekomCloud\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OpenStack-SAPCCloud': {
+ "OpenStack-SAPCCloud": {
# SAP CCloud hosts use OpenStack on VMware
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'SAP CCloud VM\n'},
- 'mocks': [MOCK_VIRT_IS_VMWARE],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"},
+ "mocks": [MOCK_VIRT_IS_VMWARE],
},
- 'OpenStack-AssetTag-Nova': {
+ "OpenStack-AssetTag-Nova": {
# VMware vSphere can't modify product-name, LP: #1669875
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenStack Nova\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OpenStack-AssetTag-Compute': {
+ "OpenStack-AssetTag-Compute": {
# VMware vSphere can't modify product-name, LP: #1669875
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenStack Compute\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OVF-seed': {
- 'ds': 'OVF',
- 'files': {
- os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n',
- }
+ "OVF-seed": {
+ "ds": "OVF",
+ "files": {
+ os.path.join(P_SEED_DIR, "ovf", "ovf-env.xml"): "present\n",
+ },
},
- 'OVF-vmware-customization': {
- 'ds': 'OVF',
- 'mocks': [
+ "OVF-vmware-customization": {
+ "ds": "OVF",
+ "mocks": [
# Include a mocked iso9660 candidate, even though its content is not ovf
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
- },
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [{"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}]
+ ),
+ },
MOCK_VIRT_IS_VMWARE,
],
- 'files': {
- 'dev/sr0': 'no match',
+ "files": {
+ "dev/sr0": "no match",
# Setup vmware customization enabled
- 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here',
- 'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n',
- }
+ "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here",
+ "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n",
+ },
},
- 'OVF': {
- 'ds': 'OVF',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''},
- {'DEVNAME': 'sr1', 'TYPE': 'iso9660', 'LABEL': 'ignoreme'},
- {'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}]),
- },
+ "OVF": {
+ "ds": "OVF",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""},
+ {
+ "DEVNAME": "sr1",
+ "TYPE": "iso9660",
+ "LABEL": "ignoreme",
+ },
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ ]
+ ),
+ },
MOCK_VIRT_IS_VMWARE,
],
- 'files': {
- 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n',
- 'sys/class/block/sr0/size': '2048\n',
- }
+ "files": {
+ "dev/sr0": "pretend ovf iso has " + OVF_MATCH_STRING + "\n",
+ "sys/class/block/sr0/size": "2048\n",
+ },
},
- 'OVF-guestinfo': {
- 'ds': 'OVF',
- 'mocks': [
- {'name': 'ovf_vmware_transport_guestinfo', 'ret': 0,
- 'out': '<?xml version="1.0" encoding="UTF-8"?>\n<Environment'},
+ "OVF-guestinfo": {
+ "ds": "OVF",
+ "mocks": [
+ {
+ "name": "ovf_vmware_transport_guestinfo",
+ "ret": 0,
+ "out": '<?xml version="1.0" encoding="UTF-8"?>\n<Environment',
+ },
MOCK_VIRT_IS_VMWARE,
],
},
- 'ConfigDrive': {
- 'ds': 'ConfigDrive',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'config-2'}])
- },
+ "ConfigDrive": {
+ "ds": "ConfigDrive",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "vfat",
+ "LABEL": "config-2",
+ },
+ ]
+ ),
+ },
],
},
- 'ConfigDriveUpper': {
- 'ds': 'ConfigDrive',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}])
- },
+ "ConfigDriveUpper": {
+ "ds": "ConfigDrive",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "vfat",
+ "LABEL": "CONFIG-2",
+ },
+ ]
+ ),
+ },
],
},
- 'ConfigDrive-seed': {
- 'ds': 'ConfigDrive',
- 'files': {
- os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
- 'latest', 'meta_data.json'): 'md\n'},
+ "ConfigDrive-seed": {
+ "ds": "ConfigDrive",
+ "files": {
+ os.path.join(
+ P_SEED_DIR,
+ "config_drive",
+ "openstack",
+ "latest",
+ "meta_data.json",
+ ): "md\n"
+ },
},
- 'RbxCloud': {
- 'ds': 'RbxCloud',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}]
- )},
+ "RbxCloud": {
+ "ds": "RbxCloud",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "CLOUDMD"},
+ ]
+ ),
+ },
],
},
- 'RbxCloudLower': {
- 'ds': 'RbxCloud',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'cloudmd'}]
- )},
+ "RbxCloudLower": {
+ "ds": "RbxCloud",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "cloudmd"},
+ ]
+ ),
+ },
],
},
- 'Hetzner': {
- 'ds': 'Hetzner',
- 'files': {P_SYS_VENDOR: 'Hetzner\n'},
+ "Hetzner": {
+ "ds": "Hetzner",
+ "files": {P_SYS_VENDOR: "Hetzner\n"},
},
- 'Hetzner-kenv': {
- 'ds': 'Hetzner',
- 'mocks': [
+ "Hetzner-kenv": {
+ "ds": "Hetzner",
+ "mocks": [
MOCK_UNAME_IS_FREEBSD,
- {'name': 'get_kenv_field', 'ret': 0, 'RET': 'Hetzner'}
+ {"name": "get_kenv_field", "ret": 0, "RET": "Hetzner"},
],
},
- 'Hetzner-dmidecode': {
- 'ds': 'Hetzner',
- 'mocks': [
- {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'}
- ],
+ "Hetzner-dmidecode": {
+ "ds": "Hetzner",
+ "mocks": [{"name": "dmi_decode", "ret": 0, "RET": "Hetzner"}],
},
- 'IBMCloud-metadata': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-metadata": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "LABEL": "METADATA",
+ },
+ ]
+ ),
+ },
],
},
- 'IBMCloud-config-2': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-config-2": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(),
- 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'},
- {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2',
- 'UUID': ds_ibm.IBM_CONFIG_UUID},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(),
- 'UUID': uuid4()},
- ]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "ext3",
+ "PARTUUID": uuid4(),
+ "UUID": uuid4(),
+ "LABEL": "cloudimg-bootfs",
+ },
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "LABEL": "config-2",
+ "UUID": ds_ibm.IBM_CONFIG_UUID,
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ "UUID": uuid4(),
+ },
+ ]
+ ),
+ },
],
},
- 'IBMCloud-nodisks': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-nodisks": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ ]
+ ),
+ },
],
},
- 'Oracle': {
- 'ds': 'Oracle',
- 'files': {
- P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n',
- }
+ "Oracle": {
+ "ds": "Oracle",
+ "files": {
+ P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + "\n",
+ },
},
- 'SmartOS-bhyve': {
- 'ds': 'SmartOS',
- 'mocks': [
+ "SmartOS-bhyve": {
+ "ds": "SmartOS",
+ "mocks": [
MOCK_VIRT_IS_VM_OTHER,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'ext4',
- 'PARTUUID': '49ec635a-01'},
- {'DEVNAME': 'vda2', 'TYPE': 'swap',
- 'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]),
- },
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "ext4",
+ "PARTUUID": "49ec635a-01",
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "swap",
+ "LABEL": "cloudimg-swap",
+ "PARTUUID": "49ec635a-02",
+ },
+ ]
+ ),
+ },
],
- 'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'},
+ "files": {P_PRODUCT_NAME: "SmartDC HVM\n"},
},
- 'SmartOS-lxbrand': {
- 'ds': 'SmartOS',
- 'mocks': [
+ "SmartOS-lxbrand": {
+ "ds": "SmartOS",
+ "mocks": [
MOCK_VIRT_IS_CONTAINER_OTHER,
- {'name': 'uname', 'ret': 0,
- 'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
- "BrandZ virtual linux x86_64 GNU/Linux")},
- {'name': 'blkid', 'ret': 2, 'out': ''},
+ {
+ "name": "uname",
+ "ret": 0,
+ "out": (
+ "Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
+ "BrandZ virtual linux x86_64 GNU/Linux"
+ ),
+ },
+ {"name": "blkid", "ret": 2, "out": ""},
+ ],
+ "files": {ds_smartos.METADATA_SOCKFILE: "would be a socket\n"},
+ },
+ "Ec2-ZStack": {
+ "ds": "Ec2",
+ "files": {P_CHASSIS_ASSET_TAG: "123456.zstack.io\n"},
+ },
+ "Ec2-E24Cloud": {
+ "ds": "Ec2",
+ "files": {P_SYS_VENDOR: "e24cloud\n"},
+ },
+ "Ec2-E24Cloud-negative": {
+ "ds": "Ec2",
+ "files": {P_SYS_VENDOR: "e24cloudyday\n"},
+ },
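+ # Each VMware entry below exercises a single ds-identify transport check
+ # (envvar guestinfo vs. vmware-rpctool guestinfo), toggling whether
+ # metadata, userdata or vendordata is present.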
+ "VMware-NoValidTransports": {
+ "ds": "VMware",
+ "mocks": [
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-NoData": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
],
- 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'},
},
- 'Ec2-ZStack': {
- 'ds': 'Ec2',
- 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'},
+ "VMware-EnvVar-NoVirtID": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ ],
+ },
+ "VMware-EnvVar-Metadata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-Userdata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-Vendordata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 0,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
},
- 'Ec2-E24Cloud': {
- 'ds': 'Ec2',
- 'files': {P_SYS_VENDOR: 'e24cloud\n'},
+ "VMware-GuestInfo-NoData": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-NoVirtID": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ ],
+ },
+ "VMware-GuestInfo-Metadata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-Userdata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-Vendordata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 0,
+ "out": "---",
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
},
- 'Ec2-E24Cloud-negative': {
- 'ds': 'Ec2',
- 'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 3f50f57d..f447d295 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -2,178 +2,276 @@
import httpretty as hp
-from cloudinit.tests import helpers
-
from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
+from tests.unittests import helpers
class TestEc2Util(helpers.HttprettyTestCase):
- VERSION = 'latest'
+ VERSION = "latest"
def test_userdata_fetch(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- body='stuff',
- status=200)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ body="stuff",
+ status=200,
+ )
userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('stuff', userdata.decode('utf-8'))
+ self.assertEqual("stuff", userdata.decode("utf-8"))
def test_userdata_fetch_fail_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=404,
+ )
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_dead(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=500)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=500,
+ )
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=404,
+ )
userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_metadata_fetch_no_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'ami-launch-index']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
- status=200, body='1')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "ami-launch-index"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "ami-launch-index"),
+ status=200,
+ body="1",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(md['ami-launch-index'], '1')
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(md["ami-launch-index"], "1")
def test_metadata_fetch_key(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200, body='0=my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "public-keys/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/"),
+ status=200,
+ body="0=my-public-key",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/0/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-public-key",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(1, len(md['public-keys']))
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(1, len(md["public-keys"]))
def test_metadata_fetch_with_2_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200,
- body="\n".join(['0=my-public-key', '1=my-other-key']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/1/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "public-keys/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/"),
+ status=200,
+ body="\n".join(["0=my-public-key", "1=my-other-key"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/0/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-public-key",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/1/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-other-key",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(2, len(md['public-keys']))
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(2, len(md["public-keys"]))
def test_metadata_fetch_bdm(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'block-device-mapping/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/'),
- status=200,
- body="\n".join(['ami', 'ephemeral0']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/ami'),
- status=200,
- body="sdb")
- hp.register_uri(hp.GET,
- uh.combine_url(base_url,
- 'block-device-mapping/ephemeral0'),
- status=200,
- body="sdc")
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(
+ ["hostname", "instance-id", "block-device-mapping/"]
+ ),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/"),
+ status=200,
+ body="\n".join(["ami", "ephemeral0"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/ami"),
+ status=200,
+ body="sdb",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/ephemeral0"),
+ status=200,
+ body="sdc",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- bdm = md['block-device-mapping']
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ bdm = md["block-device-mapping"]
self.assertEqual(2, len(bdm))
- self.assertEqual(bdm['ami'], 'sdb')
- self.assertEqual(bdm['ephemeral0'], 'sdc')
+ self.assertEqual(bdm["ami"], "sdb")
+ self.assertEqual(bdm["ephemeral0"], "sdc")
def test_metadata_no_security_credentials(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['instance-id',
- 'iam/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='i-0123451689abcdef0')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/'),
- status=200,
- body="\n".join(['info/', 'security-credentials/']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/info/'),
- status=200,
- body='LastUpdated')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/info/LastUpdated'),
- status=200, body='2016-10-27T17:29:39Z')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/security-credentials/'),
- status=200,
- body='ReadOnly/')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url,
- 'iam/security-credentials/ReadOnly/'),
- status=200,
- body="\n".join(['LastUpdated', 'Expiration']))
- hp.register_uri(hp.GET,
- uh.combine_url(
- base_url,
- 'iam/security-credentials/ReadOnly/LastUpdated'),
- status=200, body='2016-10-27T17:28:17Z')
- hp.register_uri(hp.GET,
- uh.combine_url(
- base_url,
- 'iam/security-credentials/ReadOnly/Expiration'),
- status=200, body='2016-10-28T00:00:34Z')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["instance-id", "iam/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="i-0123451689abcdef0",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/"),
+ status=200,
+ body="\n".join(["info/", "security-credentials/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/info/"),
+ status=200,
+ body="LastUpdated",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/info/LastUpdated"),
+ status=200,
+ body="2016-10-27T17:29:39Z",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/security-credentials/"),
+ status=200,
+ body="ReadOnly/",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/security-credentials/ReadOnly/"),
+ status=200,
+ body="\n".join(["LastUpdated", "Expiration"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(
+ base_url, "iam/security-credentials/ReadOnly/LastUpdated"
+ ),
+ status=200,
+ body="2016-10-27T17:28:17Z",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(
+ base_url, "iam/security-credentials/ReadOnly/Expiration"
+ ),
+ status=200,
+ body="2016-10-28T00:00:34Z",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['instance-id'], 'i-0123451689abcdef0')
- iam = md['iam']
+ self.assertEqual(md["instance-id"], "i-0123451689abcdef0")
+ iam = md["iam"]
self.assertEqual(1, len(iam))
- self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z')
- self.assertNotIn('security-credentials', iam)
+ self.assertEqual(iam["info"]["LastUpdated"], "2016-10-27T17:29:39Z")
+ self.assertNotIn("security-credentials", iam)
+
# vi: ts=4 expandtab
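
The fetch tests above all follow the same pattern: stub the 169.254.169.254 metadata tree with httpretty, then let get_instance_metadata() crawl it and assert on the resulting dict. A minimal sketch of that pattern, assuming the usual aliases (hp = httpretty, eu = cloudinit.ec2_utils, uh = cloudinit.url_helper; the real import block sits outside this excerpt) and a made-up instance id:

    import httpretty as hp
    from cloudinit import ec2_utils as eu
    from cloudinit import url_helper as uh

    @hp.activate
    def fetch_minimal_metadata(version="latest"):
        # Paths ending in '/' are listings that get crawled recursively;
        # plain paths become leaf string values in the returned dict.
        base_url = "http://169.254.169.254/%s/meta-data/" % version
        hp.register_uri(hp.GET, base_url, status=200, body="instance-id")
        hp.register_uri(
            hp.GET,
            uh.combine_url(base_url, "instance-id"),
            status=200,
            body="i-abc123",
        )
        return eu.get_instance_metadata(version, retries=0, timeout=0.1)
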
diff --git a/tests/unittests/test_event.py b/tests/unittests/test_event.py
new file mode 100644
index 00000000..2ea91bb2
--- /dev/null
+++ b/tests/unittests/test_event.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests related to cloudinit.event module."""
+from cloudinit.event import EventScope, EventType, userdata_to_events
+
+
+class TestEvent:
+ def test_userdata_to_events(self):
+ userdata = {"network": {"when": ["boot"]}}
+ expected = {EventScope.NETWORK: {EventType.BOOT}}
+ assert expected == userdata_to_events(userdata)
+
+ def test_invalid_scope(self, caplog):
+ userdata = {"networkasdfasdf": {"when": ["boot"]}}
+ userdata_to_events(userdata)
+ assert (
+ "'networkasdfasdf' is not a valid EventScope! Update data "
+ "will be ignored for 'networkasdfasdf' scope" in caplog.text
+ )
+
+ def test_invalid_event(self, caplog):
+ userdata = {"network": {"when": ["bootasdfasdf"]}}
+ userdata_to_events(userdata)
+ assert (
+ "'bootasdfasdf' is not a valid EventType! Update data "
+ "will be ignored for 'network' scope" in caplog.text
+ )
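
In short, the new module under test turns the user-data update policy into cloud-init's internal event model. A hedged recap of the contract the three tests above pin down:

    from cloudinit.event import EventScope, EventType, userdata_to_events

    # Known scope and event names map onto the enum-based structure ...
    assert userdata_to_events({"network": {"when": ["boot"]}}) == {
        EventScope.NETWORK: {EventType.BOOT}
    }
    # ... while unknown scopes or event names are logged and dropped rather
    # than raising (the two caplog assertions above).
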
diff --git a/cloudinit/tests/test_features.py b/tests/unittests/test_features.py
index d7a7226d..794a9654 100644
--- a/cloudinit/tests/test_features.py
+++ b/tests/unittests/test_features.py
@@ -4,14 +4,15 @@
This file is for testing the feature flag functionality itself,
NOT for testing any individual feature flag
"""
-import pytest
import sys
from pathlib import Path
+import pytest
+
import cloudinit
-@pytest.yield_fixture()
+@pytest.fixture()
def create_override(request):
"""
Create a feature overrides file and do some module wizardry to make
@@ -24,37 +25,44 @@ def create_override(request):
features and feature_overrides modules to how they were before
the test started
"""
- override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py'
+ override_path = Path(cloudinit.__file__).parent / "feature_overrides.py"
if override_path.exists():
- raise Exception("feature_overrides.py unexpectedly exists! "
- "Remove it to run this test.")
- with override_path.open('w') as f:
+ raise Exception(
+ "feature_overrides.py unexpectedly exists! "
+ "Remove it to run this test."
+ )
+ with override_path.open("w") as f:
for key, value in request.param.items():
- f.write('{} = {}\n'.format(key, value))
+ f.write("{} = {}\n".format(key, value))
- sys.modules.pop('cloudinit.features', None)
+ sys.modules.pop("cloudinit.features", None)
yield
override_path.unlink()
- sys.modules.pop('cloudinit.feature_overrides', None)
+ sys.modules.pop("cloudinit.feature_overrides", None)
class TestFeatures:
def test_feature_without_override(self):
from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+
assert ERROR_ON_USER_DATA_FAILURE is True
- @pytest.mark.parametrize('create_override',
- [{'ERROR_ON_USER_DATA_FAILURE': False}],
- indirect=True)
+ @pytest.mark.parametrize(
+ "create_override",
+ [{"ERROR_ON_USER_DATA_FAILURE": False}],
+ indirect=True,
+ )
def test_feature_with_override(self, create_override):
from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+
assert ERROR_ON_USER_DATA_FAILURE is False
- @pytest.mark.parametrize('create_override',
- [{'SPAM': True}],
- indirect=True)
+ @pytest.mark.parametrize(
+ "create_override", [{"SPAM": True}], indirect=True
+ )
def test_feature_only_in_override(self, create_override):
from cloudinit.features import SPAM
+
assert SPAM is True
diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_filters/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
new file mode 100644
index 00000000..c3772e3f
--- /dev/null
+++ b/tests/unittests/test_gpg.py
@@ -0,0 +1,139 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import gpg, subp
+from tests.unittests.helpers import CiTestCase
+
+TEST_KEY_HUMAN = """
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+"""
+
+TEST_KEY_MACHINE = """
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+"""
+
+TEST_KEY_FINGERPRINT_HUMAN = (
+ "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85"
+)
+
+TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85"
+
+
+class TestGPGCommands:
+ def test_dearmor_bad_value(self):
+ """This exception is handled by the callee. Ensure it is not caught
+ internally.
+ """
+ with mock.patch.object(
+ subp, "subp", side_effect=subp.ProcessExecutionError
+ ):
+ with pytest.raises(subp.ProcessExecutionError):
+ gpg.dearmor("garbage key value")
+
+ def test_gpg_list_args(self):
+ """Verify correct command gets called to list keys"""
+ no_colons = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+ "key",
+ ]
+ colons = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+ "--with-colons",
+ "key",
+ ]
+ with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ gpg.list("key")
+ assert mock.call(colons, capture=True) == m_subp.call_args
+
+ gpg.list("key", human_output=True)
+ test_calls = mock.call((no_colons), capture=True)
+ assert test_calls == m_subp.call_args
+
+ def test_gpg_dearmor_args(self):
+ """Verify correct command gets called to dearmor keys"""
+ with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ gpg.dearmor("key")
+ test_call = mock.call(
+ ["gpg", "--dearmor"], data="key", decode=False
+ )
+ assert test_call == m_subp.call_args
+
+ @mock.patch("cloudinit.gpg.time.sleep")
+ @mock.patch("cloudinit.gpg.subp.subp")
+ class TestReceiveKeys(CiTestCase):
+ """Test the recv_key method."""
+
+ def test_retries_on_subp_exc(self, m_subp, m_sleep):
+ """retry should be done on gpg receive keys failure."""
+ retries = (1, 2, 4)
+ my_exc = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ m_subp.side_effect = (my_exc, my_exc, ("", ""))
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+ self.assertEqual(
+ [mock.call(1), mock.call(2)], m_sleep.call_args_list
+ )
+
+ def test_raises_error_after_retries(self, m_subp, m_sleep):
+ """If the final run fails, error should be raised."""
+ naplen = 1
+ keyid, keyserver = ("ABCD", "keyserver.example.com")
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ with self.assertRaises(ValueError) as rcm:
+ gpg.recv_key(keyid, keyserver, retries=(naplen,))
+ self.assertIn(keyid, str(rcm.exception))
+ self.assertIn(keyserver, str(rcm.exception))
+ m_sleep.assert_called_with(naplen)
+
+ def test_no_retries_on_none(self, m_subp, m_sleep):
+ """retry should not be done if retries is None."""
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ with self.assertRaises(ValueError):
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+ m_sleep.assert_not_called()
+
+ def test_expected_gpg_command(self, m_subp, m_sleep):
+ """Verify gpg is called with expected args."""
+ key, keyserver = ("DEADBEEF", "keyserver.example.com")
+ retries = (1, 2, 4)
+ m_subp.return_value = ("", "")
+ gpg.recv_key(key, keyserver, retries=retries)
+ m_subp.assert_called_once_with(
+ [
+ "gpg",
+ "--no-tty",
+ "--keyserver=%s" % keyserver,
+ "--recv-keys",
+ key,
+ ],
+ capture=True,
+ )
+ m_sleep.assert_not_called()
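
As the retry tests spell out, recv_key() treats retries as a tuple of sleep lengths between attempts and raises ValueError once they are exhausted. A minimal usage sketch; the key id and keyserver are placeholders taken from the tests, not real values:

    from cloudinit import gpg

    # Up to four attempts: sleep 1s, 2s and 4s after successive failures,
    # then raise ValueError if the final attempt also fails.
    gpg.recv_key("DEADBEEF", "keyserver.example.com", retries=(1, 2, 4))
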
diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_handler/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py
deleted file mode 100644
index 8acc0b33..00000000
--- a/tests/unittests/test_handler/test_handler_apk_configure.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_apk_configure
-Test creation of repositories file
-"""
-
-import logging
-import os
-import textwrap
-
-from cloudinit import (cloud, helpers, util)
-
-from cloudinit.config import cc_apk_configure
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-REPO_FILE = "/etc/apk/repositories"
-DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
-CC_APK = 'cloudinit.config.cc_apk_configure'
-
-
-class TestNoConfig(FilesystemMockingTestCase):
- def setUp(self):
- super(TestNoConfig, self).setUp()
- self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos')
- self.name = "apk-configure"
- self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- def test_no_config(self):
- """
- Test that nothing is done if no apk-configure
- configuration is provided.
- """
- config = util.get_builtin_cfg()
-
- cc_apk_configure.handle(self.name, config, self.cloud_init,
- self.log, self.args)
-
- self.assertEqual(0, self.m_write_repos.call_count)
-
-
-class TestConfig(FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.new_root = self.tmp_dir()
- self.new_root = self.reRoot(root=self.new_root)
- for dirname in ['tmp', 'etc/apk']:
- util.ensure_dir(os.path.join(self.new_root, dirname))
- self.paths = helpers.Paths({'templates_dir': self.new_root})
- self.name = "apk-configure"
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- @mock.patch(CC_APK + '._write_repositories_file')
- def test_no_repo_settings(self, m_write_repos):
- """
- Test that nothing is written if the 'alpine_repo' key
- is not present.
- """
- config = {"apk_repos": {}}
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- self.assertEqual(0, m_write_repos.call_count)
-
- @mock.patch(CC_APK + '._write_repositories_file')
- def test_empty_repo_settings(self, m_write_repos):
- """
- Test that nothing is written if 'alpine_repo' list is empty.
- """
- config = {"apk_repos": {"alpine_repo": []}}
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- self.assertEqual(0, m_write_repos.call_count)
-
- def test_only_main_repo(self):
- """
- Test when only details of the main repo are written to file.
- """
- alpine_version = 'v3.12'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_and_community_repos(self):
- """
- Test when only details of main and community repos are
- written to file.
- """
- alpine_version = 'edge'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_community_testing_repos(self):
- """
- Test when details of main, community and testing repos
- are written to file.
- """
- alpine_version = 'v3.12'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- #
- # Testing - using with non-Edge installation may cause problems!
- #
- {0}/edge/testing
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_edge_main_community_testing_repos(self):
- """
- Test when details of main, community and testing repos
- for Edge version of Alpine are written to file.
- """
- alpine_version = 'edge'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- {0}/{1}/testing
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_community_testing_local_repos(self):
- """
- Test when details of main, community, testing and
- local repos are written to file.
- """
- alpine_version = 'v3.12'
- local_repo_url = 'http://some.mirror/whereever'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- },
- "local_repo_base_url": local_repo_url
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- #
- # Testing - using with non-Edge installation may cause problems!
- #
- {0}/edge/testing
-
- #
- # Local repo
- #
- {2}/{1}
-
- """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_edge_main_community_testing_local_repos(self):
- """
- Test when details of main, community, testing and local repos
- for Edge version of Alpine are written to file.
- """
- alpine_version = 'edge'
- local_repo_url = 'http://some.mirror/whereever'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- },
- "local_repo_base_url": local_repo_url
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- {0}/edge/testing
-
- #
- # Local repo
- #
- {2}/{1}
-
- """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-
-# vi: ts=4 expandtab
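
For context on what the deleted apk tests covered: they boil down to a mapping from the apk_repos cloud-config section to the rendered /etc/apk/repositories file. A hypothetical sketch of one such case (mirroring test_main_community_testing_repos above, not part of the diff itself):

    from cloudinit.config import cc_apk_configure

    config = {
        "apk_repos": {
            "alpine_repo": {
                "version": "v3.12",
                "community_enabled": True,
                "testing_enabled": True,
            }
        }
    }
    # Invoked as in the deleted tests (cloud, log and args elided here):
    # cc_apk_configure.handle("apk-configure", config, cloud, log, [])
    # writes main and community lines for v3.12 plus an edge/testing entry.
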
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
deleted file mode 100644
index 367971cb..00000000
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ /dev/null
@@ -1,626 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_handler_apt_source_v1
-Testing various config variations of the apt_source config
-This calls all things with v1 format to stress the conversion code on top of
-the actually tested code.
-"""
-import os
-import re
-import shutil
-import tempfile
-from unittest import mock
-from unittest.mock import call
-
-from cloudinit.config import cc_apt_configure
-from cloudinit import gpg
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
-EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""
-
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-
-class FakeDistro(object):
- """Fake Distro helper object"""
- def update_package_sources(self):
- """Fake update_package_sources helper method"""
- return
-
-
-class FakeDatasource:
- """Fake Datasource helper object"""
- def __init__(self):
- self.region = 'region'
-
-
-class FakeCloud(object):
- """Fake Cloud helper object"""
- def __init__(self):
- self.distro = FakeDistro()
- self.datasource = FakeDatasource()
-
-
-class TestAptSourceConfig(TestCase):
- """TestAptSourceConfig
- Main Class to test apt_source configs
- """
- release = "fantastic"
-
- def setUp(self):
- super(TestAptSourceConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
- self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
- self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
- self.join = os.path.join
- self.matcher = re.compile(ADD_APT_REPO_MATCH).search
- # mock fallback filename into writable tmp dir
- self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
- "cloud_config_sources.list")
-
- self.fakecloud = FakeCloud()
-
- rpatcher = mock.patch("cloudinit.util.lsb_release")
- get_rel = rpatcher.start()
- get_rel.return_value = {'codename': self.release}
- self.addCleanup(rpatcher.stop)
- apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
- get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
- self.addCleanup(apatcher.stop)
-
- def _get_default_params(self):
- """get_default_params
- Get the most basic default mirror and release info to be used in tests
- """
- params = {}
- params['RELEASE'] = self.release
- params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
- return params
-
- def wrapv1conf(self, cfg):
- params = self._get_default_params()
- # old v1 list format under old keys, but callable to main handler
- # disable source.list rendering and set mirror to avoid other code
- return {'apt_preserve_sources_list': True,
- 'apt_mirror': params['MIRROR'],
- 'apt_sources': cfg}
-
- def myjoin(self, *args, **kwargs):
- """myjoin - redir into writable tmpdir"""
- if (args[0] == "/etc/apt/sources.list.d/" and
- args[1] == "cloud_config_sources.list" and
- len(args) == 2):
- return self.join(self.tmp, args[0].lstrip("/"), args[1])
- else:
- return self.join(*args, **kwargs)
-
- def apt_src_basic(self, filename, cfg):
- """apt_src_basic
- Test Fix deb source string, has to overwrite mirror conf in params
- """
- cfg = self.wrapv1conf(cfg)
-
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "karmic-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic(self):
- """Test deb source string, overwrite mirror and filename"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- self.apt_src_basic(self.aptlistfile, [cfg])
-
- def test_apt_src_basic_dict(self):
- """Test deb source string, overwrite mirror and filename (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic(self.aptlistfile, cfg)
-
- def apt_src_basic_tri(self, cfg):
- """apt_src_basic_tri
- Test Fix three deb source string, has to overwrite mirror conf in
- params. Test with filenames provided in config.
- generic part to check three files with different content
- """
- self.apt_src_basic(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "precise-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "lucid-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic_tri(self):
- """Test Fix three deb source string with filenames"""
- cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile3}
- self.apt_src_basic_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_basic_dict_tri(self):
- """Test Fix three deb source string with filenames (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile2: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile3: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic_tri(cfg)
-
- def test_apt_src_basic_nofn(self):
- """Test Fix three deb source string without filenames (dict)"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_basic(self.fallbackfn, [cfg])
-
- def apt_src_replacement(self, filename, cfg):
- """apt_src_replace
- Test Autoreplacement of MIRROR and RELEASE in source specs
- """
- cfg = self.wrapv1conf(cfg)
- params = self._get_default_params()
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- self.apt_src_replacement(self.aptlistfile, [cfg])
-
- def apt_src_replace_tri(self, cfg):
- """apt_src_replace_tri
- Test three autoreplacements of MIRROR and RELEASE in source specs with
- generic part
- """
- self.apt_src_replacement(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- params = self._get_default_params()
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "main"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "universe"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace_tri(self):
- """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- self.apt_src_replace_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_replace_dict_tri(self):
- """Test triple Autoreplacement in source specs (dict)"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
- 'notused': {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
- self.apt_src_replace_tri(cfg)
-
- def test_apt_src_replace_nofn(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse'}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_replacement(self.fallbackfn, [cfg])
-
- def apt_src_keyid(self, filename, cfg, keynum):
- """apt_src_keyid
- Test specification of a source + keyid
- """
- cfg = self.wrapv1conf(cfg)
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1234', '')) as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- # check if it added the right amount of keys
- calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'],
- data=b'fakekey 1234',
- target=None))
- mockobj.assert_has_calls(calls, any_order=True)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid(self):
- """Test specification of a source + keyid with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- self.apt_src_keyid(self.aptlistfile, [cfg], 1)
-
- def test_apt_src_keyid_tri(self):
- """Test 3x specification of a source + keyid with filename being set"""
- cfg1 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial universe'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial multiverse'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile3}
-
- self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "universe"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid_nofn(self):
- """Test specification of a source + keyid without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_keyid(self.fallbackfn, [cfg], 1)
-
- def apt_src_key(self, filename, cfg):
- """apt_src_key
- Test specification of a source + key
- """
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 4321', target=None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_key(self):
- """Test specification of a source + key with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321",
- 'filename': self.aptlistfile}
- self.apt_src_key(self.aptlistfile, cfg)
-
- def test_apt_src_key_nofn(self):
- """Test specification of a source + key without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_key(self.fallbackfn, cfg)
-
- def test_apt_src_keyonly(self):
- """Test specifying key without source"""
- cfg = {'key': "fakekey 4242",
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_once_with(['apt-key', 'add', '-'],
- data=b'fakekey 4242', target=None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyidonly(self):
- """Test specification of a keyid without source"""
- cfg = {'keyid': "03683F77",
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 1212', target=None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def apt_src_keyid_real(self, cfg, expectedkey):
- """apt_src_keyid_real
- Test specification of a keyid without source including
- up to addition of the key (add_apt_key_raw mocked to keep the
- environment as is)
- """
- key = cfg['keyid']
- keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com')
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'getkeybyid',
- return_value=expectedkey) as mockgetkey:
- cc_apt_configure.handle("test", cfg, self.fakecloud,
- None, None)
-
- mockgetkey.assert_called_with(key, keyserver)
- mockkey.assert_called_with(expectedkey, None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyid_real(self):
- """test_apt_src_keyid_real - Test keyid including key add"""
- keyid = "03683F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_real(self):
- """test_apt_src_longkeyid_real - Test long keyid including key add"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_ks_real(self):
- """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'keyserver': 'keys.gnupg.net',
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_ppa(self):
- """Test adding a ppa"""
- cfg = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_once_with(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'],
- target=None)
-
- # adding ppa should ignore filename (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_ppa_tri(self):
- """Test adding three ppa's"""
- cfg1 = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
- 'filename': self.aptlistfile3}
- cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud,
- None, None)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
- target=None),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
- target=None),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
- target=None)]
- mockobj.assert_has_calls(calls, any_order=True)
-
- # adding ppa should ignore all filenames (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
- self.assertFalse(os.path.isfile(self.aptlistfile2))
- self.assertFalse(os.path.isfile(self.aptlistfile3))
-
- def test_convert_to_new_format(self):
- """Test the conversion of old to new format"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- cfg = {'apt_sources': [cfg1, cfg2, cfg3]}
- checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
-
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
- self.assertEqual(newcfg['apt']['sources'], checkcfg)
-
- # convert again, should stay the same
- newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
- self.assertEqual(newcfg2['apt']['sources'], checkcfg)
-
- # should work without raising an exception
- cc_apt_configure.convert_to_v3_apt_format({})
-
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5})
-
- def test_convert_to_new_format_collision(self):
- """Test the conversion of old to new format with collisions
- That matches e.g. the MAAS case specifying old and new config"""
- cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
- 'apt_proxy': 'http://192.168.122.1:8000/'}
- cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}}
- cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
- 'apt_proxy': 'ftp://192.168.122.1:8000/'}
-
- # collision (equal)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
- self.assertEqual(newcfg, cfg_3_only)
- # collision (equal, so ok to remove)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
- self.assertEqual(newcfg, cfg_3_only)
- # collision (unequal)
- match = "Old and New.*unequal.*apt_proxy"
- with self.assertRaisesRegex(ValueError, match):
- cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
-
- def test_convert_to_new_format_dict_collision(self):
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- fullv3 = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
- cfg_3_only = {'apt': {'sources': fullv3}}
- cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3.update(cfg_3_only)
-
- # collision (equal, so ok to remove)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
- self.assertEqual(newcfg, cfg_3_only)
- # no old spec (same result)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
- self.assertEqual(newcfg, cfg_3_only)
-
- diff = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'DIFFERENTVERSE'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
- cfg_3_only = {'apt': {'sources': diff}}
- cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3_different.update(cfg_3_only)
-
- # collision (unequal by dict having a different entry)
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
-
- missing = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'}}
- cfg_3_only = {'apt': {'sources': missing}}
- cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3_missing.update(cfg_3_only)
- # collision (unequal by dict missing an entry)
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
-
-
-# vi: ts=4 expandtab
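
The bulk of the deleted v1 tests exercised the old-to-new config conversion; its core behaviour, as test_convert_to_new_format showed, is re-keying the apt_sources list by filename under apt:sources. A small sketch of that mapping (paths shortened, not part of the diff itself):

    from cloudinit.config import cc_apt_configure

    old = {
        "apt_sources": [
            {"source": "deb $MIRROR $RELEASE multiverse",
             "filename": "single-deb.list"}
        ]
    }
    new = cc_apt_configure.convert_to_v3_apt_format(old)
    # new["apt"]["sources"] == {"single-deb.list": {
    #     "filename": "single-deb.list",
    #     "source": "deb $MIRROR $RELEASE multiverse"}}
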
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
deleted file mode 100644
index ac847238..00000000
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ /dev/null
@@ -1,1134 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""test_handler_apt_source_v3
-Testing various config variations of the apt_source custom config
-This tries to call all in the new v3 format and cares about new features
-"""
-import glob
-import os
-import re
-import shutil
-import socket
-import tempfile
-
-from unittest import TestCase, mock
-from unittest.mock import call
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import gpg
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests import helpers as t_help
-
-EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""
-
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-TARGET = None
-
-MOCK_LSB_RELEASE_DATA = {
- 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS',
- 'release': '18.04', 'codename': 'bionic'}
-
-
-class FakeDatasource:
- """Fake Datasource helper object"""
- def __init__(self):
- self.region = 'region'
-
-
-class FakeCloud:
- """Fake Cloud helper object"""
- def __init__(self):
- self.datasource = FakeDatasource()
-
-
-class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
- """TestAptSourceConfig
- Main Class to test apt configs
- """
- def setUp(self):
- super(TestAptSourceConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.addCleanup(shutil.rmtree, self.new_root)
- self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
- self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
- self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
- self.join = os.path.join
- self.matcher = re.compile(ADD_APT_REPO_MATCH).search
- self.add_patch(
- 'cloudinit.config.cc_apt_configure.util.lsb_release',
- 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy())
-
- @staticmethod
- def _add_apt_sources(*args, **kwargs):
- with mock.patch.object(cc_apt_configure, 'update_packages'):
- cc_apt_configure.add_apt_sources(*args, **kwargs)
-
- @staticmethod
- def _get_default_params():
- """get_default_params
- Get the most basic default mirror and release info to be used in tests
- """
- params = {}
- params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release']
- arch = 'amd64'
- params['MIRROR'] = cc_apt_configure.\
- get_default_mirrors(arch)["PRIMARY"]
- return params
-
- def _myjoin(self, *args, **kwargs):
- """_myjoin - redir into writable tmpdir"""
- if (args[0] == "/etc/apt/sources.list.d/" and
- args[1] == "cloud_config_sources.list" and
- len(args) == 2):
- return self.join(self.tmp, args[0].lstrip("/"), args[1])
- else:
- return self.join(*args, **kwargs)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def _apt_src_basic(self, filename, cfg):
- """_apt_src_basic
- Test Fix deb source string, has to overwrite mirror conf in params
- """
- params = self._get_default_params()
-
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "karmic-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_basic(self):
- """test_apt_v3_src_basic - Test fix deb source string"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}}
- self._apt_src_basic(self.aptlistfile, cfg)
-
- def test_apt_v3_src_basic_tri(self):
- """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile2: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile3: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted')}}
- self._apt_src_basic(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "precise-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "lucid-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def _apt_src_replacement(self, filename, cfg):
- """apt_src_replace
- Test Autoreplacement of MIRROR and RELEASE in source specs
- """
- params = self._get_default_params()
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_replace(self):
- """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}}
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- def test_apt_v3_src_replace_fn(self):
- """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
- cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}}
- # second file should overwrite the dict key
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- def _apt_src_replace_tri(self, cfg):
- """_apt_src_replace_tri
- Test three autoreplacements of MIRROR and RELEASE in source specs with
- generic part
- """
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- params = self._get_default_params()
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "main"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "universe"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_replace_tri(self):
- """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
- 'notused': {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
- self._apt_src_replace_tri(cfg)
-
- def _apt_src_keyid(self, filename, cfg, keynum):
- """_apt_src_keyid
- Test specification of a source + keyid
- """
- params = self._get_default_params()
-
- with mock.patch("cloudinit.subp.subp",
- return_value=('fakekey 1234', '')) as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- # check if it added the right amount of keys
- calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234',
- target=TARGET))
- mockobj.assert_has_calls(calls, any_order=True)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_keyid(self):
- """test_apt_v3_src_keyid - Test source + keyid with filename"""
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"}}
- self._apt_src_keyid(self.aptlistfile, cfg, 1)
-
- def test_apt_v3_src_keyid_tri(self):
- """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes"""
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"},
- 'ignored': {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial universe'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial multiverse'),
- 'keyid': "03683F77"}}
-
- self._apt_src_keyid(self.aptlistfile, cfg, 3)
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "universe"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_key(self):
- """test_apt_v3_src_key - Test source + key"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321"}}
-
- with mock.patch.object(subp, 'subp') as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321',
- target=TARGET)
-
- self.assertTrue(os.path.isfile(self.aptlistfile))
-
- contents = util.load_file(self.aptlistfile)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_keyonly(self):
- """test_apt_v3_src_keyonly - Test key without source"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
-
- with mock.patch.object(subp, 'subp') as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242',
- target=TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_keyidonly(self):
- """test_apt_v3_src_keyidonly - Test keyid without source"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'keyid': "03683F77"}}
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212',
- target=TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def apt_src_keyid_real(self, cfg, expectedkey):
- """apt_src_keyid_real
- Test specification of a keyid without source including
- up to addition of the key (add_apt_key_raw mocked to keep the
- environment as is)
- """
- params = self._get_default_params()
-
- with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'getkeybyid',
- return_value=expectedkey) as mockgetkey:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- keycfg = cfg[self.aptlistfile]
- mockgetkey.assert_called_with(keycfg['keyid'],
- keycfg.get('keyserver',
- 'keyserver.ubuntu.com'))
- mockkey.assert_called_with(expectedkey, TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_keyid_real(self):
- """test_apt_v3_src_keyid_real - Test keyid including key add"""
- keyid = "03683F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_longkeyid_real(self):
- """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_longkeyid_ks_real(self):
- """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {self.aptlistfile: {'keyid': keyid,
- 'keyserver': 'keys.gnupg.net'}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_keyid_keyserver(self):
- """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
- keyid = "03683F77"
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'keyid': keyid,
- 'keyserver': 'test.random.com'}}
-
- # in some test environments only *.ubuntu.com is reachable
- # so mock the call and check if the config got there
- with mock.patch.object(gpg, 'getkeybyid',
- return_value="fakekey") as mockgetkey:
- with mock.patch.object(cc_apt_configure,
- 'add_apt_key_raw') as mockadd:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockgetkey.assert_called_with('03683F77', 'test.random.com')
- mockadd.assert_called_with('fakekey', TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_ppa(self):
- """test_apt_v3_src_ppa - Test specification of a ppa"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
-
- with mock.patch("cloudinit.subp.subp") as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
- mockobj.assert_any_call(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'], target=TARGET)
-
- # adding ppa should ignore filename (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_ppa_tri(self):
- """test_apt_v3_src_ppa_tri - Test specification of multiple PPAs"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'},
- self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
- self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
-
- with mock.patch("cloudinit.subp.subp") as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
- target=TARGET),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
- target=TARGET),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
- target=TARGET)]
- mockobj.assert_has_calls(calls, any_order=True)
-
- # adding ppa should ignore all filenames (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
- self.assertFalse(os.path.isfile(self.aptlistfile2))
- self.assertFalse(os.path.isfile(self.aptlistfile3))
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
- """test_apt_v3_list_rename - Test finding the mirror and renaming apt lists"""
- pre = "/var/lib/apt/lists"
- # filenames are archive dependent
-
- arch = 's390x'
- m_get_dpkg_architecture.return_value = arch
- component = "ubuntu-ports"
- archive = "ports.ubuntu.com"
-
- cfg = {'primary': [{'arches': ["default"],
- 'uri':
- 'http://test.ubuntu.com/%s/' % component}],
- 'security': [{'arches': ["default"],
- 'uri':
- 'http://testsec.ubuntu.com/%s/' % component}]}
- post = ("%s_dists_%s-updates_InRelease" %
- (component, MOCK_LSB_RELEASE_DATA['codename']))
- fromfn = ("%s/%s_%s" % (pre, archive, post))
- tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
-
- self.assertEqual(mirrors['MIRROR'],
- "http://test.ubuntu.com/%s/" % component)
- self.assertEqual(mirrors['PRIMARY'],
- "http://test.ubuntu.com/%s/" % component)
- self.assertEqual(mirrors['SECURITY'],
- "http://testsec.ubuntu.com/%s/" % component)
-
- with mock.patch.object(os, 'rename') as mockren:
- with mock.patch.object(glob, 'glob',
- return_value=[fromfn]):
- cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
-
- mockren.assert_any_call(fromfn, tofn)
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
- target = os.path.join(self.tmp, "rename_non_slash")
- apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
-
- arch = 'amd64'
- m_get_dpkg_architecture.return_value = arch
-
- mirror_path = "some/random/path/"
- primary = "http://test.ubuntu.com/" + mirror_path
- security = "http://test-security.ubuntu.com/" + mirror_path
- mirrors = {'PRIMARY': primary, 'SECURITY': security}
-
- # these match default archive prefixes
- opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
- osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
- # this one won't match the defaults and should not be renamed
- other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
- # these are our new expected prefixes
- npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
- nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
-
- files = [
- # orig prefix, new prefix, suffix
- (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
- (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
- (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
- (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
- (other_pre, other_pre, "_main_binary-amd64_Packages"),
- (other_pre, other_pre, "_Release"),
- (other_pre, other_pre, "_Release.gpg"),
- (osec_pre, nsec_pre, "_InRelease"),
- (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
- (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
- ]
-
- expected = sorted([npre + suff for opre, npre, suff in files])
- # create files
- for (opre, _npre, suff) in files:
- fpath = os.path.join(apt_lists_d, opre + suff)
- util.write_file(fpath, content=fpath)
-
- cc_apt_configure.rename_apt_lists(mirrors, target, arch)
- found = sorted(os.listdir(apt_lists_d))
- self.assertEqual(expected, found)
-
- @staticmethod
- def test_apt_v3_proxy():
- """test_apt_v3_proxy - Test apt_*proxy configuration"""
- cfg = {"proxy": "foobar1",
- "http_proxy": "foobar2",
- "ftp_proxy": "foobar3",
- "https_proxy": "foobar4"}
-
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
-
- mockobj.assert_called_with('proxyfn',
- ('Acquire::http::Proxy "foobar1";\n'
- 'Acquire::http::Proxy "foobar2";\n'
- 'Acquire::ftp::Proxy "foobar3";\n'
- 'Acquire::https::Proxy "foobar4";\n'))
-
- def test_apt_v3_mirror(self):
- """test_apt_v3_mirror - Test defining a mirror"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir}],
- "security": [{'arches': ["default"],
- "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), 'amd64')
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_default(self):
- """test_apt_v3_mirror_default - Test without defining a mirror"""
- arch = 'amd64'
- default_mirrors = cc_apt_configure.get_default_mirrors(arch)
- pmir = default_mirrors["PRIMARY"]
- smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
- mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_arches(self):
- """test_apt_v3_mirror_arches - Test arches selection of mirror"""
- pmir = "http://my-primary.ubuntu.com/ubuntu/"
- smir = "http://my-security.ubuntu.com/ubuntu/"
- arch = 'ppc64el'
- cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"},
- {'arches': [arch], "uri": pmir}],
- "security": [{'arches': ["default"], "uri": "notthis-security"},
- {'arches': [arch], "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
-
- self.assertEqual(mirrors['PRIMARY'], pmir)
- self.assertEqual(mirrors['MIRROR'], pmir)
- self.assertEqual(mirrors['SECURITY'], smir)
-
- def test_apt_v3_mirror_arches_default(self):
- """test_apt_v3_mirror_arches_default - Test falling back to default arch"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir},
- {'arches': ["thisarchdoesntexist"],
- "uri": "notthis"}],
- "security": [{'arches': ["thisarchdoesntexist"],
- "uri": "nothat"},
- {'arches': ["default"],
- "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), 'amd64')
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_get_def_mir_non_intel_no_arch(
- self, m_get_dpkg_architecture
- ):
- arch = 'ppc64el'
- m_get_dpkg_architecture.return_value = arch
- expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
- 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
- self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
-
- def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
- found = cc_apt_configure.get_default_mirrors('ppc64el')
-
- expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
- 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
- self.assertEqual(expected, found)
-
- def test_apt_v3_mirror_arches_sysdefault(self):
- """test_apt_v3_mirror_arches_sysdefault - Test arches fallback to sys default"""
- arch = 'amd64'
- default_mirrors = cc_apt_configure.get_default_mirrors(arch)
- pmir = default_mirrors["PRIMARY"]
- smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
- cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
- "uri": "notthis"},
- {'arches': ["thisarchdoesntexist"],
- "uri": "notthiseither"}],
- "security": [{'arches': ["thisarchdoesntexist"],
- "uri": "nothat"},
- {'arches': ["thisarchdoesntexist_64"],
- "uri": "nothateither"}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
-
- self.assertEqual(mirrors['MIRROR'], pmir)
- self.assertEqual(mirrors['PRIMARY'], pmir)
- self.assertEqual(mirrors['SECURITY'], smir)
-
- def test_apt_v3_mirror_search(self):
- """test_apt_v3_mirror_search - Test searching mirrors in a list;
- mock checks to avoid relying on network connectivity"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "search": ["pfailme", pmir]}],
- "security": [{'arches': ["default"],
- "search": ["sfailme", smir]}]}
-
- with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
- side_effect=[pmir, smir]) as mocksearch:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(),
- 'amd64')
-
- calls = [call(["pfailme", pmir]),
- call(["sfailme", smir])]
- mocksearch.assert_has_calls(calls)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_search_many2(self):
- """test_apt_v3_mirror_search_many2 - Test both mirror specs at once"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir,
- "search": ["pfailme", "foo"]}],
- "security": [{'arches': ["default"],
- "uri": smir,
- "search": ["sfailme", "bar"]}]}
-
- arch = 'amd64'
-
- # should be called only once per type, despite two mirror configs
- mycloud = None
- with mock.patch.object(cc_apt_configure, 'get_mirror',
- return_value="http://mocked/foo") as mockgm:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(cfg, 'primary', arch, mycloud),
- call(cfg, 'security', arch, mycloud)]
- mockgm.assert_has_calls(calls)
-
- # should not be called, since primary is specified
- with mock.patch.object(cc_apt_configure.util,
- 'search_for_mirror') as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), arch)
- mockse.assert_not_called()
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_url_resolvable(self):
- """test_apt_v3_url_resolvable - Test resolving urls"""
-
- with mock.patch.object(util, 'is_resolvable') as mockresolve:
- util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mockresolve.assert_called_with("1.2.3.4")
-
- with mock.patch.object(util, 'is_resolvable') as mockresolve:
- util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- mockresolve.assert_called_with("us.archive.ubuntu.com")
-
- # former tests can leave this set (or not, if the test is run directly)
- # do a hard reset to ensure a stable result
- util._DNS_REDIRECT_IP = None
- bad = [(None, None, None, "badname", ["10.3.2.1"])]
- good = [(None, None, None, "goodname", ["10.2.3.4"])]
- with mock.patch.object(socket, 'getaddrinfo',
- side_effect=[bad, bad, bad, good,
- good]) as mocksock:
- ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mocksock.assert_any_call('does-not-exist.example.com.', None,
- 0, 0, 1, 2)
- mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2)
- mocksock.assert_any_call('us.archive.ubuntu.com', None)
- mocksock.assert_any_call('1.2.3.4', None)
-
- self.assertTrue(ret)
- self.assertTrue(ret2)
-
- # the side effect only needs a bad result after the initial call
- with mock.patch.object(socket, 'getaddrinfo',
- side_effect=[bad]) as mocksock:
- ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
- calls = [call('failme.com', None)]
- mocksock.assert_has_calls(calls)
- self.assertFalse(ret3)
-
- def test_apt_v3_disable_suites(self):
- """test_apt_v3_disable_suites - disable_suites with many configurations"""
- release = "xenial"
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
-
- # disable nothing
- disabled = []
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable release suite
- disabled = ["$RELEASE"]
- expect = """\
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable other suite
- disabled = ["$RELEASE-updates"]
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
- """ xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # multi disable
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # multi line disable (same suite multiple times in input)
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://UBUNTU.com//ubuntu xenial-updates main
-deb http://UBUNTU.COM//ubuntu xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
- """xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # comment in input
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-#foo
-#deb http://UBUNTU.com//ubuntu xenial-updates main
-deb http://UBUNTU.COM//ubuntu xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-#foo
-#deb http://UBUNTU.com//ubuntu xenial-updates main
-# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
- """xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable custom suite
- disabled = ["foobar"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ foobar main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable non existing suite
- disabled = ["foobar"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ notfoobar main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ notfoobar main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable suite with option
- disabled = ["$RELEASE-updates"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [a=b] http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
- """xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable suite with more options and auto $RELEASE expansion
- disabled = ["updates"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [a=b c=d] http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb [a=b c=d] \
-http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable suite while options at others
- disabled = ["$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- def test_disable_suites_blank_lines(self):
- """test_disable_suites_blank_lines - ensure blank lines allowed"""
- lines = ["deb %(repo)s %(rel)s main universe",
- "",
- "deb %(repo)s %(rel)s-updates main universe",
- " # random comment",
- "#comment here",
- ""]
- rel = "trusty"
- repo = 'http://example.com/mirrors/ubuntu'
- orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
- self.assertEqual(
- orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
-
- @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain')
- def test_apt_v3_mirror_search_dns(self, m_get_hostname):
- """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
- pmir = "phit"
- smir = "shit"
- arch = 'amd64'
- mycloud = self._get_cloud('ubuntu')
- cfg = {"primary": [{'arches': ["default"],
- "search_dns": True}],
- "security": [{'arches': ["default"],
- "search_dns": True}]}
-
- with mock.patch.object(cc_apt_configure, 'get_mirror',
- return_value="http://mocked/foo") as mockgm:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(cfg, 'primary', arch, mycloud),
- call(cfg, 'security', arch, mycloud)]
- mockgm.assert_has_calls(calls)
-
- with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns',
- return_value="http://mocked/foo") as mocksdns:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(True, 'primary', cfg, mycloud),
- call(True, 'security', cfg, mycloud)]
- mocksdns.assert_has_calls(calls)
-
- # the first return value serves the non-dns search call made before the dns one
- with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
- side_effect=[None, pmir, None, smir]) as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
-
- calls = [call(None),
- call(['http://ubuntu-mirror.localdomain/ubuntu',
- 'http://ubuntu-mirror/ubuntu']),
- call(None),
- call(['http://ubuntu-security-mirror.localdomain/ubuntu',
- 'http://ubuntu-security-mirror/ubuntu'])]
- mockse.assert_has_calls(calls)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
-
-class TestDebconfSelections(TestCase):
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_set_sel_appends_newline_if_absent(self, m_subp):
- """Automatically append a newline to debconf-set-selections config."""
- selections = b'some/setting boolean true'
- cc_apt_configure.debconf_set_selections(selections=selections)
- cc_apt_configure.debconf_set_selections(selections=selections + b'\n')
- m_call = mock.call(
- ['debconf-set-selections'], data=selections + b'\n', capture=True,
- target=None)
- self.assertEqual([m_call, m_call], m_subp.call_args_list)
-
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- def test_no_set_sel_if_none_to_set(self, m_set_sel):
- cc_apt_configure.apply_debconf_selections({'foo': 'bar'})
- m_set_sel.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_apt_configure."
- "debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
- data = {
- 'set1': 'pkga pkga/q1 mybool false',
- 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
- 'pkgc\tpkgc/ip\tstring\t10.0.0.1')}
- lines = '\n'.join(data.values()).split('\n')
-
- m_get_inst.return_value = ["adduser", "apparmor"]
- m_set_sel.return_value = None
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
- self.assertTrue(m_get_inst.called)
- self.assertEqual(m_set_sel.call_count, 1)
-
- # assumes called with *args value.
- selections = m_set_sel.call_args_list[0][0][0].decode()
-
- missing = [
- line for line in lines if line not in selections.splitlines()
- ]
- self.assertEqual([], missing)
-
- @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
- m_dpkg_r):
- data = {
- 'set1': 'pkga pkga/q1 mybool false',
- 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
- 'pkgc\tpkgc/ip\tstring\t10.0.0.1'),
- 'cloud-init': ('cloud-init cloud-init/datasources '
- 'multiselect MAAS')}
-
- m_set_sel.return_value = None
- m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
- "cloud-init", 'zdog']
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
-
- # reconfigure should be called with the intersection
- # of (packages in config, packages installed)
- self.assertEqual(m_dpkg_r.call_count, 1)
- # assumes called with *args (dpkg_reconfigure([a,b,c], target=))
- packages = m_dpkg_r.call_args_list[0][0][0]
- self.assertEqual(set(['cloud-init', 'pkgb']), set(packages))
-
- @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
- m_dpkg_r):
- data = {'set1': 'pkga pkga/q1 mybool false'}
-
- m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
- "cloud-init", 'zdog']
- m_set_sel.return_value = None
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
-
- self.assertTrue(m_get_inst.called)
- self.assertEqual(m_dpkg_r.call_count, 0)
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
- target = "/foo-target"
-
- # due to the way the cleaners are called (via dictionary reference)
- # mocking clean_cloud_init directly does not work. So we mock
- # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
- ci_cleaner = mock.MagicMock()
- with mock.patch.dict(("cloudinit.config.cc_apt_configure."
- "CONFIG_CLEANERS"),
- values={'cloud-init': ci_cleaner}, clear=True):
- cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'],
- target=target)
- # cloud-init is actually the only package we have a cleaner for
- # so for now, it's the only one that should be reconfigured
- self.assertTrue(m_subp.called)
- ci_cleaner.assert_called_with(target)
- self.assertEqual(m_subp.call_count, 1)
- found = m_subp.call_args_list[0][0][0]
- expected = ['dpkg-reconfigure', '--frontend=noninteractive',
- 'cloud-init']
- self.assertEqual(expected, found)
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
- cc_apt_configure.dpkg_reconfigure([])
- m_subp.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
- cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
- m_subp.assert_not_called()
-
-#
-# vi: ts=4 expandtab
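The deleted apt tests above pin down the behaviour of cc_apt_configure.disable_suites(disabled, src, release): matching suite lines are commented out with a "# suite disabled by cloud-init:" prefix rather than removed. A minimal pytest-style sketch of that same check, using only the function name, signature and comment prefix visible in the deleted assertions (not verified against current upstream), could look like:

    # Illustrative sketch only; mirrors the deleted disable_suites assertions.
    from cloudinit.config import cc_apt_configure

    def test_disable_suites_comments_matching_line():
        orig = (
            "deb http://ubuntu.com/ubuntu xenial main\n"
            "deb http://ubuntu.com/ubuntu xenial-updates main"
        )
        result = cc_apt_configure.disable_suites(
            ["$RELEASE-updates"], orig, "xenial")
        lines = result.splitlines()
        # untouched suites stay exactly as written ...
        assert lines[0] == "deb http://ubuntu.com/ubuntu xenial main"
        # ... and the disabled suite is commented out, not dropped
        assert lines[1].startswith("# suite disabled by cloud-init:")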
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
deleted file mode 100644
index b53d60d4..00000000
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_bootcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
-from cloudinit.tests.helpers import (
- CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
-
-import logging
-import tempfile
-
-
-LOG = logging.getLogger(__name__)
-
-
-class FakeExtendedTempFile(object):
- def __init__(self, suffix):
- self.suffix = suffix
- self.handle = tempfile.NamedTemporaryFile(
- prefix="ci-%s." % self.__class__.__name__, delete=False)
-
- def __enter__(self):
- return self.handle
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.handle.close()
- util.del_file(self.handle.name)
-
-
-class TestBootcmd(CiTestCase):
-
- with_logs = True
-
- _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.'
- 'ExtendedTemporaryFile')
-
- def setUp(self):
- super(TestBootcmd, self).setUp()
- self.subp = subp.subp
- self.new_root = self.tmp_dir()
-
- def _get_cloud(self, distro):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skip_if_no_bootcmd(self):
- """When the provided config doesn't contain bootcmd, skip it."""
- cfg = {}
- mycloud = self._get_cloud('ubuntu')
- handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "Skipping module named notimportant, no 'bootcmd' key",
- self.logs.getvalue())
-
- def test_handler_invalid_command_set(self):
- """Commands which can't be converted to shell will raise errors."""
- invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError) as context_manager:
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- self.assertIn('Failed to shellify bootcmd', self.logs.getvalue())
- self.assertEqual(
- "Input to shellify was type 'int'. Expected list or tuple.",
- str(context_manager.exception))
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array type for bootcmd key.
-
- Schema validation is not strict, so bootcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError):
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Invalid config:\nbootcmd: 1 is not of type \'array\'',
- self.logs.getvalue())
- self.assertIn('Failed to shellify', self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_item_type(self):
- """Schema validation warns of non-array or string bootcmd items.
-
- Schema validation is not strict, so bootcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {
- 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError) as context_manager:
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- expected_warnings = [
- 'bootcmd.1: 20 is not valid under any of the given schemas',
- 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given'
- ' schema'
- ]
- logs = self.logs.getvalue()
- for warning in expected_warnings:
- self.assertIn(warning, logs)
- self.assertIn('Failed to shellify', logs)
- self.assertEqual(
- ("Unable to shellify type 'int'. Expected list, string, tuple. "
- "Got: 20"),
- str(context_manager.exception))
-
- def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
- """Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
- cc = self._get_cloud('ubuntu')
- out_file = self.tmp_path('bootcmd.out', self.new_root)
- my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
- valid_config = {'bootcmd': [
- 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]}
-
- with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- with self.allow_subp(['/bin/sh']):
- handle('cc_bootcmd', valid_config, cc, LOG, [])
- self.assertEqual(my_id + ' iid-datasource-none\n',
- util.load_file(out_file))
-
- def test_handler_runs_bootcmd_script_with_error(self):
- """When a valid script generates an error, that error is raised."""
- cc = self._get_cloud('ubuntu')
- valid_config = {'bootcmd': ['exit 1']} # Script with error
-
- with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- with self.allow_subp(['/bin/sh']):
- with self.assertRaises(subp.ProcessExecutionError) as ctxt:
- handle('does-not-matter', valid_config, cc, LOG, [])
- self.assertIn(
- 'Unexpected error while running command.\n'
- "Command: ['/bin/sh',",
- str(ctxt.exception))
- self.assertIn(
- 'Failed to run bootcmd module does-not-matter',
- self.logs.getvalue())
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
- """Directly test schema rather than through handle."""
-
- schema = schema
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- ["byebye", "byebye"], 'command entries can be duplicate')
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- ["echo bye", "echo bye"], "command entries can be duplicate.")
-
-
-# vi: ts=4 expandtab
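The bootcmd tests deleted above lean on cloudinit.util.shellify: the configured list is turned into a "#!/bin/sh" script that is then run with INSTANCE_ID exported, and non-list input raises the TypeError whose message the tests assert. A small illustrative sketch of that helper's behaviour, assuming only what the deleted assertions show:

    from cloudinit import util

    # strings are kept verbatim, argv lists become one shell-quoted line each
    script = util.shellify(["echo hello", ["wget", "http://stuff/blah"]])
    print(script)  # starts with "#!/bin/sh", one command per line

    # non-list input is rejected, matching the asserted message
    # "Input to shellify was type 'int'. Expected list or tuple."
    try:
        util.shellify(1)
    except TypeError as err:
        print(err)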
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
deleted file mode 100644
index e74a0a08..00000000
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit.config import cc_ca_certs
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
-import logging
-import shutil
-import tempfile
-import unittest
-from contextlib import ExitStack
-from unittest import mock
-
-
-class TestNoConfig(unittest.TestCase):
- def setUp(self):
- super(TestNoConfig, self).setUp()
- self.name = "ca-certs"
- self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- def test_no_config(self):
- """
- Test that nothing is done if no ca-certs configuration is provided.
- """
- config = util.get_builtin_cfg()
- with ExitStack() as mocks:
- util_mock = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- certs_mock = mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
-
- cc_ca_certs.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- self.assertEqual(util_mock.call_count, 0)
- self.assertEqual(certs_mock.call_count, 0)
-
-
-class TestConfig(TestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.name = "ca-certs"
- distro = self._fetch_distro('ubuntu')
- self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, distro, None)
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- self.mocks = ExitStack()
- self.addCleanup(self.mocks.close)
-
- # Mock out the functions that actually modify the system
- self.mock_add = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'add_ca_certs'))
- self.mock_update = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
- self.mock_remove = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def test_no_trusted_list(self):
- """
- Test that no certificates are written if the 'trusted' key is not
- present.
- """
- config = {"ca-certs": {}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_empty_trusted_list(self):
- """Test that no certificates are written if the 'trusted' list is empty."""
- config = {"ca-certs": {"trusted": []}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_single_trusted(self):
- """Test that a single cert gets passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_multiple_trusted(self):
- """Test that multiple certs get passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1', 'CERT2'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_remove_default_ca_certs(self):
- """Test remove_defaults works as expected."""
- config = {"ca-certs": {"remove-defaults": True}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
- def test_no_remove_defaults_if_false(self):
- """Test remove_defaults is not called when config value is False."""
- config = {"ca-certs": {"remove-defaults": False}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_correct_order_for_remove_then_add(self):
- """Test that trusted certs are added after the default certs are removed."""
- config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
-
-class TestAddCaCerts(TestCase):
-
- def setUp(self):
- super(TestAddCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_no_certs_in_list(self):
- """Test that no certificates are written if none are provided."""
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_ca_certs.add_ca_certs([])
- self.assertEqual(mockobj.call_count, 0)
-
- def test_single_cert_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when the existing ca-certificates.conf has a trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
- expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_single_cert_no_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when the existing ca-certificates.conf has no trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode="wb")])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_single_cert_to_empty_existing_ca_file(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates.conf is empty"""
- cert = "CERT1\nLINE2\nLINE3"
-
- expected = "cloud-init-ca-certs.crt\n"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file', autospec=True))
- mock_stat = mocks.enter_context(
- mock.patch("cloudinit.config.cc_ca_certs.os.stat")
- )
- mock_stat.return_value.st_size = 0
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
-
- def test_multiple_certs(self):
- """Test adding multiple certificates to the trusted CAs."""
- certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
- expected_cert_file = "\n".join(certs)
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs(certs)
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- expected_cert_file, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode='wb')])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
-
-class TestUpdateCaCerts(unittest.TestCase):
- def test_commands(self):
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_ca_certs.update_ca_certs()
- mockobj.assert_called_once_with(
- ["update-ca-certificates"], capture=False)
-
-
-class TestRemoveDefaultCaCerts(TestCase):
-
- def setUp(self):
- super(TestRemoveDefaultCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_commands(self):
- with ExitStack() as mocks:
- mock_delete = mocks.enter_context(
- mock.patch.object(util, 'delete_dir_contents'))
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp'))
-
- cc_ca_certs.remove_default_ca_certs('ubuntu')
-
- mock_delete.assert_has_calls([
- mock.call("/usr/share/ca-certificates/"),
- mock.call("/etc/ssl/certs/")])
-
- mock_write.assert_called_once_with(
- "/etc/ca-certificates.conf", "", mode=0o644)
-
- mock_subp.assert_called_once_with(
- ('debconf-set-selections', '-'),
- "ca-certificates ca-certificates/trust_new_crts select no")
-
-# vi: ts=4 expandtab
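For the ca-certs handler, the deleted TestAddCaCerts cases show that add_ca_certs (single-argument in this removed version) writes the concatenated certificates to /usr/share/ca-certificates/cloud-init-ca-certs.crt and appends that filename to /etc/ca-certificates.conf. A hedged, self-contained sketch that mocks the same helpers the deleted cases mock (util.write_file, util.load_file and os.stat) so nothing touches the real system:

    from unittest import mock

    from cloudinit import util
    from cloudinit.config import cc_ca_certs

    with mock.patch.object(util, "write_file") as m_write, \
            mock.patch.object(util, "load_file", return_value="line1\n"), \
            mock.patch("cloudinit.config.cc_ca_certs.os.stat") as m_stat:
        m_stat.return_value.st_size = 1  # pretend the conf file is non-empty
        cc_ca_certs.add_ca_certs(["CERT1\nLINE2\nLINE3"])

    # the certificate bundle lands where update-ca-certificates picks it up
    m_write.assert_any_call(
        "/usr/share/ca-certificates/cloud-init-ca-certs.crt",
        "CERT1\nLINE2\nLINE3", mode=0o644)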
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
deleted file mode 100644
index 7918c609..00000000
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import httpretty
-import json
-import logging
-import os
-
-from cloudinit import cloud
-from cloudinit.config import cc_chef
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit.sources import DataSourceNone
-from cloudinit import util
-
-from cloudinit.tests.helpers import (
- HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
-
-LOG = logging.getLogger(__name__)
-
-CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
-
-# This is adjusted to use http because using with https causes issue
-# in some openssl/httpretty combinations.
-# https://github.com/gabrielfalcao/HTTPretty/issues/242
-# We saw issue in opensuse 42.3 with
-# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
-OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
-
-
-class TestInstallChefOmnibus(HttprettyTestCase):
-
- def setUp(self):
- super(TestInstallChefOmnibus, self).setUp()
- self.new_root = self.tmp_dir()
-
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- def test_install_chef_from_omnibus_runs_chef_url_content(self):
- """install_chef_from_omnibus calls subp_blob_in_tempfile."""
- response = b'#!/bin/bash\necho "Hi Mom"'
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200)
- ret = (None, None) # stdout, stderr but capture=False
-
- with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile",
- return_value=ret) as m_subp_blob:
- cc_chef.install_chef_from_omnibus()
- # admittedly whitebox, but assuming subp_blob_in_tempfile works
- # this should be fine.
- self.assertEqual(
- [mock.call(blob=response, args=[], basename='chef-omnibus-install',
- capture=False)],
- m_subp_blob.call_args_list)
-
- @mock.patch('cloudinit.config.cc_chef.url_helper.readurl')
- @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
- def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
- """install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
-
- class FakeURLResponse(object):
- contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format(
- self.new_root)
-
- m_rdurl.return_value = FakeURLResponse()
-
- cc_chef.install_chef_from_omnibus()
- expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES,
- 'url': cc_chef.OMNIBUS_URL}
- self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
- cc_chef.install_chef_from_omnibus(retries=10)
- expected_kwargs = {'retries': 10,
- 'url': cc_chef.OMNIBUS_URL}
- self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
- expected_subp_kwargs = {
- 'args': ['-v', '2.0'],
- 'basename': 'chef-omnibus-install',
- 'blob': m_rdurl.return_value.contents,
- 'capture': False
- }
- self.assertCountEqual(
- expected_subp_kwargs,
- m_subp_blob.call_args_list[0][1])
-
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
- def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
- """install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
- chef_outfile = self.tmp_path('chef.out', self.new_root)
- response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response)
- cc_chef.install_chef_from_omnibus(omnibus_version='2.0')
-
- called_kwargs = m_subp_blob.call_args_list[0][1]
- expected_kwargs = {
- 'args': ['-v', '2.0'],
- 'basename': 'chef-omnibus-install',
- 'blob': response,
- 'capture': False
- }
- self.assertCountEqual(expected_kwargs, called_kwargs)
-
-
-class TestChef(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestChef, self).setUp()
- self.tmp = self.tmp_dir()
-
- def fetch_cloud(self, distro_kind):
- cls = distros.fetch(distro_kind)
- paths = helpers.Paths({})
- distro = cls(distro_kind, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, distro, paths, None)
- return cloud.Cloud(ds, paths, {}, distro, None)
-
- def test_no_config(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertFalse(os.path.isdir(d))
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_basic_config(self):
- """
- test basic config looks sane
-
- # This should create a file of the format...
- # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
- chef_license "accept"
- log_level :info
- ssl_verify_mode :verify_none
- log_location "/var/log/chef/client.log"
- validation_client_name "bob"
- validation_key "/etc/chef/validation.pem"
- client_key "/etc/chef/client.pem"
- chef_server_url "localhost"
- environment "_default"
- node_name "iid-datasource-none"
- json_attribs "/etc/chef/firstboot.json"
- file_cache_path "/var/cache/chef"
- file_backup_path "/var/backups/chef"
- pid_file "/var/run/chef/client.pid"
- Chef::Log::Formatter.show_time = true
- encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret"
- """
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'chef_license': "accept",
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': "/etc/chef/vkey.pem",
- 'validation_cert': "this is my cert",
- 'encrypted_data_bag_secret':
- '/etc/chef/encrypted_data_bag_secret'
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertTrue(os.path.isdir(d))
- c = util.load_file(cc_chef.CHEF_RB_PATH)
-
- # the content of these keys is not expected to be rendered to tmpl
- unrendered_keys = ('validation_cert',)
- for k, v in cfg['chef'].items():
- if k in unrendered_keys:
- continue
- self.assertIn(v, c)
- for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
- if k in unrendered_keys:
- continue
- # the value from the cfg overrides that in the default
- val = cfg['chef'].get(k, v)
- if isinstance(val, str):
- self.assertIn(val, c)
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual({}, json.loads(c))
-
- def test_firstboot_json(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'run_list': ['a', 'b', 'c'],
- 'initial_attributes': {
- 'c': 'd',
- }
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual(
- {
- 'run_list': ['a', 'b', 'c'],
- 'c': 'd',
- }, json.loads(c))
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_template_deletes(self):
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'json_attribs': None,
- 'show_time': None,
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertNotIn('json_attribs', c)
- self.assertNotIn('Formatter.show_time', c)
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_validation_cert_and_validation_key(self):
- # test validation_cert content is written to validation_key path
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- v_path = '/etc/chef/vkey.pem'
- v_cert = 'this is my cert'
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
- util.load_file(v_path)
- self.assertEqual(v_cert, util.load_file(v_path))
-
- def test_validation_cert_with_system(self):
- # test validation_cert content is not written over system file
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- v_path = '/etc/chef/vkey.pem'
- v_cert = "system"
- expected_cert = "this is the system file certificate"
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- util.write_file(v_path, expected_cert)
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
- util.load_file(v_path)
- self.assertEqual(expected_cert, util.load_file(v_path))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py
deleted file mode 100644
index 787ba350..00000000
--- a/tests/unittests/test_handler/test_handler_debug.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_debug
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-import logging
-import shutil
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-@mock.patch('cloudinit.distros.debian.read_system_locale')
-class TestDebug(FilesystemMockingTestCase):
- def setUp(self):
- super(TestDebug, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, d, paths)
- if metadata:
- ds.metadata.update(metadata)
- return cloud.Cloud(ds, paths, {}, d, None)
-
- def test_debug_write(self, m_locale):
- m_locale.return_value = 'en_US.UTF-8'
- cfg = {
- 'abc': '123',
- 'c': u'\u20a0',
- 'debug': {
- 'verbose': True,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- contents = util.load_file('/var/log/cloud-init-debug.log')
- # Some basic sanity tests...
- self.assertNotEqual(0, len(contents))
- for k in cfg.keys():
- self.assertIn(k, contents)
-
- def test_debug_no_write(self, m_locale):
- m_locale.return_value = 'en_US.UTF-8'
- cfg = {
- 'abc': '123',
- 'debug': {
- 'verbose': False,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- self.assertRaises(IOError,
- util.load_file, '/var/log/cloud-init-debug.log')
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
deleted file mode 100644
index 4f4a57fa..00000000
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import random
-
-from cloudinit.config import cc_disk_setup
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase
-
-
-class TestIsDiskUsed(TestCase):
-
- def setUp(self):
- super(TestIsDiskUsed, self).setUp()
- self.patches = ExitStack()
- mod_name = 'cloudinit.config.cc_disk_setup'
- self.enumerate_disk = self.patches.enter_context(
- mock.patch('{0}.enumerate_disk'.format(mod_name)))
- self.check_fs = self.patches.enter_context(
- mock.patch('{0}.check_fs'.format(mod_name)))
-
- def tearDown(self):
- super(TestIsDiskUsed, self).tearDown()
- self.patches.close()
-
- def test_multiple_child_nodes_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_valid_filesystem_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (
- mock.MagicMock(), 'ext4', mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_one_child_nodes_and_no_fs_returns_false(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
-
-class TestGetMbrHddSize(TestCase):
-
- def setUp(self):
- super(TestGetMbrHddSize, self).setUp()
- self.patches = ExitStack()
- self.subp = self.patches.enter_context(
- mock.patch.object(cc_disk_setup.subp, 'subp'))
-
- def tearDown(self):
- super(TestGetMbrHddSize, self).tearDown()
- self.patches.close()
-
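- # get_hdd_size shells out to blockdev twice: --getsize64 for the size in
- # bytes and --getss for the sector size; the size in sectors is their
- # quotient. _configure_subp_mock emulates both calls.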
- def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes):
- def _subp(cmd, *args, **kwargs):
- self.assertEqual(3, len(cmd))
- if '--getsize64' in cmd:
- return hdd_size_in_bytes, None
- elif '--getss' in cmd:
- return sector_size_in_bytes, None
- raise Exception('Unexpected blockdev command called')
-
- self.subp.side_effect = _subp
-
- def _test_for_sector_size(self, sector_size):
- size_in_bytes = random.randint(10000, 10000000) * 512
- size_in_sectors = size_in_bytes / sector_size
- self._configure_subp_mock(size_in_bytes, sector_size)
- self.assertEqual(size_in_sectors,
- cc_disk_setup.get_hdd_size('/dev/sda1'))
-
- def test_size_for_512_byte_sectors(self):
- self._test_for_sector_size(512)
-
- def test_size_for_1024_byte_sectors(self):
- self._test_for_sector_size(1024)
-
- def test_size_for_2048_byte_sectors(self):
- self._test_for_sector_size(2048)
-
- def test_size_for_4096_byte_sectors(self):
- self._test_for_sector_size(4096)
-
-
-class TestGetPartitionMbrLayout(TestCase):
-
- def test_single_partition_using_boolean(self):
- self.assertEqual('0,',
- cc_disk_setup.get_partition_mbr_layout(1000, True))
-
- def test_single_partition_using_list(self):
- disk_size = random.randint(1000000, 1000000000000)
- self.assertEqual(
- ',,83',
- cc_disk_setup.get_partition_mbr_layout(disk_size, [100]))
-
- def test_half_and_half(self):
- disk_size = random.randint(1000000, 1000000000000)
- expected_partition_size = int(float(disk_size) / 2)
- self.assertEqual(
- ',{0},83\n,,83'.format(expected_partition_size),
- cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]))
-
- def test_thirds_with_different_partition_type(self):
- disk_size = random.randint(1000000, 1000000000000)
- expected_partition_size = int(float(disk_size) * 0.33)
- self.assertEqual(
- ',{0},83\n,,82'.format(expected_partition_size),
- cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]))
-
-
-class TestUpdateFsSetupDevices(TestCase):
- def test_regression_1634678(self):
- # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
- fs_setup = {
- 'partition': 'auto',
- 'device': '/dev/xvdb1',
- 'overwrite': False,
- 'label': 'test',
- 'filesystem': 'ext4'
- }
-
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
-
- self.assertEqual({
- '_origname': '/dev/xvdb1',
- 'partition': 'auto',
- 'device': '/dev/xvdb1',
- 'overwrite': False,
- 'label': 'test',
- 'filesystem': 'ext4'
- }, fs_setup)
-
- def test_dotted_devname(self):
- fs_setup = {
- 'partition': 'auto',
- 'device': 'ephemeral0.0',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }
-
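- # update_fs_setup_devices splits dotted names: 'ephemeral0.0' becomes
- # device 'ephemeral0' with partition '0', and the original values are
- # preserved under '_origname' and '_partition'.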
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
-
- self.assertEqual({
- '_origname': 'ephemeral0.0',
- '_partition': 'auto',
- 'partition': '0',
- 'device': 'ephemeral0',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }, fs_setup)
-
- def test_dotted_devname_populates_partition(self):
- fs_setup = {
- 'device': 'ephemeral0.1',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
- self.assertEqual({
- '_origname': 'ephemeral0.1',
- 'device': 'ephemeral0',
- 'partition': '1',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }, fs_setup)
-
-
-@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device',
- return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
- return_value=('/dev/xdb1', False))
-@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
-class TestMkfsCommandHandling(CiTestCase):
-
- with_logs = True
-
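- # The class-level mock.patch decorators are applied bottom-up, so each
- # test receives the subp mock first and the remaining mocks via *args.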
- def test_with_cmd(self, subp, *args):
- """mkfs honors cmd and logs warnings when extra_opts or overwrite are
- provided."""
- cc_disk_setup.mkfs({
- 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
- 'filesystem': 'ext4',
- 'device': '/dev/xdb1',
- 'label': 'with_cmd',
- 'extra_opts': ['should', 'generate', 'warning'],
- 'overwrite': 'should generate warning too'
- })
-
- self.assertIn(
- 'extra_opts ' +
- 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
- '/dev/xdb1',
- self.logs.getvalue())
- self.assertIn(
- 'overwrite ' +
- 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
- '/dev/xdb1',
- self.logs.getvalue())
-
- subp.assert_called_once_with(
- 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
-
- @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
- def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
- """mkfs observes extra_opts and overwrite settings when cmd is not
- present."""
- m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p]
- cc_disk_setup.mkfs({
- 'filesystem': 'ext4',
- 'device': '/dev/xdb1',
- 'label': 'without_cmd',
- 'extra_opts': ['are', 'added'],
- 'overwrite': True
- })
-
- subp.assert_called_once_with(
- ['/sbin/mkfs.ext4', '/dev/xdb1',
- '-L', 'without_cmd', '-F', 'are', 'added'],
- shell=False)
-
- @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
- def test_mkswap(self, m_which, subp, *args):
- """mkfs observes extra_opts and overwrite settings when cmd is not
- present."""
- m_which.side_effect = iter([None, '/sbin/mkswap'])
- cc_disk_setup.mkfs({
- 'filesystem': 'swap',
- 'device': '/dev/xdb1',
- 'label': 'swap',
- 'overwrite': True,
- })
-
- self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')],
- m_which.call_args_list)
- subp.assert_called_once_with(
- ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False)
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
deleted file mode 100644
index e3778b11..00000000
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_update_etc_hosts
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-import os
-import shutil
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostsFile(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestHostsFile, self).setUp()
- self.tmp = self.tmp_dir()
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def test_write_etc_hosts_suse_localhost(self):
- cfg = {
- 'manage_etc_hosts': 'localhost',
- 'hostname': 'cloud-init.test.us'
- }
- os.makedirs('%s/etc/' % self.tmp)
- hosts_content = '192.168.1.1 blah.blah.us blah\n'
- fout = open('%s/etc/hosts' % self.tmp, 'w')
- fout.write(hosts_content)
- fout.close()
- distro = self._fetch_distro('sles')
- distro.hosts_fn = '%s/etc/hosts' % self.tmp
- paths = helpers.Paths({})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
- contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
- if '192.168.1.1\tblah.blah.us\tblah' not in contents:
- self.assertIsNone('Default etc/hosts content modified')
-
- @t_help.skipUnlessJinja()
- def test_write_etc_hosts_suse_template(self):
- cfg = {
- 'manage_etc_hosts': 'template',
- 'hostname': 'cloud-init.test.us'
- }
- shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp)
- distro = self._fetch_distro('sles')
- paths = helpers.Paths({})
- paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl'
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
- contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.1.1 cloud-init.test.us cloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
- if '::1 cloud-init.test.us cloud-init' not in contents:
- self.assertIsNone('No entry for ::1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
deleted file mode 100644
index 7d165687..00000000
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_landscape
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.sources import DataSourceNone
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock,
- wrap_and_call)
-
-from configobj import ConfigObj
-import logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLandscape(FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestLandscape, self).setUp()
- self.new_root = self.tmp_dir()
- self.conf = self.tmp_path('client.conf', self.new_root)
- self.default_file = self.tmp_path('default_landscape', self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skips_empty_landscape_cloudconfig(self):
- """Empty landscape cloud-config section does no work."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'landscape': {}}
- cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertFalse(mycloud.distro.install_packages.called)
-
- def test_handler_error_on_invalid_landscape_type(self):
- """Raise an error when landscape configuraiton option is invalid."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': 'wrongtype'}
- with self.assertRaises(RuntimeError) as context_manager:
- cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "'landscape' key existed in config, but not a dict",
- str(context_manager.exception))
-
- @mock.patch('cloudinit.config.cc_landscape.subp')
- def test_handler_restarts_landscape_client(self, m_subp):
- """handler restarts lansdscape-client after install."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
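- # wrap_and_call (from cloudinit.tests.helpers) temporarily patches the
- # named attributes on the given module, invokes the callable, and then
- # restores the originals.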
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(['service', 'landscape-client', 'restart'])],
- m_subp.subp.call_args_list)
-
- def test_handler_installs_client_and_creates_config_file(self):
- """Write landscape client.conf and install landscape-client."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client'}}
- mycloud.distro = mock.MagicMock()
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf},
- 'LS_DEFAULT_FILE': {'new': self.default_file}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call('landscape-client')],
- mycloud.distro.install_packages.call_args)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
- default_content = util.load_file(self.default_file)
- self.assertEqual('RUN=1\n', default_content)
-
- def test_handler_writes_merged_client_config_file_with_defaults(self):
- """Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
- # Write existing sparse client.conf file
- util.write_file(self.conf, '[client]\ncomputer_title = My PC\n')
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client',
- 'computer_title': 'My PC'}}
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
-
- def test_handler_writes_merged_provided_cloudconfig_with_defaults(self):
- """Merge and write options from cloud-config options with defaults."""
- # Write empty sparse client.conf file
- util.write_file(self.conf, '')
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {'computer_title': 'My PC'}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client',
- 'computer_title': 'My PC'}}
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
deleted file mode 100644
index 47e7d804..00000000
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_locale
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
-
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-from unittest import mock
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLocale(t_help.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestLocale, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- def test_set_locale_sles(self):
-
- cfg = {
- 'locale': 'My.Locale',
- }
- cc = self._get_cloud('sles')
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- if cc.distro.uses_systemd():
- locale_conf = cc.distro.systemd_locale_conf_fn
- else:
- locale_conf = cc.distro.locale_conf_fn
- contents = util.load_file(locale_conf, decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- if cc.distro.uses_systemd():
- self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg))
- else:
- self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg))
-
- def test_set_locale_sles_default(self):
- cfg = {}
- cc = self._get_cloud('sles')
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
-
- if cc.distro.uses_systemd():
- locale_conf = cc.distro.systemd_locale_conf_fn
- keyname = 'LANG'
- else:
- locale_conf = cc.distro.locale_conf_fn
- keyname = 'RC_LANG'
-
- contents = util.load_file(locale_conf, decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg))
-
- def test_locale_update_config_if_different_than_default(self):
- """Test cc_locale writes updates conf if different than default"""
- locale_conf = os.path.join(self.new_root, "etc/default/locale")
- util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
- cfg = {'locale': 'C.UTF-8'}
- cc = self._get_cloud('ubuntu')
- with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
- with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
- locale_conf):
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- m_subp.assert_called_with(['update-locale',
- '--locale-file=%s' % locale_conf,
- 'LANG=C.UTF-8'], capture=False)
-
- def test_locale_rhel_defaults_en_us_utf8(self):
- """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
- cfg = {}
- cc = self._get_cloud('rhel')
- update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file'
- with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd:
- m_use_sd.return_value = True
- with mock.patch(update_sysconfig) as m_update_syscfg:
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- m_update_syscfg.assert_called_with('/etc/locale.conf',
- {'LANG': 'en_US.UTF-8'})
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
deleted file mode 100644
index b2181992..00000000
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_lxd
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit import (distros, helpers, cloud)
-from cloudinit.tests import helpers as t_help
-
-from unittest import mock
-
-
-class TestLxd(t_help.CiTestCase):
-
- with_logs = True
-
- lxd_cfg = {
- 'lxd': {
- 'init': {
- 'network_address': '0.0.0.0',
- 'storage_backend': 'zfs',
- 'storage_pool': 'poolname',
- }
- }
- }
-
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_lxd_init(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- mock_subp.which.return_value = True
- m_maybe_clean.return_value = None
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertTrue(mock_subp.which.called)
- # no bridge config, so maybe_cleanup should not be called.
- self.assertFalse(m_maybe_clean.called)
- self.assertEqual(
- [mock.call(['lxd', 'waitready', '--timeout=300']),
- mock.call(
- ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
- '--storage-backend=zfs', '--storage-pool=poolname'])],
- mock_subp.subp.call_args_list)
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_lxd_install(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- mock_subp.which.return_value = None
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertNotIn('WARN', self.logs.getvalue())
- self.assertTrue(cc.distro.install_packages.called)
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertFalse(m_maybe_clean.called)
- install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux'])
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_subp.subp.called)
- self.assertFalse(m_maybe_clean.called)
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_subp.subp.called)
- self.assertFalse(m_maybe_clean.called)
-
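- # bridge_to_debconf maps the cloud-config bridge settings onto debconf
- # selections, while bridge_to_cmd (exercised further below) produces the
- # equivalent 'lxc network' command lines.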
- def test_lxd_debconf_new_full(self):
- data = {"mode": "new",
- "name": "testbr0",
- "ipv4_address": "10.0.8.1",
- "ipv4_netmask": "24",
- "ipv4_dhcp_first": "10.0.8.2",
- "ipv4_dhcp_last": "10.0.8.254",
- "ipv4_dhcp_leases": "250",
- "ipv4_nat": "true",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true",
- "domain": "lxd"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-name": "testbr0",
- "lxd/bridge-ipv4": "true",
- "lxd/bridge-ipv4-address": "10.0.8.1",
- "lxd/bridge-ipv4-netmask": "24",
- "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
- "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
- "lxd/bridge-ipv4-dhcp-leases": "250",
- "lxd/bridge-ipv4-nat": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true",
- "lxd/bridge-domain": "lxd"})
-
- def test_lxd_debconf_new_partial(self):
- data = {"mode": "new",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true"})
-
- def test_lxd_debconf_existing(self):
- data = {"mode": "existing",
- "name": "testbr0"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/use-existing-bridge": "true",
- "lxd/bridge-name": "testbr0"})
-
- def test_lxd_debconf_none(self):
- data = {"mode": "none"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/bridge-name": ""})
-
- def test_lxd_cmd_new_full(self):
- data = {"mode": "new",
- "name": "testbr0",
- "ipv4_address": "10.0.8.1",
- "ipv4_netmask": "24",
- "ipv4_dhcp_first": "10.0.8.2",
- "ipv4_dhcp_last": "10.0.8.254",
- "ipv4_dhcp_leases": "250",
- "ipv4_nat": "true",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true",
- "domain": "lxd"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (["network", "create", "testbr0",
- "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
- "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
- "ipv6.address=fd98:9e0:3744::1/64",
- "ipv6.nat=true", "dns.domain=lxd"],
- ["network", "attach-profile",
- "testbr0", "default", "eth0"]))
-
- def test_lxd_cmd_new_partial(self):
- data = {"mode": "new",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (["network", "create", "lxdbr0", "ipv4.address=none",
- "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
- ["network", "attach-profile",
- "lxdbr0", "default", "eth0"]))
-
- def test_lxd_cmd_existing(self):
- data = {"mode": "existing",
- "name": "testbr0"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (None, ["network", "attach-profile",
- "testbr0", "default", "eth0"]))
-
- def test_lxd_cmd_none(self):
- data = {"mode": "none"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (None, None))
-
-
-class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
- """Test the implementation of maybe_cleanup_default."""
-
- defnet = cc_lxd._DEFAULT_NETWORK_NAME
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_network_other_than_default_not_deleted(self, m_lxc):
- """deletion or removal should only occur if bridge is default."""
- cc_lxd.maybe_cleanup_default(
- net_name="lxdbr1", did_init=True, create=True, attach=True)
- m_lxc.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_did_init_false_does_not_delete(self, m_lxc):
- """deletion or removal should only occur if did_init is True."""
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=False, create=True, attach=True)
- m_lxc.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_network_deleted_if_create_true(self, m_lxc):
- """deletion of network should occur if create is True."""
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=True, create=True, attach=False)
- m_lxc.assert_called_with(["network", "delete", self.defnet])
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_device_removed_if_attach_true(self, m_lxc):
- """deletion of network should occur if create is True."""
- nic_name = "my_nic"
- profile = "my_profile"
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=True, create=False, attach=True,
- profile=profile, nic_name=nic_name)
- m_lxc.assert_called_once_with(
- ["profile", "device", "remove", profile, nic_name])
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
deleted file mode 100644
index e87069f6..00000000
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os.path
-from unittest import mock
-
-from cloudinit.config import cc_mounts
-
-from cloudinit.tests import helpers as test_helpers
-
-
-class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestSanitizeDevname, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- def _touch(self, path):
- path = os.path.join(self.new_root, path.lstrip('/'))
- basedir = os.path.dirname(path)
- if not os.path.exists(basedir):
- os.makedirs(basedir)
- open(path, 'a').close()
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def mock_existence_of_disk(self, disk_path):
- self._touch(disk_path)
- self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1]))
-
- def mock_existence_of_partition(self, disk_path, partition_number):
- self.mock_existence_of_disk(disk_path)
- self._touch(disk_path + str(partition_number))
- disk_name = disk_path.split('/')[-1]
- self._makedirs(os.path.join('/sys/block',
- disk_name,
- disk_name + str(partition_number)))
-
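- # sanitize_devname resolves device names against both /dev and
- # /sys/block, so the helpers above create matching entries in each tree.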
- def test_existent_full_disk_path_is_returned(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_path,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_disk_name_returns_full_path(self):
- disk_name = 'sda'
- disk_path = '/dev/' + disk_name
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_name,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_meta_disk_is_returned(self):
- actual_disk_path = '/dev/sda'
- self.mock_existence_of_disk(actual_disk_path)
- self.assertEqual(
- actual_disk_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: actual_disk_path,
- mock.Mock()))
-
- def test_existent_meta_partition_is_returned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_existent_meta_partition_with_p_is_returned(self):
- disk_name, partition_part = '/dev/sda', 'p1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_first_partition_returned_if_existent_disk_is_partitioned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_nth_partition_returned_if_requested(self):
- disk_name, partition_part = '/dev/sda', '3'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.3',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_transformer_returning_none_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0', lambda x: None, mock.Mock()))
-
- def test_missing_device_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock()))
-
- def test_missing_sys_returns_none(self):
- disk_path = '/dev/sda'
- self._makedirs(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
- def test_existent_disk_but_missing_partition_returns_none(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0.1', lambda x: disk_path, mock.Mock()))
-
- def test_network_device_returns_network_device(self):
- disk_path = 'netdevice:/path'
- self.assertEqual(
- disk_path,
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
-
-class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestSwapFileCreation, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
- self.swap_path = os.path.join(self.new_root, 'swap.img')
- self._makedirs('/etc')
-
- self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
- 'mock_fstab_path',
- self.fstab_path,
- autospec=False)
-
- self.add_patch('cloudinit.config.cc_mounts.subp.subp',
- 'm_subp_subp')
-
- self.add_patch('cloudinit.config.cc_mounts.util.mounts',
- 'mock_util_mounts',
- return_value={
- '/dev/sda1': {'fstype': 'ext4',
- 'mountpoint': '/',
- 'opts': 'rw,relatime,discard'
- }})
-
- self.mock_cloud = mock.Mock()
- self.mock_log = mock.Mock()
- self.mock_cloud.device_name_to_device = self.device_name_to_device
-
- self.cc = {
- 'swap': {
- 'filename': self.swap_path,
- 'size': '512',
- 'maxsize': '512'}}
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def device_name_to_device(self, path):
- if path == 'swap':
- return self.swap_path
- else:
- dev = None
-
- return dev
-
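- # The tests below cover cc_mounts' swap-file creation strategy: dd is
- # used on btrfs and on xfs with older kernels, while newer xfs kernels
- # and ext4 use fallocate.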
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (4, 20)
- m_get_mount_info.return_value = ["", "xfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_xfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (3, 18)
- m_get_mount_info.return_value = ["", "xfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['dd', 'if=/dev/zero',
- 'of=' + self.swap_path,
- 'bs=1M', 'count=0'], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_btrfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (4, 20)
- m_get_mount_info.return_value = ["", "btrfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['dd', 'if=/dev/zero',
- 'of=' + self.swap_path,
- 'bs=1M', 'count=0'], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_ext4(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (5, 14)
- m_get_mount_info.return_value = ["", "ext4"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
-
-class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
-
- swap_path = '/dev/sdb1'
-
- def setUp(self):
- super(TestFstabHandling, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
- self._makedirs('/etc')
-
- self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
- 'mock_fstab_path',
- self.fstab_path,
- autospec=False)
-
- self.add_patch('cloudinit.config.cc_mounts._is_block_device',
- 'mock_is_block_device',
- return_value=True)
-
- self.add_patch('cloudinit.config.cc_mounts.subp.subp',
- 'm_subp_subp')
-
- self.add_patch('cloudinit.config.cc_mounts.util.mounts',
- 'mock_util_mounts',
- return_value={
- '/dev/sda1': {'fstype': 'ext4',
- 'mountpoint': '/',
- 'opts': 'rw,relatime,discard'
- }})
-
- self.mock_cloud = mock.Mock()
- self.mock_log = mock.Mock()
- self.mock_cloud.device_name_to_device = self.device_name_to_device
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def device_name_to_device(self, path):
- if path == 'swap':
- return self.swap_path
- else:
- dev = None
-
- return dev
-
- def test_no_fstab(self):
- """ Handle images which do not include an fstab. """
- self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
- fstab_expected_content = (
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_swap_integrity(self):
- '''Ensure that the swap file is correctly created and can
- swapon successfully. Fixing the corner case of:
- kernel: swapon: swapfile has holes'''
-
- fstab = '/swap.img swap swap defaults 0 0\n'
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab)
- cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']}
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
-
- def test_fstab_no_swap_device(self):
- '''Ensure that cloud-init adds a discovered swap partition
- to /etc/fstab.'''
-
- fstab_original_content = ''
- fstab_expected_content = (
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_fstab_same_swap_device_already_configured(self):
- '''Ensure that cloud-init will not add a swap device if the same
- device already exists in /etc/fstab.'''
-
- fstab_original_content = '%s swap swap defaults 0 0\n' % (
- self.swap_path,)
- fstab_expected_content = fstab_original_content
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_fstab_alternate_swap_device_already_configured(self):
- '''Ensure that cloud-init will add a discovered swap device to
- /etc/fstab even when there exists a swap definition on another
- device.'''
-
- fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n'
- fstab_expected_content = (
- fstab_original_content +
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_no_change_fstab_sets_needs_mount_all(self):
- '''verify mount -a is still called even when the fstab entries are unchanged'''
- fstab_original_content = (
- 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n'
- 'LABEL=UEFI /boot/efi vfat defaults 0 0\n'
- '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
- )
- fstab_expected_content = fstab_original_content
- cc = {
- 'mounts': [
- ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
- ]
- }
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['mount', '-a']),
- mock.call(['systemctl', 'daemon-reload'])])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
deleted file mode 100644
index 62388ac6..00000000
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_puppet
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-import logging
-import textwrap
-
-
-LOG = logging.getLogger(__name__)
-
-
-@mock.patch('cloudinit.config.cc_puppet.subp.subp')
-@mock.patch('cloudinit.config.cc_puppet.os')
-class TestAutostartPuppet(CiTestCase):
-
- def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
- """Update /etc/default/puppet to autostart if it exists."""
-
- def _fake_exists(path):
- return path == '/etc/default/puppet'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- self.assertEqual(
- [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)],
- m_subp.call_args_list)
-
- def test_wb_autostart_puppet_enables_puppet_systemctl(self, m_os, m_subp):
- """If systemctl is present, enable puppet via systemctl."""
-
- def _fake_exists(path):
- return path == '/bin/systemctl'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- expected_calls = [mock.call(
- ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
- self.assertEqual(expected_calls, m_subp.call_args_list)
-
- def test_wb_autostart_puppet_enables_puppet_chkconfig(self, m_os, m_subp):
- """If chkconfig is present, enable puppet via checkcfg."""
-
- def _fake_exists(path):
- return path == '/sbin/chkconfig'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- expected_calls = [mock.call(
- ['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
- self.assertEqual(expected_calls, m_subp.call_args_list)
-
-
-@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
-class TestPuppetHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestPuppetHandle, self).setUp()
- self.new_root = self.tmp_dir()
- self.conf = self.tmp_path('puppet.conf')
- self.csr_attributes_path = self.tmp_path('csr_attributes.yaml')
-
- def _get_cloud(self, distro):
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
- """Cloud-config containing no 'puppet' key is skipped."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "no 'puppet' configuration found", self.logs.getvalue())
- self.assertEqual(0, m_auto.call_count)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
- """Cloud-config 'puppet' configuration starts puppet."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'puppet': {'install': False}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertEqual(
- [mock.call(['service', 'puppet', 'start'], capture=False)],
- m_subp.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
- """Cloud-config empty 'puppet' configuration installs latest puppet."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
- """Cloud-config with 'puppet' key installs when 'install' is True."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {'install': True}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
- """Cloud-config 'puppet' configuration can specify a version."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {'version': '3.8'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', '3.8'))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
- """When 'conf' is provided update values in PUPPET_CONF_PATH."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {
- 'puppet': {
- 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}}
- util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3')
- puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH'
- mycloud.distro = mock.MagicMock()
- with mock.patch(puppet_conf_path, self.conf):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- content = util.load_file(self.conf)
- expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
- self.assertEqual(expected, content)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
- """When csr_attributes is provided
- creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {
- 'puppet': {
- 'csr_attributes': {
- 'custom_attributes': {
- '1.2.840.113549.1.9.7':
- '342thbjkt82094y0uthhor289jnqthpc2290'
- },
- 'extension_requests': {
- 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
- 'pp_image_name': 'my_ami_image',
- 'pp_preshared_key':
- '342thbjkt82094y0uthhor289jnqthpc2290'
- }
- }
- }
- }
- csr_attributes = 'cloudinit.config.cc_puppet.' \
- 'PUPPET_CSR_ATTRIBUTES_PATH'
- with mock.patch(csr_attributes, self.csr_attributes_path):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- content = util.load_file(self.csr_attributes_path)
- expected = textwrap.dedent("""\
- custom_attributes:
- 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
- extension_requests:
- pp_image_name: my_ami_image
- pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
- pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
- """)
- self.assertEqual(expected, content)
diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
deleted file mode 100644
index e13b7793..00000000
--- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
-
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-from cloudinit.tests.helpers import mock
-
-from textwrap import dedent
-import logging
-
-LOG = logging.getLogger(__name__)
-MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
-NET_INFO = {
- 'lo': {'ipv4': [{'ip': '127.0.0.1',
- 'bcast': '', 'mask': '255.0.0.0',
- 'scope': 'host'}],
- 'ipv6': [{'ip': '::1/128',
- 'scope6': 'host'}], 'hwaddr': '',
- 'up': 'True'},
- 'env2': {'ipv4': [{'ip': '8.0.0.19',
- 'bcast': '8.0.0.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20',
- 'up': 'True'},
- 'env3': {'ipv4': [{'ip': '90.0.0.14',
- 'bcast': '90.0.0.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21',
- 'up': 'True'},
- 'env4': {'ipv4': [{'ip': '9.114.23.7',
- 'bcast': '9.114.23.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22',
- 'up': 'True'},
- 'env5': {'ipv4': [],
- 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64',
- 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c',
- 'up': 'True'}}
-
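- # In NET_INFO only 'env5' has an IPv6 address and no IPv4 address, so it
- # is the interface the handler is expected to reconfigure below.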
-
-class TestRsctNodeFile(t_help.CiTestCase):
- def test_disable_ipv6_interface(self):
- """test parsing of iface files."""
- fname = self.tmp_path("iface-eth5")
- util.write_file(fname, dedent("""\
- BOOTPROTO=static
- DEVICE=eth5
- HWADDR=42:20:86:df:fa:4c
- IPV6INIT=yes
- IPADDR6=fe80::9c26:c3ff:fea4:62c8/64
- IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64
- NM_CONTROLLED=yes
- ONBOOT=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
- """))
-
- ccrmci.disable_ipv6(fname)
- self.assertEqual(dedent("""\
- BOOTPROTO=static
- DEVICE=eth5
- HWADDR=42:20:86:df:fa:4c
- ONBOOT=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
- NM_CONTROLLED=no
- """), util.load_file(fname))
-
- @mock.patch(MPATH + '.refresh_rmc')
- @mock.patch(MPATH + '.restart_network_manager')
- @mock.patch(MPATH + '.disable_ipv6')
- @mock.patch(MPATH + '.refresh_ipv6')
- @mock.patch(MPATH + '.netinfo.netdev_info')
- @mock.patch(MPATH + '.subp.which')
- def test_handle(self, m_which, m_netdev_info,
- m_refresh_ipv6, m_disable_ipv6,
- m_restart_nm, m_refresh_rmc):
- """Basic test of handle."""
- m_netdev_info.return_value = NET_INFO
- m_which.return_value = '/opt/rsct/bin/rmcctrl'
- ccrmci.handle(
- "refresh_rmc_and_interface", None, None, None, None)
- self.assertEqual(1, m_netdev_info.call_count)
- m_refresh_ipv6.assert_called_with('env5')
- m_disable_ipv6.assert_called_with(
- '/etc/sysconfig/network-scripts/ifcfg-env5')
- self.assertEqual(1, m_restart_nm.call_count)
- self.assertEqual(1, m_refresh_rmc.call_count)
-
- @mock.patch(MPATH + '.netinfo.netdev_info')
- def test_find_ipv6(self, m_netdev_info):
- """find_ipv6_ifaces parses netdev_info returning those with ipv6"""
- m_netdev_info.return_value = NET_INFO
- found = ccrmci.find_ipv6_ifaces()
- self.assertEqual(['env5'], found)
-
- @mock.patch(MPATH + '.subp.subp')
- def test_refresh_ipv6(self, m_subp):
- """refresh_ipv6 should ip down and up the interface."""
- iface = "myeth0"
- ccrmci.refresh_ipv6(iface)
- m_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', iface, 'down']),
- mock.call(['ip', 'link', 'set', iface, 'up'])])
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
deleted file mode 100644
index 28d55072..00000000
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_resizefs import (
- can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs,
- _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs)
-
-from collections import namedtuple
-import logging
-
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.tests.helpers import (
- CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestResizefs(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(TestResizefs, self).setUp()
- self.name = "resizefs"
-
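- # For UFS, growfs exits non-zero with a "not larger than the current
- # filesystem size" error when there is nothing to grow; can_skip_resize
- # treats that specific failure as a no-op rather than an error.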
- @mock.patch('cloudinit.subp.subp')
- def test_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = ("growfs: requested size 2.0GB is not larger than the "
- "current filesystem size 2.0GB\n")
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- m_subp.return_value = (
- ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
- ("growfs: no room to allocate last cylinder group; "
- "leaving 364KB unused\n")
- )
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertFalse(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_growfs_exception(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- with self.assertRaises(ProcessExecutionError):
- can_skip_resize(fs_type, resize_what, devpth)
-
- def test_can_skip_resize_ext(self):
- self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
-
- def test_handle_noops_on_disabled(self):
- """The handle function logs when the configuration disables resize."""
- cfg = {'resize_rootfs': False}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
- """The handle reports json schema violations as a warning.
-
- Invalid values for resize_rootfs result in disabling the module.
- """
- cfg = {'resize_rootfs': 'junk'}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertIn(
- "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of"
- " [True, False, 'noblock']",
- logs)
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n',
- logs)
-
- @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info')
- def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
- """handle warns when get_mount_info sees unknown filesystem for /."""
- m_get_mount_info.return_value = None
- cfg = {'resize_rootfs': True}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs)
- self.assertIn(
- 'WARNING: Could not determine filesystem type of /\n',
- logs)
- self.assertEqual(
- [mock.call('/', LOG)],
- m_get_mount_info.call_args_list)
-
- def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
- """handle noops when the root path is not found on the commandline."""
- cfg = {'resize_rootfs': True}
- exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- handle, 'cc_resizefs', cfg, _cloud=None, log=LOG,
- args=[])
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_resize_zfs_cmd_return(self):
- zpool = 'zroot'
- devpth = 'gpt/system'
- self.assertEqual(('zpool', 'online', '-e', zpool, devpth),
- _resize_zfs(zpool, devpth))
-
- def test_resize_xfs_cmd_return(self):
- mount_point = '/mnt/test'
- devpth = '/dev/sda1'
- self.assertEqual(('xfs_growfs', mount_point),
- _resize_xfs(mount_point, devpth))
-
- def test_resize_ext_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sdb1'
- self.assertEqual(('resize2fs', devpth),
- _resize_ext(mount_point, devpth))
-
- def test_resize_ufs_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sda2'
- self.assertEqual(('growfs', '-y', mount_point),
- _resize_ufs(mount_point, devpth))
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.parse_mount')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.get_mount_info')
- def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'vmzroot/ROOT/freebsd'
- disk = 'gpt/system'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resize_rootfs': True}
-
- with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- ret = dresize.call_args[0][0]
-
- self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret)
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.parse_mount')
- def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'zroot/ROOT/default'
- disk = 'da0p3'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resize_rootfs': True}
-
- def fake_stat(devpath):
- if devpath == disk:
- raise OSError("not here")
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat
- return FakeStat(25008, 0, 1) # fake char block device
-
- with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
- with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat:
- m_stat.side_effect = fake_stat
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
-
- self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
- dresize.call_args[0][0])
-
-
-class TestRootDevFromCmdline(CiTestCase):
-
- def test_rootdev_from_cmdline_with_no_root(self):
- """Return None from rootdev_from_cmdline when root is not present."""
- invalid_cases = [
- 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', '']
- for case in invalid_cases:
- self.assertIsNone(util.rootdev_from_cmdline(case))
-
- def test_rootdev_from_cmdline_with_root_startswith_dev(self):
- """Return the cmdline root when the path starts with /dev."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this'))
-
- def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
- """Add /dev prefix to cmdline root when the path lacks the prefix."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=this'))
-
- def test_rootdev_from_cmdline_with_root_with_label(self):
- """When cmdline root contains a LABEL, our root is disk/by-label."""
- self.assertEqual(
- '/dev/disk/by-label/unique',
- util.rootdev_from_cmdline('asdf root=LABEL=unique'))
-
- def test_rootdev_from_cmdline_with_root_with_uuid(self):
- """When cmdline root contains a UUID, our root is disk/by-uuid."""
- self.assertEqual(
- '/dev/disk/by-uuid/adsfdsaf-adsf',
- util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf'))
-
-
-class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
-
- with_logs = True
-
- def test_maybe_get_writable_device_path_none_on_overlayroot(self):
- """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
- info = 'does not matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, 'overlayroot', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "Not attempting to resize devpath 'overlayroot'",
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
- """When root does not exist isn't in the cmdline, log warning."""
- info = 'does not matter'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists'
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertIsNone(devpath)
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_maybe_get_writable_device_path_does_not_exist(self):
- """When devpath does not exist, a warning is logged."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: Device '/dev/I/dont/exist' did not exist."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
- """When devpath does not exist in a container, log a debug message."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_raises_oserror(self):
- """When unexpected OSError is raises by os.stat it is reraised."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- with self.assertRaises(OSError) as context_manager:
- wrap_and_call(
- 'cloudinit.config.cc_resizefs',
- {'util.is_container': {'return_value': True},
- 'os.stat': {'side_effect': OSError('Something unexpected')}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertEqual(
- 'Something unexpected', str(context_manager.exception))
-
- def test_maybe_get_writable_device_path_non_block(self):
- """When device is not a block device, emit warning return False."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: device '{0}' not a block device. cannot resize".format(
- fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_non_block_on_container(self):
- """When device is non-block device in container, emit debug log."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: device '{0}' not a block device in container."
- ' cannot resize'.format(fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_returns_cmdline_root(self):
- """When root device is UUID in kernel commandline, update devpath."""
- # XXX Long-term we want to use FilesystemMocking test to avoid
- # touching os.stat.
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def.
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs',
- {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'},
- 'util.is_container': False,
- 'os.path.exists': False, # /dev/root doesn't exist
- 'os.stat': {
- 'return_value': FakeStat(25008, 0, 1)} # fake block device
- },
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath)
- self.assertIn(
- "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
- " per kernel cmdline",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
- def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = False
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
- def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = True
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '/'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.is_container', return_value=True)
- @mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
- m_is_container):
- freebsd.return_value = True
- info = 'dev=gpt/system mnt_point=/ path=/'
- devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
- self.assertEqual('gpt/system', devpth)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_resizefs_vyos.py b/tests/unittests/test_handler/test_handler_resizefs_vyos.py
deleted file mode 100644
index c18ab1ea..00000000
--- a/tests/unittests/test_handler/test_handler_resizefs_vyos.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_resizefs_vyos import (
- can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs,
- _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs)
-
-from collections import namedtuple
-import logging
-
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.tests.helpers import (
- CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestResizefs(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(TestResizefs, self).setUp()
- self.name = "resizefs"
-
- @mock.patch('cloudinit.subp.subp')
- def test_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = ("growfs: requested size 2.0GB is not larger than the "
- "current filesystem size 2.0GB\n")
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- m_subp.return_value = (
- ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
- ("growfs: no room to allocate last cylinder group; "
- "leaving 364KB unused\n")
- )
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertFalse(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_growfs_exception(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- with self.assertRaises(ProcessExecutionError):
- can_skip_resize(fs_type, resize_what, devpth)
-
- def test_can_skip_resize_ext(self):
- self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
-
- def test_handle_noops_on_disabled(self):
- """The handle function logs when the configuration disables resize."""
- cfg = {'resizefs_enabled': False}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handle_schema_validation_logs_invalid_resize_enabled_value(self):
- """The handle reports json schema violations as a warning.
-
- Invalid values for resizefs_enabled result in disabling the module.
- """
- cfg = {'resizefs_enabled': 'junk'}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertIn(
- "WARNING: Invalid config:\nresizefs_enabled: 'junk' is not one of"
- " [True, False, 'noblock']",
- logs)
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n',
- logs)
-
- @mock.patch('cloudinit.config.cc_resizefs_vyos.util.get_mount_info')
- def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
- """handle warns when get_mount_info sees unknown filesystem for /."""
- m_get_mount_info.return_value = None
- cfg = {'resizefs_enabled': True}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertNotIn("WARNING: Invalid config:\nresizefs_enabled:", logs)
- self.assertIn(
- 'WARNING: Could not determine filesystem type of /\n',
- logs)
- self.assertEqual(
- [mock.call('/', LOG)],
- m_get_mount_info.call_args_list)
-
- def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
- """handle noops when the root path is not found on the commandline."""
- cfg = {'resizefs_enabled': True}
- exists_mock_path = 'cloudinit.config.cc_resizefs_vyos.os.path.exists'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- handle, 'cc_resizefs_vyos', cfg, _cloud=None, log=LOG,
- args=[])
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_resize_zfs_cmd_return(self):
- zpool = 'zroot'
- devpth = 'gpt/system'
- self.assertEqual(('zpool', 'online', '-e', zpool, devpth),
- _resize_zfs(zpool, devpth))
-
- def test_resize_xfs_cmd_return(self):
- mount_point = '/mnt/test'
- devpth = '/dev/sda1'
- self.assertEqual(('xfs_growfs', mount_point),
- _resize_xfs(mount_point, devpth))
-
- def test_resize_ext_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sdb1'
- self.assertEqual(('resize2fs', devpth),
- _resize_ext(mount_point, devpth))
-
- def test_resize_ufs_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sda2'
- self.assertEqual(('growfs', '-y', mount_point),
- _resize_ufs(mount_point, devpth))
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.parse_mount')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.get_mount_info')
- def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'vmzroot/ROOT/freebsd'
- disk = 'gpt/system'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resizefs_enabled': True}
-
- with mock.patch('cloudinit.config.cc_resizefs_vyos.do_resize') as dresize:
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- ret = dresize.call_args[0][0]
-
- self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret)
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.parse_mount')
- def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'zroot/ROOT/default'
- disk = 'da0p3'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resizefs_enabled': True}
-
- def fake_stat(devpath):
- if devpath == disk:
- raise OSError("not here")
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat
- return FakeStat(25008, 0, 1) # fake block device (S_IFBLK | 0o660)
-
- with mock.patch('cloudinit.config.cc_resizefs_vyos.do_resize') as dresize:
- with mock.patch('cloudinit.config.cc_resizefs_vyos.os.stat') as m_stat:
- m_stat.side_effect = fake_stat
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
-
- self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
- dresize.call_args[0][0])
-
-
-class TestRootDevFromCmdline(CiTestCase):
-
- def test_rootdev_from_cmdline_with_no_root(self):
- """Return None from rootdev_from_cmdline when root is not present."""
- invalid_cases = [
- 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', '']
- for case in invalid_cases:
- self.assertIsNone(util.rootdev_from_cmdline(case))
-
- def test_rootdev_from_cmdline_with_root_startswith_dev(self):
- """Return the cmdline root when the path starts with /dev."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this'))
-
- def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
- """Add /dev prefix to cmdline root when the path lacks the prefix."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=this'))
-
- def test_rootdev_from_cmdline_with_root_with_label(self):
- """When cmdline root contains a LABEL, our root is disk/by-label."""
- self.assertEqual(
- '/dev/disk/by-label/unique',
- util.rootdev_from_cmdline('asdf root=LABEL=unique'))
-
- def test_rootdev_from_cmdline_with_root_with_uuid(self):
- """When cmdline root contains a UUID, our root is disk/by-uuid."""
- self.assertEqual(
- '/dev/disk/by-uuid/adsfdsaf-adsf',
- util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf'))
-
-
-class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
-
- with_logs = True
-
- def test_maybe_get_writable_device_path_none_on_overlayroot(self):
- """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
- info = 'does not matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, 'overlayroot', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "Not attempting to resize devpath 'overlayroot'",
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
- """When root does not exist isn't in the cmdline, log warning."""
- info = 'does not matter'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- exists_mock_path = 'cloudinit.config.cc_resizefs_vyos.os.path.exists'
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertIsNone(devpath)
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_maybe_get_writable_device_path_does_not_exist(self):
- """When devpath does not exist, a warning is logged."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: Device '/dev/I/dont/exist' did not exist."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
- """When devpath does not exist in a container, log a debug message."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_raises_oserror(self):
- """When unexpected OSError is raises by os.stat it is reraised."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- with self.assertRaises(OSError) as context_manager:
- wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos',
- {'util.is_container': {'return_value': True},
- 'os.stat': {'side_effect': OSError('Something unexpected')}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertEqual(
- 'Something unexpected', str(context_manager.exception))
-
- def test_maybe_get_writable_device_path_non_block(self):
- """When device is not a block device, emit warning return False."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: device '{0}' not a block device. cannot resize".format(
- fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_non_block_on_container(self):
- """When device is non-block device in container, emit debug log."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: device '{0}' not a block device in container."
- ' cannot resize'.format(fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_returns_cmdline_root(self):
- """When root device is UUID in kernel commandline, update devpath."""
- # XXX Long-term we want to use FilesystemMocking test to avoid
- # touching os.stat.
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def.
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos',
- {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'},
- 'util.is_container': False,
- 'os.path.exists': False, # /dev/root doesn't exist
- 'os.stat': {
- 'return_value': FakeStat(25008, 0, 1)} # fake block device
- },
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath)
- self.assertIn(
- "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
- " per kernel cmdline",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs_vyos.os.path.isdir')
- def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = False
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs_vyos.os.path.isdir')
- def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = True
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '/'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.is_container', return_value=True)
- @mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
- m_is_container):
- freebsd.return_value = True
- info = 'dev=gpt/system mnt_point=/ path=/'
- devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
- self.assertEqual('gpt/system', devpth)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
deleted file mode 100644
index 73237d68..00000000
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_runcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
- skipUnlessJsonSchema)
-
-import logging
-import os
-import stat
-
-LOG = logging.getLogger(__name__)
-
-
-class TestRuncmd(FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestRuncmd, self).setUp()
- self.subp = subp.subp
- self.new_root = self.tmp_dir()
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({'scripts': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skip_if_no_runcmd(self):
- """When the provided config doesn't contain runcmd, skip it."""
- cfg = {}
- mycloud = self._get_cloud('ubuntu')
- handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "Skipping module named notimportant, no 'runcmd' key",
- self.logs.getvalue())
-
- def test_handler_invalid_command_set(self):
- """Commands which can't be converted to shell will raise errors."""
- invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Failed to shellify 1 into file'
- ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array type for runcmd key.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Invalid config:\nruncmd: 1 is not of type \'array\'',
- self.logs.getvalue())
- self.assertIn('Failed to shellify', self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_item_type(self):
- """Schema validation warns of non-array or string runcmd items.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {
- 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- expected_warnings = [
- 'runcmd.1: 20 is not valid under any of the given schemas',
- 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given'
- ' schema'
- ]
- logs = self.logs.getvalue()
- for warning in expected_warnings:
- self.assertIn(warning, logs)
- self.assertIn('Failed to shellify', logs)
-
- def test_handler_write_valid_runcmd_schema_to_file(self):
- """Valid runcmd schema is written to a runcmd shell script."""
- valid_config = {'runcmd': [['ls', '/']]}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', valid_config, cc, LOG, [])
- runcmd_file = os.path.join(
- self.new_root,
- 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd')
- self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file))
- file_stat = os.stat(runcmd_file)
- self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode))
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
- """Directly test schema rather than through handle."""
-
- schema = schema
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- [["echo", "bye"], ["echo", "bye"]],
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- ["echo bye", "echo bye"],
- "command entries can be duplicate.")
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
deleted file mode 100644
index 85167f19..00000000
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Based on test_handler_set_hostname.py
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_seed_random
-
-import gzip
-import tempfile
-from io import BytesIO
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-
-class TestRandomSeed(t_help.TestCase):
- def setUp(self):
- super(TestRandomSeed, self).setUp()
- self._seed_file = tempfile.mktemp()
- self.unapply = []
-
- # by default 'which' has nothing in its path
- self.apply_patches([(subp, 'which', self._which)])
- self.apply_patches([(subp, 'subp', self._subp)])
- self.subp_called = []
- self.whichdata = {}
-
- def tearDown(self):
- apply_patches([i for i in reversed(self.unapply)])
- util.del_file(self._seed_file)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def _which(self, program):
- return self.whichdata.get(program)
-
- def _subp(self, *args, **kwargs):
- # supports subp calling with cmd as args or kwargs
- if 'args' not in kwargs:
- kwargs['args'] = args[0]
- self.subp_called.append(kwargs)
- return
-
- def _compress(self, text):
- contents = BytesIO()
- gz_fh = gzip.GzipFile(mode='wb', fileobj=contents)
- gz_fh.write(text)
- gz_fh.close()
- return contents.getvalue()
-
- def _get_cloud(self, distro, metadata=None):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- ubuntu_distro = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths)
- if metadata:
- ds.metadata = metadata
- return cloud.Cloud(ds, paths, {}, ubuntu_distro, None)
-
- def test_append_random(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-tim-was-here", contents)
-
- def test_append_random_unknown_encoding(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'special_encoding',
- }
- }
- self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg,
- self._get_cloud('ubuntu'), LOG, [])
-
- def test_append_random_gzip(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gzip',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-toe", contents)
-
- def test_append_random_gz(self):
- data = self._compress(b"big-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gz',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("big-toe", contents)
-
- def test_append_random_base64(self):
- data = util.b64e('bubbles')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'base64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("bubbles", contents)
-
- def test_append_random_b64(self):
- data = util.b64e('kit-kat')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'b64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("kit-kat", contents)
-
- def test_append_random_metadata(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'})
- cc_seed_random.handle('test', cfg, c, LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual('tiny-tim-was-here-so-was-josh', contents)
-
- def test_seed_command_provided_and_available(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'pollinate': '/usr/bin/pollinate'}
- cfg = {'random_seed': {'command': ['pollinate', '-q']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- subp_args = [f['args'] for f in self.subp_called]
- self.assertIn(['pollinate', '-q'], subp_args)
-
- def test_seed_command_not_provided(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cc_seed_random.handle('test', {}, c, LOG, [])
-
- # subp should not have been called, as 'which' reports the command unavailable
- self.assertFalse(self.subp_called)
-
- def test_unavailable_seed_command_and_required_raises_error(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
- 'command_required': True}}
- self.assertRaises(ValueError, cc_seed_random.handle,
- 'test', cfg, c, LOG, [])
-
- def test_seed_command_and_required(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- self.assertIn(['foo'], [f['args'] for f in self.subp_called])
-
- def test_file_in_environment_for_command(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo'],
- 'file': self._seed_file}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- # this just insists that the first time subp was called,
- # RANDOM_SEED_FILE was set up correctly in the environment
- subp_env = [f['env'] for f in self.subp_called]
- self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file)
-
-
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
deleted file mode 100644
index 58abf51a..00000000
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_set_hostname
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostname(t_help.FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHostname, self).setUp()
- self.tmp = tempfile.mkdtemp()
- util.ensure_dir(os.path.join(self.tmp, 'data'))
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({'cloud_dir': self.tmp})
- return cls(kind, {}, paths)
-
- def test_write_hostname_rhel(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('rhel')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file("/etc/sysconfig/network", decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
- dict(n_cfg))
-
- def test_write_hostname_debian(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('debian')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('blah', contents.strip())
-
- def test_write_hostname_sles(self):
- cfg = {
- 'hostname': 'blah.blah.blah.suse.com',
- }
- distro = self._fetch_distro('sles')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file(distro.hostname_conf_fn)
- self.assertEqual('blah', contents.strip())
-
- def test_multiple_calls_skips_unchanged_hostname(self):
- """Only new hostname or fqdn values will generate a hostname call."""
- distro = self._fetch_distro('debian')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('hostname1', contents.strip())
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- self.assertIn(
- 'DEBUG: No hostname changes. Skipping set-hostname\n',
- self.logs.getvalue())
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('hostname2', contents.strip())
- self.assertIn(
- 'Non-persistently setting the system hostname to hostname2',
- self.logs.getvalue())
-
- def test_error_on_distro_set_hostname_errors(self):
- """Raise SetHostnameError on exceptions from distro.set_hostname."""
- distro = self._fetch_distro('debian')
-
- def set_hostname_error(hostname, fqdn):
- raise Exception("OOPS on: %s" % fqdn)
-
- distro.set_hostname = set_hostname_error
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr:
- cc_set_hostname.handle(
- 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- self.assertEqual(
- 'Failed to set the hostname to hostname1.me.com (hostname1):'
- ' OOPS on: hostname1.me.com',
- str(ctx_mgr.exception))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
deleted file mode 100644
index 7c61bbf9..00000000
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import configparser
-import logging
-import shutil
-import tempfile
-
-from cloudinit import util
-from cloudinit.config import cc_yum_add_repo
-from cloudinit.tests import helpers
-
-LOG = logging.getLogger(__name__)
-
-
-class TestConfig(helpers.FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_bad_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- # Missing this should cause the repo not to be written
- # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- self.assertRaises(IOError, util.load_file,
- "/etc/yum.repos.d/epel_testing.repo")
-
- def test_write_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
- parser = configparser.ConfigParser()
- parser.read_string(contents)
- expected = {
- 'epel_testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'failovermethod': 'priority',
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'enabled': '0',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'gpgcheck': '1',
- }
- }
- for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
- for k, v in expected[section].items():
- self.assertEqual(parser.get(section, k), v)
-
- def test_write_config_array(self):
- cfg = {
- 'yum_repos': {
- 'puppetlabs-products': {
- 'name': 'Puppet Labs Products El 6 - $basearch',
- 'baseurl':
- 'http://yum.puppetlabs.com/el/6/products/$basearch',
- 'gpgkey': [
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs',
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
- ],
- 'enabled': True,
- 'gpgcheck': True,
- }
- }
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
- parser = configparser.ConfigParser()
- parser.read_string(contents)
- expected = {
- 'puppetlabs_products': {
- 'name': 'Puppet Labs Products El 6 - $basearch',
- 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch',
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n'
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
- 'enabled': '1',
- 'gpgcheck': '1',
- }
- }
- for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
- for k, v in expected[section].items():
- self.assertEqual(parser.get(section, k), v)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
deleted file mode 100644
index 15aa77bb..00000000
--- a/tests/unittests/test_handler/test_schema.py
+++ /dev/null
@@ -1,554 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import cloudinit
-from cloudinit.config.schema import (
- CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
- get_schema_doc, get_schema, validate_cloudconfig_file,
- validate_cloudconfig_schema, main)
-from cloudinit.util import write_file
-
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
-
-from copy import copy
-import itertools
-import os
-import pytest
-from pathlib import Path
-from textwrap import dedent
-from yaml import safe_load
-
-
-class GetSchemaTest(CiTestCase):
-
- def test_get_schema_coalesces_known_schema(self):
- """Every cloudconfig module with schema is listed in allOf keyword."""
- schema = get_schema()
- self.assertCountEqual(
- [
- 'cc_apk_configure',
- 'cc_apt_configure',
- 'cc_bootcmd',
- 'cc_locale',
- 'cc_ntp',
- 'cc_resizefs',
- 'cc_runcmd',
- 'cc_snap',
- 'cc_ubuntu_advantage',
- 'cc_ubuntu_drivers',
- 'cc_write_files',
- 'cc_zypper_add_repo',
- 'cc_chef'
- ],
- [subschema['id'] for subschema in schema['allOf']])
- self.assertEqual('cloud-config-schema', schema['id'])
- self.assertEqual(
- 'http://json-schema.org/draft-04/schema#',
- schema['$schema'])
- # FULL_SCHEMA is updated by the get_schema call
- from cloudinit.config.schema import FULL_SCHEMA
- self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
-
- def test_get_schema_returns_global_when_set(self):
- """When FULL_SCHEMA global is already set, get_schema returns it."""
- m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA'
- with mock.patch(m_schema_path, {'here': 'iam'}):
- self.assertEqual({'here': 'iam'}, get_schema())
-
-
-class SchemaValidationErrorTest(CiTestCase):
- """Test validate_cloudconfig_schema"""
-
- def test_schema_validation_error_expects_schema_errors(self):
- """SchemaValidationError is initialized from schema_errors."""
- errors = (('key.path', 'unexpected key "junk"'),
- ('key2.path', '"-123" is not a valid "hostname" format'))
- exception = SchemaValidationError(schema_errors=errors)
- self.assertIsInstance(exception, Exception)
- self.assertEqual(exception.schema_errors, errors)
- self.assertEqual(
- 'Cloud config schema errors: key.path: unexpected key "junk", '
- 'key2.path: "-123" is not a valid "hostname" format',
- str(exception))
- self.assertTrue(isinstance(exception, ValueError))
-
-
-class ValidateCloudConfigSchemaTest(CiTestCase):
- """Tests for validate_cloudconfig_schema."""
-
- with_logs = True
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_non_strict_emits_warnings(self):
- """When strict is False validate_cloudconfig_schema emits warnings."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- validate_cloudconfig_schema({'p1': -1}, schema, strict=False)
- self.assertIn(
- "Invalid config:\np1: -1 is not of type 'string'\n",
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self):
- """Warning from validate_cloudconfig_schema when missing jsonschema."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}):
- validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
- self.assertIn(
- 'Ignoring schema validation. python-jsonschema is not present',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_strict_raises_errors(self):
- """When strict is True validate_cloudconfig_schema raises errors."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
- self.assertEqual(
- "Cloud config schema errors: p1: -1 is not of type 'string'",
- str(context_mgr.exception))
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_honors_formats(self):
- """With strict True, validate_cloudconfig_schema errors on format."""
- schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True)
- self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
- str(context_mgr.exception))
-
-
-class TestCloudConfigExamples:
- schema = get_schema()
- params = [
- (schema["id"], example)
- for schema in schema["allOf"] for example in schema["examples"]]
-
- @pytest.mark.parametrize("schema_id,example", params)
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_of_example(self, schema_id, example):
- """ For a given example in a config module we test if it is valid
- according to the unified schema of all config modules
- """
- config_load = safe_load(example)
- validate_cloudconfig_schema(
- config_load, self.schema, strict=True)
-
-
-class ValidateCloudConfigFileTest(CiTestCase):
- """Tests for validate_cloudconfig_file."""
-
- def setUp(self):
- super(ValidateCloudConfigFileTest, self).setUp()
- self.config_file = self.tmp_path('cloudcfg.yaml')
-
- def test_validateconfig_file_error_on_absent_file(self):
- """On absent config_path, validate_cloudconfig_file errors."""
- with self.assertRaises(RuntimeError) as context_mgr:
- validate_cloudconfig_file('/not/here', {})
- self.assertEqual(
- 'Configfile /not/here does not exist',
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_invalid_header(self):
- """On invalid header, validate_cloudconfig_file errors.
-
- A SchemaValidationError is raised when the file doesn't begin with
- CLOUD_CONFIG_HEADER.
- """
- write_file(self.config_file, '#junk')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertEqual(
- 'Cloud config schema errors: format-l1.c1: File {0} needs to begin'
- ' with "{1}"'.format(
- self.config_file, CLOUD_CONFIG_HEADER.decode()),
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
- """On non-yaml scan issues, validate_cloudconfig_file errors."""
- # Generate a scanner error by providing text on a single line with
- # improper indent.
- write_file(self.config_file, '#cloud-config\nasdf:\nasdf')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format(
- self.config_file),
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_non_yaml_parser_error(self):
- """On non-yaml parser issues, validate_cloudconfig_file errors."""
- write_file(self.config_file, '#cloud-config\n{}}')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format(
- self.config_file),
- str(context_mgr.exception))
-
- @skipUnlessJsonSchema()
- def test_validateconfig_file_strictly_validates_schema(self):
- """validate_cloudconfig_file raises errors on invalid schema."""
- schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
- write_file(self.config_file, '#cloud-config\np1: "-1"')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, schema)
- self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
- str(context_mgr.exception))
-
-
-class GetSchemaDocTest(CiTestCase):
- """Tests for get_schema_doc."""
-
- def setUp(self):
- super(GetSchemaDocTest, self).setUp()
- self.required_schema = {
- 'title': 'title', 'description': 'description', 'id': 'id',
- 'name': 'name', 'frequency': 'frequency',
- 'distros': ['debian', 'rhel']}
-
- def test_get_schema_doc_returns_restructured_text(self):
- """get_schema_doc returns restructured text for a cloudinit schema."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': 'array', 'description': 'prop-description',
- 'items': {'type': 'integer'}}}})
- self.assertEqual(
- dedent("""
- name
- ----
- **Summary:** title
-
- description
-
- **Internal name:** ``id``
-
- **Module frequency:** frequency
-
- **Supported distros:** debian, rhel
-
- **Config schema**:
- **prop1:** (array of integer) prop-description\n\n"""),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_multiple_types(self):
- """get_schema_doc delimits multiple property types with a '/'."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': ['string', 'integer'],
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (string/integer) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_enum_types(self):
- """get_schema_doc converts enum types to yaml and delimits with '/'."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'enum': [True, False, 'stuff'],
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (true/false/stuff) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_nested_oneof_property_types(self):
- """get_schema_doc describes array items oneOf declarations in type."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': 'array',
- 'items': {
- 'oneOf': [{'type': 'string'},
- {'type': 'integer'}]},
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (array of (string)/(integer)) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_string_examples(self):
- """get_schema_doc properly indented examples as a list of strings."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'],
- 'properties': {
- 'prop1': {'type': 'array', 'description': 'prop-description',
- 'items': {'type': 'integer'}}}})
- self.assertIn(
- dedent("""
- **Config schema**:
- **prop1:** (array of integer) prop-description
-
- **Examples**::
-
- ex1:
- [don't, expand, "this"]
- # --- Example2 ---
- ex2: true
- """),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_properly_parse_description(self):
- """get_schema_doc description properly formatted"""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'p1': {
- 'type': 'string',
- 'description': dedent("""\
- This item
- has the
- following options:
-
- - option1
- - option2
- - option3
-
- The default value is
- option1""")
- }
- }}
- )
-
- self.assertIn(
- dedent("""
- **Config schema**:
- **p1:** (string) This item has the following options:
-
- - option1
- - option2
- - option3
-
- The default value is option1
- """),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_raises_key_errors(self):
- """get_schema_doc raises KeyErrors on missing keys."""
- for key in self.required_schema:
- invalid_schema = copy(self.required_schema)
- invalid_schema.pop(key)
- with self.assertRaises(KeyError) as context_mgr:
- get_schema_doc(invalid_schema)
- self.assertIn(key, str(context_mgr.exception))
-
-
-class AnnotatedCloudconfigFileTest(CiTestCase):
- maxDiff = None
-
- def test_annotated_cloudconfig_file_no_schema_errors(self):
- """With no schema_errors, print the original content."""
- content = b'ntp:\n pools: [ntp1.pools.com]\n'
- self.assertEqual(
- content,
- annotated_cloudconfig_file({}, content, schema_errors=[]))
-
- def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
- """With schema_errors, error lines are annotated and a footer added."""
- content = dedent("""\
- #cloud-config
- # comment
- ntp:
- pools: [-99, 75]
- """).encode()
- expected = dedent("""\
- #cloud-config
- # comment
- ntp: # E1
- pools: [-99, 75] # E2,E3
-
- # Errors: -------------
- # E1: Some type error
- # E2: -99 is not a string
- # E3: 75 is not a string
-
- """)
- parsed_config = safe_load(content[13:])
- schema_errors = [
- ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'),
- ('ntp.pools.1', '75 is not a string')]
- self.assertEqual(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors))
-
- def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
- """Errors are annotated for lists with items on separate lines."""
- content = dedent("""\
- #cloud-config
- # comment
- ntp:
- pools:
- - -99
- - 75
- """).encode()
- expected = dedent("""\
- ntp:
- pools:
- - -99 # E1
- - 75 # E2
- """)
- parsed_config = safe_load(content[13:])
- schema_errors = [
- ('ntp.pools.0', '-99 is not a string'),
- ('ntp.pools.1', '75 is not a string')]
- self.assertIn(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors))
-
-
-class TestMain:
-
- exclusive_combinations = itertools.combinations(
- ["--system", "--docs all", "--config-file something"], 2
- )
-
- @pytest.mark.parametrize("params", exclusive_combinations)
- def test_main_exclusive_args(self, params, capsys):
- """Main exits non-zero and error on required exclusive args."""
- params = list(itertools.chain(*[a.split() for a in params]))
- with mock.patch('sys.argv', ['mycmd'] + params):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
-
- _out, err = capsys.readouterr()
- expected = (
- 'Expected one of --config-file, --system or --docs arguments\n'
- )
- assert expected == err
-
- def test_main_missing_args(self, capsys):
- """Main exits non-zero and reports an error on missing parameters."""
- with mock.patch('sys.argv', ['mycmd']):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
-
- _out, err = capsys.readouterr()
- expected = (
- 'Expected one of --config-file, --system or --docs arguments\n'
- )
- assert expected == err
-
- def test_main_absent_config_file(self, capsys):
- """Main exits non-zero when config file is absent."""
- myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
- with mock.patch('sys.argv', myargs):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
- _out, err = capsys.readouterr()
- assert 'Configfile NOT_A_FILE does not exist\n' == err
-
- def test_main_prints_docs(self, capsys):
- """When --docs parameter is provided, main generates documentation."""
- myargs = ['mycmd', '--docs', 'all']
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert '\nNTP\n---\n' in out
- assert '\nRuncmd\n------\n' in out
-
- def test_main_validates_config_file(self, tmpdir, capsys):
- """When --config-file parameter is provided, main validates schema."""
- myyaml = tmpdir.join('my.yaml')
- myargs = ['mycmd', '--config-file', myyaml.strpath]
- myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert 'Valid cloud-config: {0}\n'.format(myyaml) == out
-
- @mock.patch('cloudinit.config.schema.read_cfg_paths')
- @mock.patch('cloudinit.config.schema.os.getuid', return_value=0)
- def test_main_validates_system_userdata(
- self, m_getuid, m_read_cfg_paths, capsys, paths
- ):
- """When --system is provided, main validates system userdata."""
- m_read_cfg_paths.return_value = paths
- ud_file = paths.get_ipath_cur("userdata_raw")
- write_file(ud_file, b'#cloud-config\nntp:')
- myargs = ['mycmd', '--system']
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert 'Valid cloud-config: system userdata\n' == out
-
- @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000)
- def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
- """Non-root user can't use --system param"""
- myargs = ['mycmd', '--system']
- with mock.patch('sys.argv', myargs):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
- _out, err = capsys.readouterr()
- expected = (
- 'Unable to read system userdata as non-root user. Try using sudo\n'
- )
- assert expected == err
-
-
-class CloudTestsIntegrationTest(CiTestCase):
- """Validate all cloud-config yaml schema provided in integration tests.
-
- It is less expensive to have unittests validate schema of all cloud-config
- yaml provided to integration tests, than to run an integration test which
- raises Warnings or errors on invalid cloud-config schema.
- """
-
- @skipUnlessJsonSchema()
- def test_all_integration_test_cloud_config_schema(self):
- """Validate schema of cloud_tests yaml files looking for warnings."""
- schema = get_schema()
- testsdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
- integration_testdir = os.path.sep.join(
- [testsdir, 'cloud_tests', 'testcases'])
- errors = []
-
- yaml_files = []
- for root, _dirnames, filenames in os.walk(integration_testdir):
- yaml_files.extend([os.path.join(root, f)
- for f in filenames if f.endswith(".yaml")])
- self.assertTrue(len(yaml_files) > 0)
-
- for filename in yaml_files:
- test_cfg = safe_load(open(filename))
- cloud_config = test_cfg.get('cloud_config')
- if cloud_config:
- cloud_config = safe_load(
- cloud_config.replace("#cloud-config\n", ""))
- try:
- validate_cloudconfig_schema(
- cloud_config, schema, strict=True)
- except SchemaValidationError as e:
- errors.append(
- '{0}: {1}'.format(
- filename, e))
- if errors:
- raise AssertionError(', '.join(errors))
-
-
-def _get_schema_doc_examples():
- examples_dir = Path(
- cloudinit.__file__).parent.parent / 'doc' / 'examples'
- assert examples_dir.is_dir()
-
- all_text_files = (f for f in examples_dir.glob('cloud-config*.txt')
- if not f.name.startswith('cloud-config-archive'))
- return all_text_files
-
-
-class TestSchemaDocExamples:
- schema = get_schema()
-
- @pytest.mark.parametrize("example_path", _get_schema_doc_examples())
- @skipUnlessJsonSchema()
- def test_schema_doc_examples(self, example_path):
- validate_cloudconfig_file(str(example_path), self.schema)
-
-# vi: ts=4 expandtab syntax=python
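
For reference, a minimal sketch (not part of the commit) of the annotation behaviour exercised by the removed TestAnnotatedCloudconfigFile case above. The call signature and the (path, message) error-tuple format are taken from the test itself; the import path is an assumption.

from yaml import safe_load
from cloudinit.config.schema import annotated_cloudconfig_file  # assumed import path

content = b"#cloud-config\nntp:\n  pools:\n    - -99\n    - 75\n"
parsed_config = safe_load(content)  # the "#cloud-config" header parses as a YAML comment
schema_errors = [
    ("ntp.pools.0", "-99 is not a string"),
    ("ntp.pools.1", "75 is not a string"),
]
# Each offending line is echoed back with an error marker such as "# E1".
print(annotated_cloudconfig_file(parsed_config, content, schema_errors))
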
diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py
index 2e4582a0..69291597 100644
--- a/tests/unittests/test_helpers.py
+++ b/tests/unittests/test_helpers.py
@@ -3,10 +3,10 @@
"""Tests of the built-in user data handlers."""
import os
-
-from cloudinit.tests import helpers as test_helpers
+from pathlib import Path
from cloudinit import sources
+from tests.unittests import helpers as test_helpers
class MyDataSource(sources.DataSource):
@@ -24,8 +24,9 @@ class TestPaths(test_helpers.ResourceUsingTestCase):
mypaths = self.getCloudPaths(myds)
self.assertEqual(
- os.path.join(mypaths.cloud_dir, 'instances', safe_iid),
- mypaths.get_ipath())
+ os.path.join(mypaths.cloud_dir, "instances", safe_iid),
+ mypaths.get_ipath(),
+ )
def test_get_ipath_and_empty_instance_id_returns_none(self):
myds = MyDataSource(sys_cfg={}, distro=None, paths={})
@@ -34,4 +35,35 @@ class TestPaths(test_helpers.ResourceUsingTestCase):
self.assertIsNone(mypaths.get_ipath())
+
+class Testcloud_init_project_dir:
+ top_dir = test_helpers.get_top_level_dir()
+
+ @staticmethod
+ def _get_top_level_dir_alt_implementation():
+ """Alternative implementation for comparing against.
+
+ Note: Recursively searching for .git/ fails during build tests due to
+ .git not existing. This implementation assumes that ../../../ is the
+    relative path to the cloud-init project directory from this file.
+ """
+ out = Path(__file__).parent.parent.parent.resolve()
+ return out
+
+ def test_top_level_dir(self):
+ """Assert the location of the top project directory is correct"""
+ assert self.top_dir == self._get_top_level_dir_alt_implementation()
+
+ def test_cloud_init_project_dir(self):
+ """Assert cloud_init_project_dir produces an expected location
+
+ Compare the returned value to an alternate (naive) implementation
+ """
+ assert (
+ str(Path(self.top_dir, "test"))
+ == test_helpers.cloud_init_project_dir("test")
+ == str(Path(self._get_top_level_dir_alt_implementation(), "test"))
+ )
+
+
# vi: ts=4 expandtab
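
A rough sketch of the equivalence the new Testcloud_init_project_dir case asserts, using only the two helpers shown in the hunk above and assuming it runs from a checkout of the source tree.

from pathlib import Path
from tests.unittests import helpers as test_helpers

top = test_helpers.get_top_level_dir()  # Path to the project root
# cloud_init_project_dir("test") should name the same location as top / "test".
assert test_helpers.cloud_init_project_dir("test") == str(Path(top, "test"))
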
diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py
index e069a487..87c69dbb 100644
--- a/tests/unittests/test_log.py
+++ b/tests/unittests/test_log.py
@@ -9,11 +9,10 @@ import time
from cloudinit import log as ci_logging
from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
class TestCloudInitLogger(CiTestCase):
-
def setUp(self):
# set up a logger like cloud-init does in setupLogging, but instead
# of sys.stderr, we'll plug in a StringIO() object so we can see
@@ -26,7 +25,7 @@ class TestCloudInitLogger(CiTestCase):
console.setLevel(ci_logging.DEBUG)
self.ci_root.addHandler(console)
self.ci_root.setLevel(ci_logging.DEBUG)
- self.LOG = logging.getLogger('test_cloudinit_logger')
+ self.LOG = logging.getLogger("test_cloudinit_logger")
def test_logger_uses_gmtime(self):
"""Test that log message have timestamp in UTC (gmtime)"""
@@ -43,15 +42,16 @@ class TestCloudInitLogger(CiTestCase):
# utc_after : 2017-08-23 14:19:43.570064
utc_before = datetime.datetime.utcnow() - datetime.timedelta(0, 0.5)
- self.LOG.error('Test message')
+ self.LOG.error("Test message")
utc_after = datetime.datetime.utcnow() + datetime.timedelta(0, 0.5)
# extract timestamp from log:
# 2017-08-23 14:19:43,069 - test_log.py[ERROR]: Test message
logstr = self.ci_logs.getvalue().splitlines()[0]
- timestampstr = logstr.split(' - ')[0]
- parsed_dt = datetime.datetime.strptime(timestampstr,
- CLOUD_INIT_ASCTIME_FMT)
+ timestampstr = logstr.split(" - ")[0]
+ parsed_dt = datetime.datetime.strptime(
+ timestampstr, CLOUD_INIT_ASCTIME_FMT
+ )
self.assertLess(utc_before, parsed_dt)
self.assertLess(parsed_dt, utc_after)
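
The timestamp assertion in test_logger_uses_gmtime reduces to the parsing step below; the sample log line is the one quoted in the test's comment, and CLOUD_INIT_ASCTIME_FMT is the format imported at the top of the file.

import datetime
from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT

logstr = "2017-08-23 14:19:43,069 - test_log.py[ERROR]: Test message"
timestampstr = logstr.split(" - ")[0]
parsed_dt = datetime.datetime.strptime(timestampstr, CLOUD_INIT_ASCTIME_FMT)
# The test then asserts utc_before < parsed_dt < utc_after around the log call.
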
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 10871bcf..cf484dda 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -1,13 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers
-
-from cloudinit.handlers import cloud_config
-from cloudinit.handlers import (CONTENT_START, CONTENT_END)
-
-from cloudinit import helpers as c_helpers
-from cloudinit import util
-
import collections
import glob
import os
@@ -15,6 +7,11 @@ import random
import re
import string
+from cloudinit import helpers as c_helpers
+from cloudinit import util
+from cloudinit.handlers import CONTENT_END, CONTENT_START, cloud_config
+from tests.unittests import helpers
+
SOURCE_PAT = "source*.*yaml"
EXPECTED_PAT = "expected%s.yaml"
TYPES = [dict, str, list, tuple, None, int]
@@ -43,7 +40,7 @@ def _old_mergemanydict(*args):
def _random_str(rand):
- base = ''
+ base = ""
for _i in range(rand.randint(1, 2 ** 8)):
base += rand.choice(string.ascii_letters + string.digits)
return base
@@ -98,7 +95,7 @@ def make_dict(max_depth, seed=None):
class TestSimpleRun(helpers.ResourceUsingTestCase):
def _load_merge_files(self):
- merge_root = helpers.resourceLocation('merge_sources')
+ merge_root = helpers.resourceLocation("merge_sources")
tests = []
source_ids = collections.defaultdict(list)
expected_files = {}
@@ -106,8 +103,9 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
base_fn = os.path.basename(fn)
file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn)
if not file_id:
- raise IOError("File %s does not have a numeric identifier"
- % (fn))
+ raise IOError(
+ "File %s does not have a numeric identifier" % (fn)
+ )
file_id = int(file_id.group(1))
source_ids[file_id].append(fn)
expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id))
@@ -141,29 +139,31 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
cc_handler = cloud_config.CloudConfigPartHandler(paths)
cc_handler.cloud_fn = None
for (payloads, (expected_merge, expected_fn)) in tests:
- cc_handler.handle_part(None, CONTENT_START, None,
- None, None, None)
+ cc_handler.handle_part(None, CONTENT_START, None, None, None, None)
merging_fns = []
for (fn, contents) in payloads:
- cc_handler.handle_part(None, None, "%s.yaml" % (fn),
- contents, None, {})
+ cc_handler.handle_part(
+ None, None, "%s.yaml" % (fn), contents, None, {}
+ )
merging_fns.append(fn)
merged_buf = cc_handler.cloud_buf
- cc_handler.handle_part(None, CONTENT_END, None,
- None, None, None)
+ cc_handler.handle_part(None, CONTENT_END, None, None, None, None)
fail_msg = "Equality failure on checking %s with %s: %s != %s"
- fail_msg = fail_msg % (expected_fn,
- ",".join(merging_fns), merged_buf,
- expected_merge)
+ fail_msg = fail_msg % (
+ expected_fn,
+ ",".join(merging_fns),
+ merged_buf,
+ expected_merge,
+ )
self.assertEqual(expected_merge, merged_buf, msg=fail_msg)
def test_compat_merges_dict(self):
a = {
- '1': '2',
- 'b': 'c',
+ "1": "2",
+ "b": "c",
}
b = {
- 'b': 'e',
+ "b": "e",
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
@@ -171,53 +171,53 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merges_dict2(self):
a = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': 3,
+ "Blah": 1,
+ "Blah2": 2,
+ "Blah3": 3,
}
b = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': [1],
+ "Blah": 1,
+ "Blah2": 2,
+ "Blah3": [1],
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
self.assertEqual(c, d)
def test_compat_merges_list(self):
- a = {'b': [1, 2, 3]}
- b = {'b': [4, 5]}
- c = {'b': [6, 7]}
+ a = {"b": [1, 2, 3]}
+ b = {"b": [4, 5]}
+ c = {"b": [6, 7]}
e = _old_mergemanydict(a, b, c)
f = util.mergemanydict([a, b, c])
self.assertEqual(e, f)
def test_compat_merges_str(self):
- a = {'b': "hi"}
- b = {'b': "howdy"}
- c = {'b': "hallo"}
+ a = {"b": "hi"}
+ b = {"b": "howdy"}
+ c = {"b": "hallo"}
e = _old_mergemanydict(a, b, c)
f = util.mergemanydict([a, b, c])
self.assertEqual(e, f)
def test_compat_merge_sub_dict(self):
a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- 'e': 'c',
- 'h': 'd',
- 'hh': {
- '1': 2,
+ "1": "2",
+ "b": {
+ "f": "g",
+ "e": "c",
+ "h": "d",
+ "hh": {
+ "1": 2,
},
- }
+ },
}
b = {
- 'b': {
- 'e': 'c',
- 'hh': {
- '3': 4,
- }
+ "b": {
+ "e": "c",
+ "hh": {
+ "3": 4,
+ },
}
}
c = _old_mergedict(a, b)
@@ -226,14 +226,14 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merge_sub_dict2(self):
a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- }
+ "1": "2",
+ "b": {
+ "f": "g",
+ },
}
b = {
- 'b': {
- 'e': 'c',
+ "b": {
+ "e": "c",
}
}
c = _old_mergedict(a, b)
@@ -242,18 +242,19 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merge_sub_list(self):
a = {
- '1': '2',
- 'b': {
- 'f': ['1'],
- }
+ "1": "2",
+ "b": {
+ "f": ["1"],
+ },
}
b = {
- 'b': {
- 'f': [],
+ "b": {
+ "f": [],
}
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
self.assertEqual(c, d)
+
# vi: ts=4 expandtab
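
A compact sketch of the compatibility pattern these merging tests repeat: util.mergemanydict must agree with the legacy _old_mergedict/_old_mergemanydict helpers defined earlier in this module for plain dict inputs.

from cloudinit import util

a = {"1": "2", "b": "c"}
b = {"b": "e"}
merged = util.mergemanydict([a, b])
# test_compat_merges_dict asserts this equals the legacy _old_mergedict(a, b).
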
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 70453683..47e4ba00 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1,20 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import net
-from cloudinit import distros
-from cloudinit.net import cmdline
-from cloudinit.net import (
- eni, interface_has_own_mac, natural_sort_key, netplan, network_state,
- renderers, sysconfig)
-from cloudinit.sources.helpers import openstack
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
-from cloudinit import safeyaml as yaml
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir)
-
import base64
import copy
import gzip
@@ -23,9 +8,32 @@ import json
import os
import re
import textwrap
-from yaml.serializer import Serializer
import pytest
+from yaml.serializer import Serializer
+
+from cloudinit import distros, net
+from cloudinit import safeyaml as yaml
+from cloudinit import subp, temp_utils, util
+from cloudinit.net import (
+ cmdline,
+ eni,
+ interface_has_own_mac,
+ natural_sort_key,
+ netplan,
+ network_state,
+ networkd,
+ renderers,
+ sysconfig,
+)
+from cloudinit.sources.helpers import openstack
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ dir2dict,
+ mock,
+ populate_dir,
+)
DHCP_CONTENT_1 = """
DEVICE='eth0'
@@ -48,15 +56,19 @@ DOMAINSEARCH='foo.com'
"""
DHCP_EXPECTED_1 = {
- 'name': 'eth0',
- 'type': 'physical',
- 'subnets': [{'broadcast': '192.168.122.255',
- 'control': 'manual',
- 'gateway': '192.168.122.1',
- 'dns_search': ['foo.com'],
- 'type': 'dhcp',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['192.168.122.1']}],
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [
+ {
+ "broadcast": "192.168.122.255",
+ "control": "manual",
+ "gateway": "192.168.122.1",
+ "dns_search": ["foo.com"],
+ "type": "dhcp",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": ["192.168.122.1"],
+ }
+ ],
}
DHCP6_CONTENT_1 = """
@@ -73,12 +85,17 @@ DNSDOMAIN=
"""
DHCP6_EXPECTED_1 = {
- 'name': 'eno1',
- 'type': 'physical',
- 'subnets': [{'control': 'manual',
- 'dns_nameservers': ['2001:67c:1562:8010::2:1'],
- 'netmask': '64',
- 'type': 'dhcp6'}]}
+ "name": "eno1",
+ "type": "physical",
+ "subnets": [
+ {
+ "control": "manual",
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ "netmask": "64",
+ "type": "dhcp6",
+ }
+ ],
+}
STATIC_CONTENT_1 = """
@@ -97,14 +114,20 @@ DOMAINSEARCH='foo.com'
"""
STATIC_EXPECTED_1 = {
- 'name': 'eth1',
- 'type': 'physical',
- 'subnets': [{'broadcast': '10.0.0.255', 'control': 'manual',
- 'gateway': '10.0.0.1',
- 'dns_search': ['foo.com'], 'type': 'static',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['10.0.1.1'],
- 'address': '10.0.0.2'}],
+ "name": "eth1",
+ "type": "physical",
+ "subnets": [
+ {
+ "broadcast": "10.0.0.255",
+ "control": "manual",
+ "gateway": "10.0.0.1",
+ "dns_search": ["foo.com"],
+ "type": "static",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": ["10.0.1.1"],
+ "address": "10.0.0.2",
+ }
+ ],
}
V1_NAMESERVER_ALIAS = """
@@ -471,34 +494,42 @@ ethernets:
# Examples (and expected outputs for various renderers).
OS_SAMPLES = [
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -506,26 +537,39 @@ IPADDR=172.19.1.34
LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -539,60 +583,82 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
-
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
},
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "private-ipv4",
- "type": "ipv4", "netmask": "255.255.255.0",
- "link": "tap1a81968a-79",
- "routes": [],
- "ip_address": "10.0.0.10", "id": "network1"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "private-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.255.0",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "10.0.0.10",
+ "id": "network1",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -602,26 +668,39 @@ LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
NETMASK1=255.255.255.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -637,80 +716,106 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
-
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
},
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "public-ipv6-a",
- "type": "ipv6", "netmask": "",
- "link": "tap1a81968a-79",
- "routes": [
- {
- "gateway": "2001:DB8::1",
- "netmask": "::",
- "network": "::"
- }
- ],
- "ip_address": "2001:DB8::10", "id": "network1"
- }, {
- "network_id": "public-ipv6-b",
- "type": "ipv6", "netmask": "64",
- "link": "tap1a81968a-79",
- "routes": [
- ],
- "ip_address": "2001:DB9::10", "id": "network2"
- }, {
- "network_id": "public-ipv6-c",
- "type": "ipv6", "netmask": "64",
- "link": "tap1a81968a-79",
- "routes": [
- ],
- "ip_address": "2001:DB10::10", "id": "network3"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "public-ipv6-a",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::",
+ }
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ },
+ {
+ "network_id": "public-ipv6-b",
+ "type": "ipv6",
+ "netmask": "64",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "2001:DB9::10",
+ "id": "network2",
+ },
+ {
+ "network_id": "public-ipv6-c",
+ "type": "ipv6",
+ "netmask": "64",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "2001:DB10::10",
+ "id": "network3",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -721,26 +826,39 @@ IPADDR6_2=2001:DB10::10/64
LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -760,24 +878,36 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
- }
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ },
]
EXAMPLE_ENI = """
@@ -820,8 +950,39 @@ iface eth1 inet static
""".lstrip()
NETWORK_CONFIGS = {
- 'small': {
- 'expected_eni': textwrap.dedent("""\
+ "small": {
+ "expected_networkd_eth99": textwrap.dedent(
+ """\
+ [Match]
+ Name=eth99
+ MACAddress=c0:d6:9f:2c:e8:80
+ [Address]
+ Address=192.168.21.3/24
+ [Network]
+ DHCP=ipv4
+ Domains=barley.maas sach.maas
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ DNS=8.8.8.8 8.8.4.4
+ [Route]
+ Gateway=65.61.151.37
+ Destination=0.0.0.0/0
+ Metric=10000
+ """
+ ).rstrip(" "),
+ "expected_networkd_eth1": textwrap.dedent(
+ """\
+ [Match]
+ Name=eth1
+ MACAddress=cf:d6:af:48:e8:80
+ [Network]
+ DHCP=no
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
dns-nameservers 1.2.3.4 5.6.7.8
@@ -839,8 +1000,10 @@ NETWORK_CONFIGS = {
dns-search barley.maas sach.maas
post-up route add default gw 65.61.151.37 metric 10000 || true
pre-down route del default gw 65.61.151.37 metric 10000 || true
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -866,29 +1029,37 @@ NETWORK_CONFIGS = {
to: 0.0.0.0/0
via: 65.61.151.37
set-name: eth99
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=cf:d6:af:48:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth99': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth99": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
LLADDR=c0:d6:9f:2c:e8:80
IPADDR=192.168.21.3
NETMASK=255.255.255.0
- STARTMODE=auto"""),
+ STARTMODE=auto"""
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-eth1': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=cf:d6:af:48:e8:80
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth99': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth99": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEFROUTE=yes
DEVICE=eth99
@@ -904,9 +1075,11 @@ NETWORK_CONFIGS = {
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
+ USERCTL=no"""
+ ),
},
- 'yaml': textwrap.dedent("""
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
# Physical interfaces.
@@ -935,10 +1108,20 @@ NETWORK_CONFIGS = {
- 5.6.7.8
search:
- wark.maas
- """),
+ """
+ ),
},
- 'v4_and_v6': {
- 'expected_eni': textwrap.dedent("""\
+ "v4_and_v6": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=yes
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -947,22 +1130,28 @@ NETWORK_CONFIGS = {
# control-alias iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp4: true
dhcp6: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
- STARTMODE=auto""")
+ STARTMODE=auto"""
+ )
},
- 'yaml': textwrap.dedent("""\
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -970,10 +1159,25 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp4'}
- {'type': 'dhcp6'}
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'v4_and_v6_static': {
- 'expected_eni': textwrap.dedent("""\
+ "v4_and_v6_static": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Link]
+ MTUBytes=8999
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.14.2/24
+ Address=2001:1::1/64
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -986,8 +1190,10 @@ NETWORK_CONFIGS = {
iface iface0 inet6 static
address 2001:1::1/64
mtu 1500
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -997,8 +1203,10 @@ NETWORK_CONFIGS = {
- 2001:1::1/64
ipv6-mtu: 1500
mtu: 9000
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1011,19 +1219,23 @@ NETWORK_CONFIGS = {
- type: static
address: 2001:1::1/64
mtu: 1500
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.14.2
IPADDR6=2001:1::1/64
NETMASK=255.255.255.0
STARTMODE=auto
MTU=9000
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPADDR=192.168.14.2
@@ -1038,17 +1250,21 @@ NETWORK_CONFIGS = {
USERCTL=no
MTU=9000
IPV6_MTU=1500
- """),
+ """
+ ),
},
},
- 'v6_and_v4': {
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "v6_and_v4": {
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
- STARTMODE=auto""")
+ STARTMODE=auto"""
+ )
},
- 'yaml': textwrap.dedent("""\
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1056,40 +1272,58 @@ NETWORK_CONFIGS = {
subnets:
- type: dhcp6
- type: dhcp4
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'dhcpv6_only': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_only": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'dhcp6'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1099,27 +1333,33 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_accept_ra': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_accept_ra": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
accept_ra 1
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: true
dhcp6: true
- """).rstrip(' '),
- 'yaml_v1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v1": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1127,23 +1367,29 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp6'}
accept-ra: true
- """).rstrip(' '),
- 'yaml_v2': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp6: true
accept-ra: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1154,27 +1400,42 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=True
+ """
+ ).rstrip(" "),
},
- 'dhcpv6_reject_ra': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_reject_ra": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
accept_ra 0
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: false
dhcp6: true
- """).rstrip(' '),
- 'yaml_v1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v1": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1182,23 +1443,29 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp6'}
accept-ra: false
- """).rstrip(' '),
- 'yaml_v2': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp6: true
accept-ra: false
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1209,42 +1476,61 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=False
+ """
+ ).rstrip(" "),
},
- 'ipv6_slaac': {
- 'expected_eni': textwrap.dedent("""\
+ "ipv6_slaac": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 auto
dhcp 0
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'ipv6_slaac'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=info
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPV6_AUTOCONF=yes
@@ -1254,11 +1540,13 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'static6': {
- 'yaml': textwrap.dedent("""\
+ "static6": {
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1267,9 +1555,11 @@ NETWORK_CONFIGS = {
subnets:
- type: 'static6'
address: 2001:1::1/64
- """).rstrip(' '),
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPV6ADDR=2001:1::1/64
@@ -1281,42 +1571,52 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_stateless': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_stateless": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 auto
dhcp 1
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'ipv6_dhcpv6-stateless'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=info
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1328,26 +1628,32 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_stateful': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_stateful": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: true
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1355,95 +1661,118 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'ipv6_dhcpv6-stateful'}
accept-ra: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
- BOOTPROTO=none
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
+ BOOTPROTO=dhcp
DEVICE=iface0
DHCPV6C=yes
IPV6INIT=yes
+ IPV6_AUTOCONF=no
IPV6_FORCE_ACCEPT_RA=yes
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'wakeonlan_disabled': {
- 'expected_eni': textwrap.dedent("""\
+ "wakeonlan_disabled": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
ethernets:
iface0:
dhcp4: true
wakeonlan: false
version: 2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
- 'yaml_v2': textwrap.dedent("""\
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp4: true
wakeonlan: false
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'wakeonlan_enabled': {
- 'expected_eni': textwrap.dedent("""\
+ "wakeonlan_enabled": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet dhcp
ethernet-wol g
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
ethernets:
iface0:
dhcp4: true
wakeonlan: true
version: 2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
ETHTOOL_OPTS="wol g"
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=iface0
ETHTOOL_OPTS="wol g"
@@ -1451,18 +1780,21 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
- 'yaml_v2': textwrap.dedent("""\
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp4: true
wakeonlan: true
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'all': {
- 'expected_eni': ("""\
+ "all": {
+ "expected_eni": """\
auto lo
iface lo inet loopback
dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4
@@ -1552,8 +1884,9 @@ iface eth0.101 inet static
post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
-"""),
- 'expected_netplan': textwrap.dedent("""
+""",
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -1649,25 +1982,31 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- barley.maas
- sacchromyces.maas
- brettanomyces.maas
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup """
- """xmit_hash_policy=layer3+4 """
- """miimon=100"
+ BONDING_MODULE_OPTS="mode=active-backup """
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
BONDING_SLAVE_0=eth1
BONDING_SLAVE_1=eth2
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
LLADDR=aa:bb:cc:dd:ee:ff
- STARTMODE=auto"""),
- 'ifcfg-bond0.200': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-bond0.200": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
ETHERDEVICE=bond0
STARTMODE=auto
- VLAN_ID=200"""),
- 'ifcfg-br0': textwrap.dedent("""\
+ VLAN_ID=200"""
+ ),
+ "ifcfg-br0": textwrap.dedent(
+ """\
BRIDGE_AGEINGTIME=250
BOOTPROTO=static
IPADDR=192.168.14.2
@@ -1677,12 +2016,16 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
BRIDGE_PRIORITY=22
BRIDGE_PORTS='eth3 eth4'
STARTMODE=auto
- BRIDGE_STP=off"""),
- 'ifcfg-eth0': textwrap.dedent("""\
+ BRIDGE_STP=off"""
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=c0:d6:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth0.101': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth0.101": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.0.2
IPADDR1=192.168.2.10
@@ -1691,44 +2034,58 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NETMASK1=255.255.255.0
ETHERDEVICE=eth0
STARTMODE=auto
- VLAN_ID=101"""),
- 'ifcfg-eth1': textwrap.dedent("""\
+ VLAN_ID=101"""
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:d6:9f:2c:e8:80
- STARTMODE=hotplug"""),
- 'ifcfg-eth2': textwrap.dedent("""\
+ STARTMODE=hotplug"""
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=c0:bb:9f:2c:e8:80
- STARTMODE=hotplug"""),
- 'ifcfg-eth3': textwrap.dedent("""\
+ STARTMODE=hotplug"""
+ ),
+ "ifcfg-eth3": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=66:bb:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth4': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth4": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=98:bb:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth5': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth5": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
LLADDR=98:bb:9f:2c:e8:8a
- STARTMODE=manual"""),
- 'ifcfg-ib0': textwrap.dedent("""\
+ STARTMODE=manual"""
+ ),
+ "ifcfg-ib0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1
IPADDR=192.168.200.7
MTU=9000
NETMASK=255.255.255.0
STARTMODE=auto
- TYPE=InfiniBand"""),
+ TYPE=InfiniBand"""
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup """
- """xmit_hash_policy=layer3+4 """
- """miimon=100"
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
BONDING_SLAVE0=eth1
BONDING_SLAVE1=eth2
BOOTPROTO=none
@@ -1739,8 +2096,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Bond
- USERCTL=no"""),
- 'ifcfg-bond0.200': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-bond0.200": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=bond0.200
DHCLIENT_SET_DEFAULT_ROUTE=no
@@ -1748,8 +2107,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
PHYSDEV=bond0
USERCTL=no
- VLAN=yes"""),
- 'ifcfg-br0': textwrap.dedent("""\
+ VLAN=yes"""
+ ),
+ "ifcfg-br0": textwrap.dedent(
+ """\
AGEING=250
BOOTPROTO=none
DEFROUTE=yes
@@ -1767,16 +2128,20 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
PRIO=22
STP=no
TYPE=Bridge
- USERCTL=no"""),
- 'ifcfg-eth0': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=c0:d6:9f:2c:e8:80
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth0.101': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth0.101": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0.101
@@ -1793,8 +2158,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
PHYSDEV=eth0
USERCTL=no
- VLAN=yes"""),
- 'ifcfg-eth1': textwrap.dedent("""\
+ VLAN=yes"""
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=aa:d6:9f:2c:e8:80
@@ -1803,8 +2170,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
SLAVE=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth2': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth2
HWADDR=c0:bb:9f:2c:e8:80
@@ -1813,8 +2182,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
SLAVE=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth3': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth3": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth3
@@ -1822,8 +2193,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth4': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth4": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth4
@@ -1831,8 +2204,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth5': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth5": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth5
DHCLIENT_SET_DEFAULT_ROUTE=no
@@ -1840,8 +2215,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=no
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-ib0': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-ib0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=ib0
HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1
@@ -1851,9 +2228,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=InfiniBand
- USERCTL=no"""),
+ USERCTL=no"""
+ ),
},
- 'yaml': textwrap.dedent("""
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
# Physical interfaces.
@@ -1996,10 +2375,12 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
destination: 10.0.0.0/8
gateway: 11.0.0.1
metric: 3
- """).lstrip(),
+ """
+ ).lstrip(),
},
- 'bond': {
- 'yaml': textwrap.dedent("""
+ "bond": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2040,13 +2421,15 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
routes:
- gateway: 2001:67c:1562:1
network: 2001:67c:1
- netmask: ffff:ffff:0
+ netmask: "ffff:ffff::"
- gateway: 3001:67c:1562:1
network: 3001:67c:1
- netmask: ffff:ffff:0
+ netmask: "ffff:ffff::"
metric: 10000
- """),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -2088,8 +2471,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- metric: 10000
to: 3001:67c:1/32
via: 3001:67c:1562:1
- """),
- 'expected_eni': textwrap.dedent("""\
+ """
+ ),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -2151,8 +2536,10 @@ iface bond0 inet6 static
|| true
pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
|| true
- """),
- 'yaml-v2': textwrap.dedent("""
+ """
+ ),
+ "yaml-v2": textwrap.dedent(
+ """
version: 2
ethernets:
eth0:
@@ -2192,8 +2579,10 @@ iface bond0 inet6 static
- metric: 10000
to: 3001:67c:1562:8007::1/64
via: 3001:67c:1562:8007::aac:40b2
- """),
- 'expected_netplan-v2': textwrap.dedent("""
+ """
+ ),
+ "expected_netplan-v2": textwrap.dedent(
+ """
network:
bonds:
bond0:
@@ -2234,17 +2623,18 @@ iface bond0 inet6 static
macaddress: aa:bb:cc:dd:e8:01
set-name: vf0
version: 2
- """),
-
- 'expected_sysconfig_opensuse': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
- """miimon=100 num_grat_arp=5 """
- """downdelay=10 updelay=20 """
- """fail_over_mac=active """
- """primary=bond0s0 """
- """primary_reselect=always"
+ BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
+ """miimon=100 num_grat_arp=5 """
+ """downdelay=10 updelay=20 """
+ """fail_over_mac=active """
+ """primary=bond0s0 """
+ """primary_reselect=always"
BONDING_SLAVE_0=bond0s0
BONDING_SLAVE_1=bond0s1
BOOTPROTO=static
@@ -2256,27 +2646,33 @@ iface bond0 inet6 static
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
STARTMODE=auto
- """),
- 'ifcfg-bond0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s0": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:bb:cc:dd:e8:00
STARTMODE=hotplug
- """),
- 'ifcfg-bond0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s1": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:bb:cc:dd:e8:01
STARTMODE=hotplug
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
- """miimon=100 num_grat_arp=5 """
- """downdelay=10 updelay=20 """
- """fail_over_mac=active """
- """primary=bond0s0 """
- """primary_reselect=always"
+ """miimon=100 num_grat_arp=5 """
+ """downdelay=10 updelay=20 """
+ """fail_over_mac=active """
+ """primary=bond0s0 """
+ """primary_reselect=always"
BONDING_SLAVE0=bond0s0
BONDING_SLAVE1=bond0s1
BOOTPROTO=none
@@ -2297,8 +2693,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-bond0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=bond0s0
HWADDR=aa:bb:cc:dd:e8:00
@@ -2308,19 +2706,25 @@ iface bond0 inet6 static
SLAVE=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'route6-bond0': textwrap.dedent("""\
+ """
+ ),
+ "route6-bond0": textwrap.dedent(
+ """\
# Created by cloud-init on instance boot automatically, do not edit.
#
- 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0
- 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0
- """),
- 'route-bond0': textwrap.dedent("""\
+ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0
+ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0
+ """
+ ),
+ "route-bond0": textwrap.dedent(
+ """\
ADDRESS0=10.1.3.0
GATEWAY0=192.168.0.3
NETMASK0=255.255.255.0
- """),
- 'ifcfg-bond0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=bond0s1
HWADDR=aa:bb:cc:dd:e8:01
@@ -2330,11 +2734,13 @@ iface bond0 inet6 static
SLAVE=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'vlan': {
- 'yaml': textwrap.dedent("""
+ "vlan": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2357,14 +2763,18 @@ iface bond0 inet6 static
- gateway: 2001:1::1
netmask: '::'
network: '::'
- """),
- 'expected_sysconfig_opensuse': {
+ """
+ ),
+ "expected_sysconfig_opensuse": {
# TODO RJS: unknown proper BOOTPROTO setting ask Marius
- 'ifcfg-en0': textwrap.dedent("""\
+ "ifcfg-en0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=aa:bb:cc:dd:e8:00
- STARTMODE=auto"""),
- 'ifcfg-en0.99': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-en0.99": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.2.2
IPADDR1=192.168.1.2
@@ -2375,18 +2785,22 @@ iface bond0 inet6 static
STARTMODE=auto
ETHERDEVICE=en0
VLAN_ID=99
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-en0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-en0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=en0
HWADDR=aa:bb:cc:dd:e8:00
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-en0.99': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-en0.99": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=en0.99
@@ -2405,11 +2819,13 @@ iface bond0 inet6 static
ONBOOT=yes
PHYSDEV=en0
USERCTL=no
- VLAN=yes"""),
+ VLAN=yes"""
+ ),
},
},
- 'bridge': {
- 'yaml': textwrap.dedent("""
+ "bridge": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2434,9 +2850,11 @@ iface bond0 inet6 static
bridge_bridgeprio: 22
subnets:
- type: static
- address: 192.168.2.2/24"""),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-br0': textwrap.dedent("""\
+ address: 192.168.2.2/24"""
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-br0": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.2.2
NETMASK=255.255.255.0
@@ -2444,24 +2862,30 @@ iface bond0 inet6 static
BRIDGE_STP=off
BRIDGE_PRIORITY=22
BRIDGE_PORTS='eth0 eth1'
- """),
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=52:54:00:12:34:00
IPADDR6=2001:1::100/96
STARTMODE=auto
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=52:54:00:12:34:01
IPADDR6=2001:1::101/96
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-br0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-br0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=br0
IPADDR=192.168.2.2
@@ -2472,8 +2896,10 @@ iface bond0 inet6 static
STP=no
TYPE=Bridge
USERCTL=no
- """),
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth0
@@ -2486,8 +2912,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth1
@@ -2500,11 +2928,13 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'manual': {
- 'yaml': textwrap.dedent("""
+ "manual": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2526,8 +2956,10 @@ iface bond0 inet6 static
subnets:
- type: manual
control: manual
- """),
- 'expected_eni': textwrap.dedent("""\
+ """
+ ),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -2541,8 +2973,10 @@ iface bond0 inet6 static
# control-manual eth2
iface eth2 inet manual
- """),
- 'expected_netplan': textwrap.dedent("""\
+ """
+ ),
+ "expected_netplan": textwrap.dedent(
+ """\
network:
version: 2
@@ -2562,29 +2996,37 @@ iface bond0 inet6 static
match:
macaddress: 52:54:00:12:34:ff
set-name: eth2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:00
IPADDR=192.168.1.2
NETMASK=255.255.255.0
STARTMODE=manual
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:aa
MTU=1480
STARTMODE=auto
- """),
- 'ifcfg-eth2': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:ff
STARTMODE=manual
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-eth0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=52:54:00:12:34:00
@@ -2594,8 +3036,10 @@ iface bond0 inet6 static
ONBOOT=no
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=52:54:00:12:34:aa
@@ -2604,8 +3048,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth2': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth2
HWADDR=52:54:00:12:34:ff
@@ -2613,51 +3059,85 @@ iface bond0 inet6 static
ONBOOT=no
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
}
CONFIG_V1_EXPLICIT_LOOPBACK = {
- 'version': 1,
- 'config': [{'name': 'eth0', 'type': 'physical',
- 'subnets': [{'control': 'auto', 'type': 'dhcp'}]},
- {'name': 'lo', 'type': 'loopback',
- 'subnets': [{'control': 'auto', 'type': 'loopback'}]},
- ]}
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ },
+ {
+ "name": "lo",
+ "type": "loopback",
+ "subnets": [{"control": "auto", "type": "loopback"}],
+ },
+ ],
+}
CONFIG_V1_SIMPLE_SUBNET = {
- 'version': 1,
- 'config': [{'mac_address': '52:54:00:12:34:00',
- 'name': 'interface0',
- 'subnets': [{'address': '10.0.2.15',
- 'gateway': '10.0.2.2',
- 'netmask': '255.255.255.0',
- 'type': 'static'}],
- 'type': 'physical'}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "52:54:00:12:34:00",
+ "name": "interface0",
+ "subnets": [
+ {
+ "address": "10.0.2.15",
+ "gateway": "10.0.2.2",
+ "netmask": "255.255.255.0",
+ "type": "static",
+ }
+ ],
+ "type": "physical",
+ }
+ ],
+}
CONFIG_V1_MULTI_IFACE = {
- 'version': 1,
- 'config': [{'type': 'physical',
- 'mtu': 1500,
- 'subnets': [{'type': 'static',
- 'netmask': '255.255.240.0',
- 'routes': [{'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '51.68.80.1'}],
- 'address': '51.68.89.122',
- 'ipv4': True}],
- 'mac_address': 'fa:16:3e:25:b4:59',
- 'name': 'eth0'},
- {'type': 'physical',
- 'mtu': 9000,
- 'subnets': [{'type': 'dhcp4'}],
- 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]}
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "mtu": 1500,
+ "subnets": [
+ {
+ "type": "static",
+ "netmask": "255.255.240.0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "51.68.80.1",
+ }
+ ],
+ "address": "51.68.89.122",
+ "ipv4": True,
+ }
+ ],
+ "mac_address": "fa:16:3e:25:b4:59",
+ "name": "eth0",
+ },
+ {
+ "type": "physical",
+ "mtu": 9000,
+ "subnets": [{"type": "dhcp4"}],
+ "mac_address": "fa:16:3e:b1:ca:29",
+ "name": "eth1",
+ },
+ ],
+}
DEFAULT_DEV_ATTRS = {
- 'eth1000': {
+ "eth1000": {
"bridge": False,
"carrier": False,
"dormant": False,
@@ -2670,16 +3150,26 @@ DEFAULT_DEV_ATTRS = {
}
-def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path, dev_attrs=None):
+def _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=None,
+):
if not dev_attrs:
dev_attrs = DEFAULT_DEV_ATTRS
mock_get_devicelist.return_value = dev_attrs.keys()
- def fake_read(devname, path, translate=None,
- on_enoent=None, on_keyerror=None,
- on_einval=None):
+ def fake_read(
+ devname,
+ path,
+ translate=None,
+ on_enoent=None,
+ on_keyerror=None,
+ on_einval=None,
+ ):
return dev_attrs[devname][path]
mock_read_sys_net.side_effect = fake_read
@@ -2689,99 +3179,137 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
for dev in dev_attrs:
os.makedirs(os.path.join(tmp_dir, dev))
- with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh:
- fh.write(dev_attrs[dev]['operstate'])
+ with open(os.path.join(tmp_dir, dev, "operstate"), "w") as fh:
+ fh.write(dev_attrs[dev]["operstate"])
os.makedirs(os.path.join(tmp_dir, dev, "device"))
- for key in ['device/driver']:
+ for key in ["device/driver"]:
if key in dev_attrs[dev] and dev_attrs[dev][key]:
target = dev_attrs[dev][key]
link = os.path.join(tmp_dir, dev, key)
- print('symlink %s -> %s' % (link, target))
+ print("symlink %s -> %s" % (link, target))
os.symlink(target, link)
mock_sys_dev_path.side_effect = sys_dev_path
class TestGenerateFallbackConfig(CiTestCase):
-
def setUp(self):
super(TestGenerateFallbackConfig, self).setUp()
self.add_patch(
- "cloudinit.util.get_cmdline", "m_get_cmdline",
- return_value="root=/dev/sda1")
+ "cloudinit.util.get_cmdline",
+ "m_get_cmdline",
+ return_value="root=/dev/sda1",
+ )
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path):
+ def test_device_driver_v2(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
"""Network configuration for generate_fallback_config is version 2."""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
network_cfg = net.generate_fallback_config(config_driver=True)
expected = {
- 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0',
- 'match': {'macaddress': '00:11:22:33:44:55',
- 'driver': 'hv_netsvc'}}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "set-name": "eth0",
+ "match": {
+ "macaddress": "00:11:22:33:44:55",
+ "driver": "hv_netsvc",
+ },
+ }
+ },
+ "version": 2,
+ }
self.assertEqual(expected, network_cfg)
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path):
+ def test_device_driver(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
network_cfg = net.generate_fallback_config(config_driver=True)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
# don't set rulepath so eni writes them
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ {"eni_path": "interfaces", "netrules_path": "netrules"}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
print(contents)
expected = """
@@ -2793,8 +3321,8 @@ iface eth0 inet dhcp
"""
self.assertEqual(expected.lstrip(), contents.lstrip())
- self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
- with open(os.path.join(render_dir, 'netrules')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+ with open(os.path.join(render_dir, "netrules")) as fh:
contents = fh.read()
print(contents)
expected_rule = [
@@ -2804,48 +3332,65 @@ iface eth0 inet dhcp
'ATTR{address}=="00:11:22:33:44:55"',
'NAME="eth0"',
]
- self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+ self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip())
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver_blacklist(self, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path):
+ def test_device_driver_blacklist(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
devices = {
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
- blacklist = ['mlx4_core']
- network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist,
- config_driver=True)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ blacklist = ["mlx4_core"]
+ network_cfg = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
# don't set rulepath so eni writes them
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ {"eni_path": "interfaces", "netrules_path": "netrules"}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
print(contents)
expected = """
@@ -2857,8 +3402,8 @@ iface eth1 inet dhcp
"""
self.assertEqual(expected.lstrip(), contents.lstrip())
- self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
- with open(os.path.join(render_dir, 'netrules')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+ with open(os.path.join(render_dir, "netrules")) as fh:
contents = fh.read()
print(contents)
expected_rule = [
@@ -2868,35 +3413,54 @@ iface eth1 inet dhcp
'ATTR{address}=="00:11:22:33:44:55"',
'NAME="eth1"',
]
- self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+ self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("cloudinit.util.udevadm_settle")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path, mock_settle, m_get_cmdline):
+ def test_unstable_names(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_settle,
+ m_get_cmdline,
+ ):
"""verify that udevadm settle is called when we find unstable names"""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': False},
- 'ens4': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": False,
+ },
+ "ens4": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
- m_get_cmdline.return_value = ''
+ m_get_cmdline.return_value = ""
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
net.generate_fallback_config(config_driver=True)
self.assertEqual(1, mock_settle.call_count)
@@ -2905,48 +3469,73 @@ iface eth1 inet dhcp
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_unstable_names_disabled(self, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- mock_settle, m_get_cmdline):
+ def test_unstable_names_disabled(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_settle,
+ m_get_cmdline,
+ ):
"""verify udevadm settle not called when cmdline has net.ifnames=0"""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': False},
- 'ens4': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": False,
+ },
+ "ens4": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
- m_get_cmdline.return_value = 'net.ifnames=0'
+ m_get_cmdline.return_value = "net.ifnames=0"
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
net.generate_fallback_config(config_driver=True)
self.assertEqual(0, mock_settle.call_count)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestRhelSysConfigRendering(CiTestCase):
with_logs = True
nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf"
- scripts_dir = '/etc/sysconfig/network-scripts'
- header = ('# Created by cloud-init on instance boot automatically, '
- 'do not edit.\n#\n')
+ scripts_dir = "/etc/sysconfig/network-scripts"
+ header = (
+ "# Created by cloud-init on instance boot automatically, "
+ "do not edit.\n#\n"
+ )
- expected_name = 'expected_sysconfig_rhel'
+ expected_name = "expected_sysconfig_rhel"
def _get_renderer(self):
- distro_cls = distros.fetch('rhel')
+ distro_cls = distros.fetch("rhel")
return sysconfig.Renderer(
- config=distro_cls.renderer_configs.get('sysconfig'))
+ config=distro_cls.renderer_configs.get("sysconfig")
+ )
def _render_and_read(self, network_config=None, state=None, dir=None):
if dir is None:
@@ -2964,9 +3553,8 @@ class TestRhelSysConfigRendering(CiTestCase):
return dir2dict(dir)
def _compare_files_to_expected(self, expected, found):
-
def _try_load(f):
- ''' Attempt to load shell content, otherwise return as-is '''
+ """Attempt to load shell content, otherwise return as-is"""
try:
return util.load_shell_content(f)
except ValueError:
@@ -2977,12 +3565,15 @@ class TestRhelSysConfigRendering(CiTestCase):
orig_maxdiff = self.maxDiff
expected_d = dict(
(os.path.join(self.scripts_dir, k), _try_load(v))
- for k, v in expected.items())
+ for k, v in expected.items()
+ )
# only compare the files in scripts_dir
scripts_found = dict(
- (k, _try_load(v)) for k, v in found.items()
- if k.startswith(self.scripts_dir))
+ (k, _try_load(v))
+ for k, v in found.items()
+ if k.startswith(self.scripts_dir)
+ )
try:
self.maxDiff = None
self.assertEqual(expected_d, scripts_found)
@@ -2990,9 +3581,14 @@ class TestRhelSysConfigRendering(CiTestCase):
self.maxDiff = orig_maxdiff
def _assert_headers(self, found):
- missing = [f for f in found
- if (f.startswith(self.scripts_dir) and
- not found[f].startswith(self.header))]
+ missing = [
+ f
+ for f in found
+ if (
+ f.startswith(self.scripts_dir)
+ and not found[f].startswith(self.header)
+ )
+ ]
if missing:
raise AssertionError("Missing headers in: %s" % missing)
@@ -3000,16 +3596,22 @@ class TestRhelSysConfigRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
@@ -3017,7 +3619,7 @@ class TestRhelSysConfigRendering(CiTestCase):
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
- render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000'
+ render_file = "etc/sysconfig/network-scripts/ifcfg-eth1000"
with open(os.path.join(render_dir, render_file)) as fh:
content = fh.read()
expected_content = """
@@ -3037,35 +3639,44 @@ USERCTL=no
"""ValueError is raised when duplicate ipv4 gateways exist."""
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }, {
- "netmask": "0.0.0.0", # A second default gateway
- "network": "0.0.0.0",
- "gateway": "172.20.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ },
+ {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ },
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
@@ -3075,56 +3686,138 @@ USERCTL=no
"""ValueError is raised when duplicate ipv6 gateways exist."""
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv6",
- "type": "ipv6", "netmask": "",
- "link": "tap1a81968a-79",
- "routes": [{
- "gateway": "2001:DB8::1",
- "netmask": "::",
- "network": "::"
- }, {
- "gateway": "2001:DB9::1",
- "netmask": "::",
- "network": "::"
- }],
- "ip_address": "2001:DB8::10", "id": "network1"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv6",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::",
+ },
+ {
+ "gateway": "2001:DB9::1",
+ "netmask": "::",
+ "network": "::",
+ },
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
+ def test_invalid_network_mask_ipv6(self):
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "public-ipv6",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "ff:ff:ff:ff::",
+ "network": "2001:DB8:1::1",
+ },
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ },
+ ],
+ }
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ with self.assertRaises(ValueError):
+ network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
+ def test_invalid_network_mask_ipv4(self):
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "172.20.0.1",
+ "netmask": "255.234.255.0",
+ "network": "172.19.0.0",
+ },
+ ],
+ "ip_address": "172.20.0.10",
+ "id": "network1",
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ },
+ ],
+ }
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ with self.assertRaises(ValueError):
+ network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
def test_openstack_rendering_samples(self):
for os_sample in OS_SAMPLES:
render_dir = self.tmp_dir()
- ex_input = os_sample['in_data']
- ex_mac_addrs = os_sample['in_macs']
+ ex_input = os_sample["in_data"]
+ ex_mac_addrs = os_sample["in_macs"]
network_cfg = openstack.convert_net_json(
- ex_input, known_macs=ex_mac_addrs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ex_input, known_macs=ex_mac_addrs
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
            # render multiple times to simulate reboots
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
- for fn, expected_content in os_sample.get('out_sysconfig_rhel',
- []):
+ for fn, expected_content in os_sample.get(
+ "out_sysconfig_rhel", []
+ ):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
@@ -3135,8 +3828,8 @@ USERCTL=no
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3152,10 +3845,10 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
        # The configuration has no nameserver information; make sure we
        # do not write the resolv.conf file
- respath = '/etc/resolv.conf'
+ respath = "/etc/resolv.conf"
self.assertNotIn(respath, found.keys())
def test_network_config_v1_multi_iface_samples(self):
@@ -3165,8 +3858,8 @@ USERCTL=no
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected_i1 = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3183,7 +3876,7 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected_i1, found[nspath + "ifcfg-eth0"])
expected_i2 = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3197,21 +3890,21 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1'])
+ self.assertEqual(expected_i2, found[nspath + "ifcfg-eth1"])
def test_config_with_explicit_loopback(self):
ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
# write an etc/resolv.conf and expect it to not be modified
- resolvconf = os.path.join(render_dir, 'etc/resolv.conf')
+ resolvconf = os.path.join(render_dir, "etc/resolv.conf")
resolvconf_content = "# Original Content"
util.write_file(resolvconf, resolvconf_content)
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3222,171 +3915,188 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
# a dhcp only config should not modify resolv.conf
- self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+ self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
def test_bond_config(self):
- entry = NETWORK_CONFIGS['bond']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bond"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_vlan_config(self):
- entry = NETWORK_CONFIGS['vlan']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["vlan"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_bridge_config(self):
- entry = NETWORK_CONFIGS['bridge']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bridge"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_manual_config(self):
- entry = NETWORK_CONFIGS['manual']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_all_config(self):
- entry = NETWORK_CONFIGS['all']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
self.assertNotIn(
- 'WARNING: Network config: ignoring eth0.101 device-level mtu',
- self.logs.getvalue())
+ "WARNING: Network config: ignoring eth0.101 device-level mtu",
+ self.logs.getvalue(),
+ )
def test_small_config(self):
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_v4_and_v6_static_config(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
expected_msg = (
- 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
- ' because ipv4 subnet-level mtu:9000 provided.')
+ "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+ " because ipv4 subnet-level mtu:9000 provided."
+ )
self.assertIn(expected_msg, self.logs.getvalue())
def test_dhcpv6_only_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_accept_ra_config_v1(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_accept_ra_config_v2(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_reject_ra_config_v1(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_stattic6_from_json(self):
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }, {
- "netmask": "0.0.0.0", # A second default gateway
- "network": "0.0.0.0",
- "gateway": "172.20.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "mgmt",
- "netmask": "ffff:ffff:ffff:ffff::",
- "link": "interface1",
- "mode": "link-local",
- "routes": [],
- "ip_address": "fe80::c096:67ff:fe5c:6e84",
- "type": "static6",
- "id": "network1",
- "services": [],
- "accept-ra": "false"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ },
+ {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ },
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "mgmt",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "interface1",
+ "mode": "link-local",
+ "routes": [],
+ "ip_address": "fe80::c096:67ff:fe5c:6e84",
+ "type": "static6",
+ "id": "network1",
+ "services": [],
+ "accept-ra": "false",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
def test_static6_from_yaml(self):
- entry = NETWORK_CONFIGS['static6']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["static6"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_reject_ra_config_v2(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateless_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateful_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateful']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateful"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
@@ -3397,20 +4107,21 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a list here
- with open(nm_cfg, 'w') as fh:
- fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n')
+ with open(nm_cfg, "w") as fh:
+ fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n")
self.assertTrue(os.path.exists(nm_cfg))
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_check_ifcfg_rh_plugins_string(self):
"""ifcfg-rh plugin is append when plugins is a string."""
@@ -3420,22 +4131,23 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a value here
- util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n')
+ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n")
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check raw content has plugin
nm_file_content = util.load_file(nm_cfg)
- self.assertIn('ifcfg-rh', nm_file_content)
+ self.assertIn("ifcfg-rh", nm_file_content)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_check_ifcfg_rh_plugins_no_plugins(self):
"""enable_ifcfg_plugin creates plugins value if missing."""
@@ -3445,28 +4157,32 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is missing
- util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n')
+ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n")
self.assertTrue(os.path.exists(nm_cfg))
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_netplan_dhcp_false_disable_dhcp_in_state(self):
"""netplan config with dhcp[46]: False should not add dhcp in state"""
net_config = yaml.load(NETPLAN_DHCP_FALSE)
- ns = network_state.parse_net_config_data(net_config,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(net_config, skip_broken=False)
- dhcp_found = [snet for iface in ns.iter_interfaces()
- for snet in iface['subnets'] if 'dhcp' in snet['type']]
+ dhcp_found = [
+ snet
+ for iface in ns.iter_interfaces()
+ for snet in iface["subnets"]
+ if "dhcp" in snet["type"]
+ ]
self.assertEqual([], dhcp_found)
@@ -3474,9 +4190,10 @@ USERCTL=no
"""netplan cfg with dhcp[46]: False should not have bootproto=dhcp"""
entry = {
- 'yaml': NETPLAN_DHCP_FALSE,
- 'expected_sysconfig': {
- 'ifcfg-ens3': textwrap.dedent("""\
+ "yaml": NETPLAN_DHCP_FALSE,
+ "expected_sysconfig": {
+ "ifcfg-ens3": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=ens3
@@ -3496,33 +4213,42 @@ USERCTL=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- }
+ """
+ ),
+ },
}
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry["expected_sysconfig"], found)
self._assert_headers(found)
def test_from_v2_vlan_mtu(self):
"""verify mtu gets rendered on bond when source is netplan."""
v2data = {
- 'version': 2,
- 'ethernets': {'eno1': {}},
- 'vlans': {
- 'eno1.1000': {
- 'addresses': ["192.6.1.9/24"],
- 'id': 1000, 'link': 'eno1', 'mtu': 1495}}}
+ "version": 2,
+ "ethernets": {"eno1": {}},
+ "vlans": {
+ "eno1.1000": {
+ "addresses": ["192.6.1.9/24"],
+ "id": 1000,
+ "link": "eno1",
+ "mtu": 1495,
+ }
+ },
+ }
expected = {
- 'ifcfg-eno1': textwrap.dedent("""\
+ "ifcfg-eno1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eno1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eno1.1000': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eno1.1000": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eno1.1000
IPADDR=192.6.1.9
@@ -3533,23 +4259,29 @@ USERCTL=no
PHYSDEV=eno1
USERCTL=no
VLAN=yes
- """)
+ """
+ ),
}
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
def test_from_v2_bond_mtu(self):
"""verify mtu gets rendered on bond when source is netplan."""
v2data = {
- 'version': 2,
- 'bonds': {
- 'bond0': {'addresses': ['10.101.8.65/26'],
- 'interfaces': ['enp0s0', 'enp0s1'],
- 'mtu': 1334,
- 'parameters': {}}}
+ "version": 2,
+ "bonds": {
+ "bond0": {
+ "addresses": ["10.101.8.65/26"],
+ "interfaces": ["enp0s0", "enp0s1"],
+ "mtu": 1334,
+ "parameters": {},
+ }
+ },
}
expected = {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_SLAVE0=enp0s0
BONDING_SLAVE1=enp0s1
@@ -3562,8 +4294,10 @@ USERCTL=no
ONBOOT=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-enp0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-enp0s0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BOOTPROTO=none
DEVICE=enp0s0
@@ -3573,8 +4307,10 @@ USERCTL=no
SLAVE=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-enp0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-enp0s1": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BOOTPROTO=none
DEVICE=enp0s1
@@ -3584,21 +4320,28 @@ USERCTL=no
SLAVE=yes
TYPE=Bond
USERCTL=no
- """)
+ """
+ ),
}
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
def test_from_v2_route_metric(self):
"""verify route-metric gets rendered on nic when source is netplan."""
- overrides = {'route-metric': 100}
+ overrides = {"route-metric": 100}
v2base = {
- 'version': 2,
- 'ethernets': {
- 'eno1': {'dhcp4': True,
- 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}}
+ "version": 2,
+ "ethernets": {
+ "eno1": {
+ "dhcp4": True,
+ "match": {"macaddress": "07-1c-c6-75-a4-be"},
+ }
+ },
+ }
expected = {
- 'ifcfg-eno1': textwrap.dedent("""\
+ "ifcfg-eno1": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eno1
HWADDR=07-1c-c6-75-a4-be
@@ -3607,32 +4350,42 @@ USERCTL=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
}
- for dhcp_ver in ('dhcp4', 'dhcp6'):
+ for dhcp_ver in ("dhcp4", "dhcp6"):
v2data = copy.deepcopy(v2base)
- if dhcp_ver == 'dhcp6':
- expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n"
- v2data['ethernets']['eno1'].update(
- {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides})
+ if dhcp_ver == "dhcp6":
+ expected["ifcfg-eno1"] += "IPV6INIT=yes\nDHCPV6C=yes\n"
+ v2data["ethernets"]["eno1"].update(
+ {dhcp_ver: True, "{0}-overrides".format(dhcp_ver): overrides}
+ )
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestOpenSuseSysConfigRendering(CiTestCase):
with_logs = True
- scripts_dir = '/etc/sysconfig/network'
- header = ('# Created by cloud-init on instance boot automatically, '
- 'do not edit.\n#\n')
+ scripts_dir = "/etc/sysconfig/network"
+ header = (
+ "# Created by cloud-init on instance boot automatically, "
+ "do not edit.\n#\n"
+ )
- expected_name = 'expected_sysconfig_opensuse'
+ expected_name = "expected_sysconfig_opensuse"
def _get_renderer(self):
- distro_cls = distros.fetch('opensuse')
+ distro_cls = distros.fetch("opensuse")
return sysconfig.Renderer(
- config=distro_cls.renderer_configs.get('sysconfig'))
+ config=distro_cls.renderer_configs.get("sysconfig")
+ )
def _render_and_read(self, network_config=None, state=None, dir=None):
if dir is None:
@@ -3653,12 +4406,15 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
orig_maxdiff = self.maxDiff
expected_d = dict(
(os.path.join(self.scripts_dir, k), util.load_shell_content(v))
- for k, v in expected.items())
+ for k, v in expected.items()
+ )
# only compare the files in scripts_dir
scripts_found = dict(
- (k, util.load_shell_content(v)) for k, v in found.items()
- if k.startswith(self.scripts_dir))
+ (k, util.load_shell_content(v))
+ for k, v in found.items()
+ if k.startswith(self.scripts_dir)
+ )
try:
self.maxDiff = None
self.assertEqual(expected_d, scripts_found)
@@ -3666,9 +4422,14 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
self.maxDiff = orig_maxdiff
def _assert_headers(self, found):
- missing = [f for f in found
- if (f.startswith(self.scripts_dir) and
- not found[f].startswith(self.header))]
+ missing = [
+ f
+ for f in found
+ if (
+ f.startswith(self.scripts_dir)
+ and not found[f].startswith(self.header)
+ )
+ ]
if missing:
raise AssertionError("Missing headers in: %s" % missing)
@@ -3676,16 +4437,22 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
@@ -3693,7 +4460,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
- render_file = 'etc/sysconfig/network/ifcfg-eth1000'
+ render_file = "etc/sysconfig/network/ifcfg-eth1000"
with open(os.path.join(render_dir, render_file)) as fh:
content = fh.read()
expected_content = """
@@ -3707,98 +4474,101 @@ STARTMODE=auto
# TODO(rjschwei): re-enable test once route writing is implemented
# for SUSE distros
-# def test_multiple_ipv4_default_gateways(self):
-# """ValueError is raised when duplicate ipv4 gateways exist."""
-# net_json = {
-# "services": [{"type": "dns", "address": "172.19.0.12"}],
-# "networks": [{
-# "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
-# "type": "ipv4", "netmask": "255.255.252.0",
-# "link": "tap1a81968a-79",
-# "routes": [{
-# "netmask": "0.0.0.0",
-# "network": "0.0.0.0",
-# "gateway": "172.19.3.254",
-# }, {
-# "netmask": "0.0.0.0", # A second default gateway
-# "network": "0.0.0.0",
-# "gateway": "172.20.3.254",
-# }],
-# "ip_address": "172.19.1.34", "id": "network0"
-# }],
-# "links": [
-# {
-# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-# "mtu": None, "type": "bridge", "id":
-# "tap1a81968a-79",
-# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-# },
-# ],
-# }
-# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-# render_dir = self.tmp_dir()
-# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-# ns = network_state.parse_net_config_data(network_cfg,
-# skip_broken=False)
-# renderer = self._get_renderer()
-# with self.assertRaises(ValueError):
-# renderer.render_network_state(ns, target=render_dir)
-# self.assertEqual([], os.listdir(render_dir))
-#
-# def test_multiple_ipv6_default_gateways(self):
-# """ValueError is raised when duplicate ipv6 gateways exist."""
-# net_json = {
-# "services": [{"type": "dns", "address": "172.19.0.12"}],
-# "networks": [{
-# "network_id": "public-ipv6",
-# "type": "ipv6", "netmask": "",
-# "link": "tap1a81968a-79",
-# "routes": [{
-# "gateway": "2001:DB8::1",
-# "netmask": "::",
-# "network": "::"
-# }, {
-# "gateway": "2001:DB9::1",
-# "netmask": "::",
-# "network": "::"
-# }],
-# "ip_address": "2001:DB8::10", "id": "network1"
-# }],
-# "links": [
-# {
-# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-# "mtu": None, "type": "bridge", "id":
-# "tap1a81968a-79",
-# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-# },
-# ],
-# }
-# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-# render_dir = self.tmp_dir()
-# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-# ns = network_state.parse_net_config_data(network_cfg,
-# skip_broken=False)
-# renderer = self._get_renderer()
-# with self.assertRaises(ValueError):
-# renderer.render_network_state(ns, target=render_dir)
-# self.assertEqual([], os.listdir(render_dir))
+ # def test_multiple_ipv4_default_gateways(self):
+ # """ValueError is raised when duplicate ipv4 gateways exist."""
+ # net_json = {
+ # "services": [{"type": "dns", "address": "172.19.0.12"}],
+ # "networks": [{
+ # "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ # "type": "ipv4", "netmask": "255.255.252.0",
+ # "link": "tap1a81968a-79",
+ # "routes": [{
+ # "netmask": "0.0.0.0",
+ # "network": "0.0.0.0",
+ # "gateway": "172.19.3.254",
+ # }, {
+ # "netmask": "0.0.0.0", # A second default gateway
+ # "network": "0.0.0.0",
+ # "gateway": "172.20.3.254",
+ # }],
+ # "ip_address": "172.19.1.34", "id": "network0"
+ # }],
+ # "links": [
+ # {
+ # "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ # "mtu": None, "type": "bridge", "id":
+ # "tap1a81968a-79",
+ # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ # },
+ # ],
+ # }
+ # macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ # render_dir = self.tmp_dir()
+ # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501
+ # ns = network_state.parse_net_config_data(network_cfg,
+ # skip_broken=False)
+ # renderer = self._get_renderer()
+ # with self.assertRaises(ValueError):
+ # renderer.render_network_state(ns, target=render_dir)
+ # self.assertEqual([], os.listdir(render_dir))
+ #
+ # def test_multiple_ipv6_default_gateways(self):
+ # """ValueError is raised when duplicate ipv6 gateways exist."""
+ # net_json = {
+ # "services": [{"type": "dns", "address": "172.19.0.12"}],
+ # "networks": [{
+ # "network_id": "public-ipv6",
+ # "type": "ipv6", "netmask": "",
+ # "link": "tap1a81968a-79",
+ # "routes": [{
+ # "gateway": "2001:DB8::1",
+ # "netmask": "::",
+ # "network": "::"
+ # }, {
+ # "gateway": "2001:DB9::1",
+ # "netmask": "::",
+ # "network": "::"
+ # }],
+ # "ip_address": "2001:DB8::10", "id": "network1"
+ # }],
+ # "links": [
+ # {
+ # "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ # "mtu": None, "type": "bridge", "id":
+ # "tap1a81968a-79",
+ # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ # },
+ # ],
+ # }
+ # macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ # render_dir = self.tmp_dir()
+ # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501
+ # ns = network_state.parse_net_config_data(network_cfg,
+ # skip_broken=False)
+ # renderer = self._get_renderer()
+ # with self.assertRaises(ValueError):
+ # renderer.render_network_state(ns, target=render_dir)
+ # self.assertEqual([], os.listdir(render_dir))
def test_openstack_rendering_samples(self):
for os_sample in OS_SAMPLES:
render_dir = self.tmp_dir()
- ex_input = os_sample['in_data']
- ex_mac_addrs = os_sample['in_macs']
+ ex_input = os_sample["in_data"]
+ ex_mac_addrs = os_sample["in_macs"]
network_cfg = openstack.convert_net_json(
- ex_input, known_macs=ex_mac_addrs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ex_input, known_macs=ex_mac_addrs
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
            # render multiple times to simulate reboots
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
- for fn, expected_content in os_sample.get('out_sysconfig_opensuse',
- []):
+ for fn, expected_content in os_sample.get(
+ "out_sysconfig_opensuse", []
+ ):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
@@ -3809,8 +4579,8 @@ STARTMODE=auto
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3820,10 +4590,10 @@ LLADDR=52:54:00:12:34:00
NETMASK=255.255.255.0
STARTMODE=auto
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
        # The configuration has no nameserver information; make sure we
        # do not write the resolv.conf file
- respath = '/etc/resolv.conf'
+ respath = "/etc/resolv.conf"
self.assertNotIn(respath, found.keys())
def test_config_with_explicit_loopback(self):
@@ -3831,33 +4601,33 @@ STARTMODE=auto
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
# write an etc/resolv.conf and expect it to not be modified
- resolvconf = os.path.join(render_dir, 'etc/resolv.conf')
+ resolvconf = os.path.join(render_dir, "etc/resolv.conf")
resolvconf_content = "# Original Content"
util.write_file(resolvconf, resolvconf_content)
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=dhcp
STARTMODE=auto
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
# a dhcp only config should not modify resolv.conf
- self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+ self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
def test_bond_config(self):
- expected_name = 'expected_sysconfig_opensuse'
- entry = NETWORK_CONFIGS['bond']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ expected_name = "expected_sysconfig_opensuse"
+ entry = NETWORK_CONFIGS["bond"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
for fname, contents in entry[expected_name].items():
print(fname)
print(contents)
print()
- print('-- expected ^ | v rendered --')
+ print("-- expected ^ | v rendered --")
for fname, contents in found.items():
print(fname)
print(contents)
@@ -3866,120 +4636,129 @@ STARTMODE=auto
self._assert_headers(found)
def test_vlan_config(self):
- entry = NETWORK_CONFIGS['vlan']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["vlan"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_bridge_config(self):
- entry = NETWORK_CONFIGS['bridge']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bridge"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_manual_config(self):
- entry = NETWORK_CONFIGS['manual']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_all_config(self):
- entry = NETWORK_CONFIGS['all']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
self.assertNotIn(
- 'WARNING: Network config: ignoring eth0.101 device-level mtu',
- self.logs.getvalue())
+ "WARNING: Network config: ignoring eth0.101 device-level mtu",
+ self.logs.getvalue(),
+ )
def test_small_config(self):
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_v4_and_v6_static_config(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
expected_msg = (
- 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
- ' because ipv4 subnet-level mtu:9000 provided.')
+ "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+ " because ipv4 subnet-level mtu:9000 provided."
+ )
self.assertIn(expected_msg, self.logs.getvalue())
def test_dhcpv6_only_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_simple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateless_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_render_v6_and_v4(self):
- entry = NETWORK_CONFIGS['v6_and_v4']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v6_and_v4"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
class TestEniNetRendering(CiTestCase):
-
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': None})
+ {"eni_path": "interfaces", "netrules_path": None}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
expected = """
@@ -4004,62 +4783,74 @@ auto eth0
iface eth0 inet dhcp
"""
self.assertEqual(
- expected, dir2dict(tmp_dir)['/etc/network/interfaces'])
+ expected, dir2dict(tmp_dir)["/etc/network/interfaces"]
+ )
def test_v2_route_metric_to_eni(self):
"""Network v2 route-metric overrides are preserved in eni output"""
tmp_dir = self.tmp_dir()
renderer = eni.Renderer()
- expected_tmpl = textwrap.dedent("""\
+ expected_tmpl = textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet{suffix} dhcp
metric 100
- """)
- for dhcp_ver in ('dhcp4', 'dhcp6'):
- suffix = '6' if dhcp_ver == 'dhcp6' else ''
+ """
+ )
+ for dhcp_ver in ("dhcp4", "dhcp6"):
+ suffix = "6" if dhcp_ver == "dhcp6" else ""
dhcp_cfg = {
dhcp_ver: True,
- '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}}
- v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}}
+ "{ver}-overrides".format(ver=dhcp_ver): {"route-metric": 100},
+ }
+ v2_input = {"version": 2, "ethernets": {"eth0": dhcp_cfg}}
ns = network_state.parse_net_config_data(v2_input)
renderer.render_network_state(ns, target=tmp_dir)
self.assertEqual(
expected_tmpl.format(suffix=suffix),
- dir2dict(tmp_dir)['/etc/network/interfaces'])
+ dir2dict(tmp_dir)["/etc/network/interfaces"],
+ )
class TestNetplanNetRendering(CiTestCase):
-
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.netplan._clean_default")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path,
- mock_clean_default, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_clean_default,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': False})
+ {"netplan_path": render_target, "postcmds": False}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- render_target)))
+ self.assertTrue(
+ os.path.exists(os.path.join(render_dir, render_target))
+ )
with open(os.path.join(render_dir, render_target)) as fh:
contents = fh.read()
print(contents)
@@ -4079,8 +4870,9 @@ network:
class TestNetplanCleanDefault(CiTestCase):
- snapd_known_path = 'etc/netplan/00-snapd-config.yaml'
- snapd_known_content = textwrap.dedent("""\
+ snapd_known_path = "etc/netplan/00-snapd-config.yaml"
+ snapd_known_content = textwrap.dedent(
+ """\
# This is the initial network config.
# It can be overwritten by cloud-init or console-conf.
network:
@@ -4094,15 +4886,18 @@ class TestNetplanCleanDefault(CiTestCase):
match:
name: "eth*"
dhcp4: true
- """)
+ """
+ )
stub_known = {
- 'run/systemd/network/10-netplan-all-en.network': 'foo-en',
- 'run/systemd/network/10-netplan-all-eth.network': 'foo-eth',
- 'run/systemd/generator/netplan.stamp': 'stamp',
+ "run/systemd/network/10-netplan-all-en.network": "foo-en",
+ "run/systemd/network/10-netplan-all-eth.network": "foo-eth",
+ "run/systemd/generator/netplan.stamp": "stamp",
}
def test_clean_known_config_cleaned(self):
- content = {self.snapd_known_path: self.snapd_known_content, }
+ content = {
+ self.snapd_known_path: self.snapd_known_content,
+ }
content.update(self.stub_known)
tmpd = self.tmp_dir()
files = sorted(populate_dir(tmpd, content))
@@ -4111,7 +4906,9 @@ class TestNetplanCleanDefault(CiTestCase):
self.assertEqual([], found)
def test_clean_unknown_config_not_cleaned(self):
- content = {self.snapd_known_path: self.snapd_known_content, }
+ content = {
+ self.snapd_known_path: self.snapd_known_content,
+ }
content.update(self.stub_known)
content[self.snapd_known_path] += "# user put a comment\n"
tmpd = self.tmp_dir()
@@ -4142,78 +4939,100 @@ class TestNetplanCleanDefault(CiTestCase):
class TestNetplanPostcommands(CiTestCase):
mycfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
-
- @mock.patch.object(netplan.Renderer, '_netplan_generate')
- @mock.patch.object(netplan.Renderer, '_net_setup_link')
- @mock.patch('cloudinit.subp.subp')
- def test_netplan_render_calls_postcmds(self, mock_subp,
- mock_netplan_generate,
- mock_net_setup_link):
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ "version": 1,
+ }
+
+ @mock.patch.object(netplan.Renderer, "_netplan_generate")
+ @mock.patch.object(netplan.Renderer, "_net_setup_link")
+ @mock.patch("cloudinit.subp.subp")
+ def test_netplan_render_calls_postcmds(
+ self, mock_subp, mock_netplan_generate, mock_net_setup_link
+ ):
tmp_dir = self.tmp_dir()
- ns = network_state.parse_net_config_data(self.mycfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False)
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': True})
+ {"netplan_path": render_target, "postcmds": True}
+ )
mock_subp.side_effect = iter([subp.ProcessExecutionError])
renderer.render_network_state(ns, target=render_dir)
mock_netplan_generate.assert_called_with(run=True)
mock_net_setup_link.assert_called_with(run=True)
- @mock.patch('cloudinit.util.SeLinuxGuard')
+ @mock.patch("cloudinit.util.SeLinuxGuard")
@mock.patch.object(netplan, "get_devicelist")
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel):
mock_sel.__enter__ = mock.Mock(return_value=False)
mock_sel.__exit__ = mock.Mock()
- mock_devlist.side_effect = [['lo']]
+ mock_devlist.side_effect = [["lo"]]
tmp_dir = self.tmp_dir()
- ns = network_state.parse_net_config_data(self.mycfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False)
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': True})
- mock_subp.side_effect = iter([
- subp.ProcessExecutionError,
- ('', ''),
- ('', ''),
- ])
+ {"netplan_path": render_target, "postcmds": True}
+ )
+ mock_subp.side_effect = iter(
+ [
+ subp.ProcessExecutionError,
+ ("", ""),
+ ("", ""),
+ ]
+ )
expected = [
- mock.call(['netplan', 'info'], capture=True),
- mock.call(['netplan', 'generate'], capture=True),
- mock.call(['udevadm', 'test-builtin', 'net_setup_link',
- '/sys/class/net/lo'], capture=True),
+ mock.call(["netplan", "info"], capture=True),
+ mock.call(["netplan", "generate"], capture=True),
+ mock.call(
+ [
+ "udevadm",
+ "test-builtin",
+ "net_setup_link",
+ "/sys/class/net/lo",
+ ],
+ capture=True,
+ ),
]
- with mock.patch.object(os.path, 'islink', return_value=True):
+ with mock.patch.object(os.path, "islink", return_value=True):
renderer.render_network_state(ns, target=render_dir)
mock_subp.assert_has_calls(expected)
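Note: the side_effect list handed to mock_subp above simulates the `netplan info`
probe failing (the first subp call raises ProcessExecutionError; the remaining
calls succeed). A minimal sketch of that mocking pattern, with illustrative
names only and not part of this patch:

from unittest import mock

# First call raises, later calls return normally, mirroring the side_effect
# list used in the test above.
run = mock.Mock(side_effect=[RuntimeError("no 'info' subcommand"), ("", ""), ("", "")])

try:
    run(["netplan", "info"])   # first call raises, as configured
except RuntimeError:
    pass                       # caller treats this as "feature probe failed"

assert run(["netplan", "generate"]) == ("", "")  # subsequent calls succeed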
class TestEniNetworkStateToEni(CiTestCase):
mycfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
- my_mac = 'c0:d6:9f:2c:e8:80'
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ "version": 1,
+ }
+ my_mac = "c0:d6:9f:2c:e8:80"
def test_no_header(self):
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=True)
+ render_hwaddress=True,
+ )
self.assertIn(self.my_mac, rendered)
self.assertIn("hwaddress", rendered)
@@ -4221,14 +5040,17 @@ class TestEniNetworkStateToEni(CiTestCase):
header = "# hello world\n"
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- header=header, render_hwaddress=True)
+ header=header,
+ render_hwaddress=True,
+ )
self.assertIn(header, rendered)
self.assertIn(self.my_mac, rendered)
def test_no_hwaddress(self):
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=False)
+ render_hwaddress=False,
+ )
self.assertNotIn(self.my_mac, rendered)
self.assertNotIn("hwaddress", rendered)
@@ -4237,156 +5059,241 @@ class TestCmdlineConfigParsing(CiTestCase):
with_logs = True
simple_cfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}]}
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
+ }
def test_cmdline_convert_dhcp(self):
found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
- self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
+ self.assertEqual(found, ("eth0", DHCP_EXPECTED_1))
def test_cmdline_convert_dhcp6(self):
found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1)
- self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1))
+ self.assertEqual(found, ("eno1", DHCP6_EXPECTED_1))
def test_cmdline_convert_static(self):
found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
- self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
+ self.assertEqual(found, ("eth1", STATIC_EXPECTED_1))
def test_config_from_cmdline_net_cfg(self):
files = []
- pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
- ('net-eth1.cfg', STATIC_CONTENT_1))
+ pairs = (
+ ("net-eth0.cfg", DHCP_CONTENT_1),
+ ("net-eth1.cfg", STATIC_CONTENT_1),
+ )
- macs = {'eth1': 'b8:ae:ed:75:ff:2b',
- 'eth0': 'b8:ae:ed:75:ff:2a'}
+ macs = {"eth1": "b8:ae:ed:75:ff:2b", "eth0": "b8:ae:ed:75:ff:2a"}
dhcp = copy.deepcopy(DHCP_EXPECTED_1)
- dhcp['mac_address'] = macs['eth0']
+ dhcp["mac_address"] = macs["eth0"]
static = copy.deepcopy(STATIC_EXPECTED_1)
- static['mac_address'] = macs['eth1']
+ static["mac_address"] = macs["eth1"]
- expected = {'version': 1, 'config': [dhcp, static]}
+ expected = {"version": 1, "config": [dhcp, static]}
with temp_utils.tempdir() as tmpd:
for fname, content in pairs:
fp = os.path.join(tmpd, fname)
files.append(fp)
util.write_file(fp, content)
- found = cmdline.config_from_klibc_net_cfg(files=files,
- mac_addrs=macs)
+ found = cmdline.config_from_klibc_net_cfg(
+ files=files, mac_addrs=macs
+ )
self.assertEqual(found, expected)
def test_cmdline_with_b64(self):
data = base64.b64encode(json.dumps(self.simple_cfg).encode())
encoded_text = data.decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ raw_cmdline = "ro network-config=" + encoded_text + " root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
def test_cmdline_with_net_config_disabled(self):
- raw_cmdline = 'ro network-config=disabled root=foo'
+ raw_cmdline = "ro network-config=disabled root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
- self.assertEqual(found, {'config': 'disabled'})
+ self.assertEqual(found, {"config": "disabled"})
def test_cmdline_with_net_config_unencoded_logs_error(self):
"""network-config cannot be unencoded besides 'disabled'."""
- raw_cmdline = 'ro network-config={config:disabled} root=foo'
+ raw_cmdline = "ro network-config={config:disabled} root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertIsNone(found)
expected_log = (
- 'ERROR: Expected base64 encoded kernel commandline parameter'
- ' network-config. Ignoring network-config={config:disabled}.')
+ "ERROR: Expected base64 encoded kernel commandline parameter"
+ " network-config. Ignoring network-config={config:disabled}."
+ )
self.assertIn(expected_log, self.logs.getvalue())
def test_cmdline_with_b64_gz(self):
data = _gzip_data(json.dumps(self.simple_cfg).encode())
encoded_text = base64.b64encode(data).decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ raw_cmdline = "ro network-config=" + encoded_text + " root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
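Note: the tests above cover the accepted encodings of the network-config=
kernel parameter: plain base64 of the JSON config and gzip-then-base64, with
anything else (other than the literal value "disabled") rejected, per
test_cmdline_with_net_config_unencoded_logs_error. A minimal sketch of
producing both encoded forms; the config dict is a placeholder example and
this is not part of the patch:

import base64
import gzip
import json

cfg = {"config": [{"type": "physical", "name": "eth0",
                   "subnets": [{"type": "dhcp"}]}]}

# Plain base64 of the JSON payload.
plain_b64 = base64.b64encode(json.dumps(cfg).encode()).decode()
# gzip-compressed, then base64, matching the _gzip_data path in the test.
gzipped_b64 = base64.b64encode(gzip.compress(json.dumps(cfg).encode())).decode()

cmdline_plain = "ro network-config=" + plain_b64 + " root=foo"
cmdline_gz = "ro network-config=" + gzipped_b64 + " root=foo"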
class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
macs = {
- 'eth0': '14:02:ec:42:48:00',
- 'eno1': '14:02:ec:42:48:01',
+ "eth0": "14:02:ec:42:48:00",
+ "eno1": "14:02:ec:42:48:01",
}
def test_without_ip(self):
- content = {'/run/net-eth0.conf': DHCP_CONTENT_1,
- cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"}
+ content = {
+ "/run/net-eth0.conf": DHCP_CONTENT_1,
+ cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n",
+ }
exp1 = copy.deepcopy(DHCP_EXPECTED_1)
- exp1['mac_address'] = self.macs['eth0']
+ exp1["mac_address"] = self.macs["eth0"]
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo root=/root/bar', _mac_addrs=self.macs,
+ _cmdline="foo root=/root/bar",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], [exp1])
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], [exp1])
def test_with_ip(self):
- content = {'/run/net-eth0.conf': DHCP_CONTENT_1}
+ content = {"/run/net-eth0.conf": DHCP_CONTENT_1}
exp1 = copy.deepcopy(DHCP_EXPECTED_1)
- exp1['mac_address'] = self.macs['eth0']
+ exp1["mac_address"] = self.macs["eth0"]
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip=dhcp', _mac_addrs=self.macs,
+ _cmdline="foo ip=dhcp",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], [exp1])
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], [exp1])
def test_with_ip6(self):
- content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1}
+ content = {"/run/net6-eno1.conf": DHCP6_CONTENT_1}
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip6=dhcp root=/dev/sda', _mac_addrs=self.macs,
+ _cmdline="foo ip6=dhcp root=/dev/sda",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
self.assertEqual(
found,
- {'version': 1, 'config': [
- {'type': 'physical', 'name': 'eno1',
- 'mac_address': self.macs['eno1'],
- 'subnets': [
- {'dns_nameservers': ['2001:67c:1562:8010::2:1'],
- 'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]})
+ {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eno1",
+ "mac_address": self.macs["eno1"],
+ "subnets": [
+ {
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ "control": "manual",
+ "type": "dhcp6",
+ "netmask": "64",
+ }
+ ],
+ }
+ ],
+ },
+ )
def test_with_no_ip_or_ip6(self):
# if there is no ip= or ip6= on cmdline, return value should be None
- content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
files = sorted(populate_dir(self.tmp_dir(), content))
src = cmdline.KlibcNetworkConfigSource(
- _files=files, _cmdline='foo root=/dev/sda', _mac_addrs=self.macs,
+ _files=files,
+ _cmdline="foo root=/dev/sda",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_with_faux_ip(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="foo iscsi_target_ip=root=/dev/sda",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_empty_cmdline(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_whitespace_cmdline(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline=" ",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_no_lhand(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="=wut",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_embedded_ip(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='opt="some things and ip=foo"',
+ _mac_addrs=self.macs,
)
self.assertFalse(src.is_applicable())
def test_with_both_ip_ip6(self):
content = {
- '/run/net-eth0.conf': DHCP_CONTENT_1,
- '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')}
+ "/run/net-eth0.conf": DHCP_CONTENT_1,
+ "/run/net6-eth0.conf": DHCP6_CONTENT_1.replace("eno1", "eth0"),
+ }
eth0 = copy.deepcopy(DHCP_EXPECTED_1)
- eth0['mac_address'] = self.macs['eth0']
- eth0['subnets'].append(
- {'control': 'manual', 'type': 'dhcp6',
- 'netmask': '64', 'dns_nameservers': ['2001:67c:1562:8010::2:1']})
+ eth0["mac_address"] = self.macs["eth0"]
+ eth0["subnets"].append(
+ {
+ "control": "manual",
+ "type": "dhcp6",
+ "netmask": "64",
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ }
+ )
expected = [eth0]
root = self.tmp_dir()
@@ -4394,17 +5301,17 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip=dhcp ip6=dhcp', _mac_addrs=self.macs,
+ _cmdline="foo ip=dhcp ip6=dhcp",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], expected)
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], expected)
class TestReadInitramfsConfig(CiTestCase):
-
def _config_source_cls_mock(self, is_applicable, render_config=None):
return lambda: mock.Mock(
is_applicable=lambda: is_applicable,
@@ -4412,7 +5319,7 @@ class TestReadInitramfsConfig(CiTestCase):
)
def test_no_sources(self):
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', []):
+ with mock.patch("cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", []):
self.assertIsNone(cmdline.read_initramfs_config())
def test_no_applicable_sources(self):
@@ -4421,19 +5328,22 @@ class TestReadInitramfsConfig(CiTestCase):
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(is_applicable=False),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertIsNone(cmdline.read_initramfs_config())
def test_one_applicable_source(self):
expected_config = object()
sources = [
self._config_source_cls_mock(
- is_applicable=True, render_config=expected_config,
+ is_applicable=True,
+ render_config=expected_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(expected_config, cmdline.read_initramfs_config())
def test_one_applicable_source_after_inapplicable_sources(self):
@@ -4442,45 +5352,53 @@ class TestReadInitramfsConfig(CiTestCase):
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(
- is_applicable=True, render_config=expected_config,
+ is_applicable=True,
+ render_config=expected_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(expected_config, cmdline.read_initramfs_config())
def test_first_applicable_source_is_used(self):
first_config, second_config = object(), object()
sources = [
self._config_source_cls_mock(
- is_applicable=True, render_config=first_config,
+ is_applicable=True,
+ render_config=first_config,
),
self._config_source_cls_mock(
- is_applicable=True, render_config=second_config,
+ is_applicable=True,
+ render_config=second_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(first_config, cmdline.read_initramfs_config())
class TestNetplanRoundTrip(CiTestCase):
- NETPLAN_INFO_OUT = textwrap.dedent("""
+ NETPLAN_INFO_OUT = textwrap.dedent(
+ """
netplan.io:
features:
- dhcp-use-domains
- ipv6-mtu
website: https://netplan.io/
- """)
+ """
+ )
def setUp(self):
super(TestNetplanRoundTrip, self).setUp()
- self.add_patch('cloudinit.net.netplan.subp.subp', 'm_subp')
- self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '')
+ self.add_patch("cloudinit.net.netplan.subp.subp", "m_subp")
+ self.m_subp.return_value = (self.NETPLAN_INFO_OUT, "")
- def _render_and_read(self, network_config=None, state=None,
- netplan_path=None, target=None):
+ def _render_and_read(
+ self, network_config=None, state=None, netplan_path=None, target=None
+ ):
if target is None:
target = self.tmp_dir()
@@ -4492,188 +5410,212 @@ class TestNetplanRoundTrip(CiTestCase):
raise ValueError("Expected data or state, got neither")
if netplan_path is None:
- netplan_path = 'etc/netplan/50-cloud-init.yaml'
+ netplan_path = "etc/netplan/50-cloud-init.yaml"
- renderer = netplan.Renderer(
- config={'netplan_path': netplan_path})
+ renderer = netplan.Renderer(config={"netplan_path": netplan_path})
renderer.render_network_state(ns, target=target)
return dir2dict(target)
def testsimple_render_bond_netplan(self):
- entry = NETWORK_CONFIGS['bond']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ entry = NETWORK_CONFIGS["bond"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_bond_v2_input_netplan(self):
- entry = NETWORK_CONFIGS['bond']
+ entry = NETWORK_CONFIGS["bond"]
files = self._render_and_read(
- network_config=yaml.load(entry['yaml-v2']))
- print(entry['expected_netplan-v2'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ network_config=yaml.load(entry["yaml-v2"])
+ )
+ print(entry["expected_netplan-v2"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan-v2'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan-v2"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_small_netplan(self):
- entry = NETWORK_CONFIGS['small']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_v4_and_v6_static(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_only(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_accept_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_reject_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateless(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateful(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateful']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateful"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_all(self):
- entry = NETWORK_CONFIGS['all']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ entry = NETWORK_CONFIGS["all"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_manual(self):
- entry = NETWORK_CONFIGS['manual']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def test_render_output_has_yaml_no_aliases(self):
entry = {
- 'yaml': V1_NAMESERVER_ALIAS,
- 'expected_netplan': NETPLAN_NO_ALIAS,
+ "yaml": V1_NAMESERVER_ALIAS,
+ "expected_netplan": NETPLAN_NO_ALIAS,
}
- network_config = yaml.load(entry['yaml'])
+ network_config = yaml.load(entry["yaml"])
ns = network_state.parse_net_config_data(network_config)
files = self._render_and_read(state=ns)
# check for alias
- content = files['/etc/netplan/50-cloud-init.yaml']
+ content = files["/etc/netplan/50-cloud-init.yaml"]
        # test-load the yaml to ensure we don't render something unloadable
# this allows single aliases, but not duplicate ones
- parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml'])
+ parsed = yaml.load(files["/etc/netplan/50-cloud-init.yaml"])
self.assertNotEqual(None, parsed)
# now look for any alias, avoid rendering them entirely
# generate the first anchor string using the template
# as of this writing, looks like "&id001"
- anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1
+ anchor = r"&" + Serializer.ANCHOR_TEMPLATE % 1
found_alias = re.search(anchor, content, re.MULTILINE)
if found_alias:
msg = "Error at: %s\nContent:\n%s" % (found_alias, content)
- raise ValueError('Found yaml alias in rendered netplan: ' + msg)
+ raise ValueError("Found yaml alias in rendered netplan: " + msg)
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def test_render_output_supports_both_grat_arp_spelling(self):
entry = {
- 'yaml': NETPLAN_BOND_GRAT_ARP,
- 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous',
- 'gratuitious'),
+ "yaml": NETPLAN_BOND_GRAT_ARP,
+ "expected_netplan": NETPLAN_BOND_GRAT_ARP.replace(
+ "gratuitous", "gratuitious"
+ ),
}
- network_config = yaml.load(entry['yaml']).get('network')
+ network_config = yaml.load(entry["yaml"]).get("network")
files = self._render_and_read(network_config=network_config)
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
class TestEniRoundTrip(CiTestCase):
-
- def _render_and_read(self, network_config=None, state=None, eni_path=None,
- netrules_path=None, dir=None):
+ def _render_and_read(
+ self,
+ network_config=None,
+ state=None,
+ eni_path=None,
+ netrules_path=None,
+ dir=None,
+ ):
if dir is None:
dir = self.tmp_dir()
@@ -4685,10 +5627,11 @@ class TestEniRoundTrip(CiTestCase):
raise ValueError("Expected data or state, got neither")
if eni_path is None:
- eni_path = 'etc/network/interfaces'
+ eni_path = "etc/network/interfaces"
renderer = eni.Renderer(
- config={'eni_path': eni_path, 'netrules_path': netrules_path})
+ config={"eni_path": eni_path, "netrules_path": netrules_path}
+ )
renderer.render_network_state(ns, target=dir)
return dir2dict(dir)
@@ -4698,95 +5641,112 @@ class TestEniRoundTrip(CiTestCase):
files = self._render_and_read(network_config=network_config)
self.assertEqual(
RENDERED_ENI.splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_all(self):
- entry = NETWORK_CONFIGS['all']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_small(self):
- entry = NETWORK_CONFIGS['small']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_only(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_v4_and_v6_static(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateless(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateful(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_accept_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_reject_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_manual(self):
"""Test rendering of 'manual' for 'type' and 'control'.
@@ -4796,165 +5756,471 @@ class TestEniRoundTrip(CiTestCase):
if there were no addresses to configure. Also strange is the fact
that in order to apply that MTU the ifupdown device must be set
to 'auto', or the MTU would not be set."""
- entry = NETWORK_CONFIGS['manual']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def test_routes_rendered(self):
# as reported in bug 1649652
conf = [
- {'name': 'eth0', 'type': 'physical',
- 'subnets': [{
- 'address': '172.23.31.42/26',
- 'dns_nameservers': [], 'gateway': '172.23.31.2',
- 'type': 'static'}]},
- {'type': 'route', 'id': 4,
- 'metric': 0, 'destination': '10.0.0.0/12',
- 'gateway': '172.23.31.1'},
- {'type': 'route', 'id': 5,
- 'metric': 0, 'destination': '192.168.2.0/16',
- 'gateway': '172.23.31.1'},
- {'type': 'route', 'id': 6,
- 'metric': 1, 'destination': '10.0.200.0/16',
- 'gateway': '172.23.31.1'},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "172.23.31.42/26",
+ "dns_nameservers": [],
+ "gateway": "172.23.31.2",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "type": "route",
+ "id": 4,
+ "metric": 0,
+ "destination": "10.0.0.0/12",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 5,
+ "metric": 0,
+ "destination": "192.168.2.0/16",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 6,
+ "metric": 1,
+ "destination": "10.0.200.0/16",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 7,
+ "metric": 1,
+ "destination": "10.0.0.100/32",
+ "gateway": "172.23.31.1",
+ },
]
files = self._render_and_read(
- network_config={'config': conf, 'version': 1})
+ network_config={"config": conf, "version": 1}
+ )
expected = [
- 'auto lo',
- 'iface lo inet loopback',
- 'auto eth0',
- 'iface eth0 inet static',
- ' address 172.23.31.42/26',
- ' gateway 172.23.31.2',
- ('post-up route add -net 10.0.0.0/12 gw '
- '172.23.31.1 metric 0 || true'),
- ('pre-down route del -net 10.0.0.0/12 gw '
- '172.23.31.1 metric 0 || true'),
- ('post-up route add -net 192.168.2.0/16 gw '
- '172.23.31.1 metric 0 || true'),
- ('pre-down route del -net 192.168.2.0/16 gw '
- '172.23.31.1 metric 0 || true'),
- ('post-up route add -net 10.0.200.0/16 gw '
- '172.23.31.1 metric 1 || true'),
- ('pre-down route del -net 10.0.200.0/16 gw '
- '172.23.31.1 metric 1 || true'),
+ "auto lo",
+ "iface lo inet loopback",
+ "auto eth0",
+ "iface eth0 inet static",
+ " address 172.23.31.42/26",
+ " gateway 172.23.31.2",
+ "post-up route add -net 10.0.0.0/12 gw "
+ "172.23.31.1 metric 0 || true",
+ "pre-down route del -net 10.0.0.0/12 gw "
+ "172.23.31.1 metric 0 || true",
+ "post-up route add -net 192.168.2.0/16 gw "
+ "172.23.31.1 metric 0 || true",
+ "pre-down route del -net 192.168.2.0/16 gw "
+ "172.23.31.1 metric 0 || true",
+ "post-up route add -net 10.0.200.0/16 gw "
+ "172.23.31.1 metric 1 || true",
+ "pre-down route del -net 10.0.200.0/16 gw "
+ "172.23.31.1 metric 1 || true",
+ "post-up route add -host 10.0.0.100/32 gw "
+ "172.23.31.1 metric 1 || true",
+ "pre-down route del -host 10.0.0.100/32 gw "
+ "172.23.31.1 metric 1 || true",
]
- found = files['/etc/network/interfaces'].splitlines()
+ found = files["/etc/network/interfaces"].splitlines()
- self.assertEqual(
- expected, [line for line in found if line])
+ self.assertEqual(expected, [line for line in found if line])
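Note: the updated expectations above distinguish host routes from network
routes: the newly added 10.0.0.100/32 destination is rendered with `-host`,
while wider prefixes keep `-net`. A hedged sketch (not the renderer's actual
code; the helper name is illustrative) of deriving that choice from the
prefix length:

import ipaddress

def route_flag(destination):
    # A /32 IPv4 destination names a single host; anything wider is a network.
    net = ipaddress.ip_network(destination, strict=False)
    return "host" if net.prefixlen == net.max_prefixlen else "net"

assert route_flag("10.0.0.100/32") == "host"
assert route_flag("10.0.0.0/12") == "net"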
def test_ipv6_static_routes(self):
# as reported in bug 1818669
conf = [
- {'name': 'eno3', 'type': 'physical',
- 'subnets': [{
- 'address': 'fd00::12/64',
- 'dns_nameservers': ['fd00:2::15'],
- 'gateway': 'fd00::1',
- 'ipv6': True,
- 'type': 'static',
- 'routes': [{'netmask': '32',
- 'network': 'fd00:12::',
- 'gateway': 'fd00::2'},
- {'network': 'fd00:14::',
- 'gateway': 'fd00::3'},
- {'destination': 'fe00:14::/48',
- 'gateway': 'fe00::4',
- 'metric': 500},
- {'gateway': '192.168.23.1',
- 'metric': 999,
- 'netmask': 24,
- 'network': '192.168.23.0'},
- {'destination': '10.23.23.0/24',
- 'gateway': '10.23.23.2',
- 'metric': 300}]}]},
+ {
+ "name": "eno3",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "fd00::12/64",
+ "dns_nameservers": ["fd00:2::15"],
+ "gateway": "fd00::1",
+ "ipv6": True,
+ "type": "static",
+ "routes": [
+ {
+ "netmask": "32",
+ "network": "fd00:12::",
+ "gateway": "fd00::2",
+ },
+ {"network": "fd00:14::", "gateway": "fd00::3"},
+ {
+ "destination": "fe00:14::/48",
+ "gateway": "fe00::4",
+ "metric": 500,
+ },
+ {
+ "gateway": "192.168.23.1",
+ "metric": 999,
+ "netmask": 24,
+ "network": "192.168.23.0",
+ },
+ {
+ "destination": "10.23.23.0/24",
+ "gateway": "10.23.23.2",
+ "metric": 300,
+ },
+ ],
+ }
+ ],
+ },
]
files = self._render_and_read(
- network_config={'config': conf, 'version': 1})
+ network_config={"config": conf, "version": 1}
+ )
expected = [
- 'auto lo',
- 'iface lo inet loopback',
- 'auto eno3',
- 'iface eno3 inet6 static',
- ' address fd00::12/64',
- ' dns-nameservers fd00:2::15',
- ' gateway fd00::1',
- (' post-up route add -A inet6 fd00:12::/32 gw '
- 'fd00::2 || true'),
- (' pre-down route del -A inet6 fd00:12::/32 gw '
- 'fd00::2 || true'),
- (' post-up route add -A inet6 fd00:14::/64 gw '
- 'fd00::3 || true'),
- (' pre-down route del -A inet6 fd00:14::/64 gw '
- 'fd00::3 || true'),
- (' post-up route add -A inet6 fe00:14::/48 gw '
- 'fe00::4 metric 500 || true'),
- (' pre-down route del -A inet6 fe00:14::/48 gw '
- 'fe00::4 metric 500 || true'),
- (' post-up route add -net 192.168.23.0/24 gw '
- '192.168.23.1 metric 999 || true'),
- (' pre-down route del -net 192.168.23.0/24 gw '
- '192.168.23.1 metric 999 || true'),
- (' post-up route add -net 10.23.23.0/24 gw '
- '10.23.23.2 metric 300 || true'),
- (' pre-down route del -net 10.23.23.0/24 gw '
- '10.23.23.2 metric 300 || true'),
-
+ "auto lo",
+ "iface lo inet loopback",
+ "auto eno3",
+ "iface eno3 inet6 static",
+ " address fd00::12/64",
+ " dns-nameservers fd00:2::15",
+ " gateway fd00::1",
+ " post-up route add -A inet6 fd00:12::/32 gw fd00::2 || true",
+ " pre-down route del -A inet6 fd00:12::/32 gw fd00::2 || true",
+ " post-up route add -A inet6 fd00:14::/64 gw fd00::3 || true",
+ " pre-down route del -A inet6 fd00:14::/64 gw fd00::3 || true",
+ " post-up route add -A inet6 fe00:14::/48 gw "
+ "fe00::4 metric 500 || true",
+ " pre-down route del -A inet6 fe00:14::/48 gw "
+ "fe00::4 metric 500 || true",
+ " post-up route add -net 192.168.23.0/24 gw "
+ "192.168.23.1 metric 999 || true",
+ " pre-down route del -net 192.168.23.0/24 gw "
+ "192.168.23.1 metric 999 || true",
+ " post-up route add -net 10.23.23.0/24 gw "
+ "10.23.23.2 metric 300 || true",
+ " pre-down route del -net 10.23.23.0/24 gw "
+ "10.23.23.2 metric 300 || true",
]
- found = files['/etc/network/interfaces'].splitlines()
+ found = files["/etc/network/interfaces"].splitlines()
- self.assertEqual(
- expected, [line for line in found if line])
+ self.assertEqual(expected, [line for line in found if line])
def testsimple_render_bond(self):
- entry = NETWORK_CONFIGS['bond']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bond"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
-class TestRenderersSelect:
+class TestNetworkdNetRendering(CiTestCase):
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_networkd_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ m_chown,
+ ):
+ tmp_dir = self.tmp_dir()
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = "etc/systemd/network/10-cloud-init-eth1000.network"
+ renderer = networkd.Renderer({})
+ renderer.render_network_state(ns, target=render_dir)
+
+ self.assertTrue(
+ os.path.exists(os.path.join(render_dir, render_target))
+ )
+ with open(os.path.join(render_dir, render_target)) as fh:
+ contents = fh.readlines()
+
+ actual = self.create_conf_dict(contents)
+ print(actual)
+
+ expected = textwrap.dedent(
+ """\
+ [Match]
+ Name=eth1000
+ MACAddress=07-1c-c6-75-a4-be
+ [Network]
+ DHCP=ipv4"""
+ ).rstrip(" ")
+
+ expected = self.create_conf_dict(expected.splitlines())
+
+ self.compare_dicts(actual, expected)
+
+
+class TestNetworkdRoundTrip(CiTestCase):
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _render_and_read(
+ self, network_config=None, state=None, nwkd_path=None, dir=None
+ ):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ if not nwkd_path:
+ nwkd_path = "/etc/systemd/network/"
+
+ renderer = networkd.Renderer(config={"network_conf_dir": nwkd_path})
+
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_small_networkd(self, m_chown):
+ nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network"
+ nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network"
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn1].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd_eth99"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ actual = files[nwk_fn2].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd_eth1"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6_static(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_dhcpv6_only(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v1(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v2(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v1(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v2(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+
+class TestRenderersSelect:
@pytest.mark.parametrize(
- 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ "renderer_selected,netplan,eni,nm,scfg,sys,networkd",
+ (
# -netplan -ifupdown -nm -scfg -sys raises error
- (net.RendererNotFoundError, False, False, False, False, False),
+ (
+ net.RendererNotFoundError,
+ False,
+ False,
+ False,
+ False,
+ False,
+ False,
+ ),
# -netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', False, True, False, False, False),
+ ("eni", False, True, False, False, False, False),
# +netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', True, True, False, False, False),
+ ("eni", True, True, False, False, False, False),
# +netplan -ifupdown -nm -scfg -sys selects netplan
- ('netplan', True, False, False, False, False),
+ ("netplan", True, False, False, False, False, False),
# Ubuntu with Network-Manager installed
# +netplan -ifupdown +nm -scfg -sys selects netplan
- ('netplan', True, False, True, False, False),
+ ("netplan", True, False, True, False, False, False),
# Centos/OpenSuse with Network-Manager installed selects sysconfig
            # -netplan -ifupdown +nm -scfg +sys selects sysconfig
- ('sysconfig', False, False, True, False, True),
+ ("sysconfig", False, False, True, False, True, False),
+ # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd
+ ("networkd", False, False, False, False, False, True),
),
)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
@mock.patch("cloudinit.net.renderers.netplan.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
@mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
@mock.patch("cloudinit.net.renderers.eni.available")
def test_valid_renderer_from_defaults_depending_on_availability(
- self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
- m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ self,
+ m_eni_avail,
+ m_nm_avail,
+ m_scfg_avail,
+ m_sys_avail,
+ m_netplan_avail,
+ m_networkd_avail,
+ renderer_selected,
+ netplan,
+ eni,
+ nm,
+ scfg,
+ sys,
+ networkd,
):
"""Assert proper renderer per DEFAULT_PRIORITY given availability."""
- m_eni_avail.return_value = eni # ifupdown pkg presence
- m_nm_avail.return_value = nm # network-manager presence
- m_scfg_avail.return_value = scfg # sysconfig presence
- m_sys_avail.return_value = sys # sysconfig/ifup/down presence
+ m_eni_avail.return_value = eni # ifupdown pkg presence
+ m_nm_avail.return_value = nm # network-manager presence
+ m_scfg_avail.return_value = scfg # sysconfig presence
+ m_sys_avail.return_value = sys # sysconfig/ifup/down presence
m_netplan_avail.return_value = netplan # netplan presence
+ m_networkd_avail.return_value = networkd # networkd presence
if isinstance(renderer_selected, str):
(renderer_name, _rnd_class) = renderers.select(
priority=renderers.DEFAULT_PRIORITY
@@ -4971,14 +6237,14 @@ class TestNetRenderers(CiTestCase):
def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail):
m_eni_avail.return_value = True
m_sysc_avail.return_value = True
- found = renderers.search(priority=['sysconfig', 'eni'], first=False)
+ found = renderers.search(priority=["sysconfig", "eni"], first=False)
names = [f[0] for f in found]
- self.assertEqual(['sysconfig', 'eni'], names)
+ self.assertEqual(["sysconfig", "eni"], names)
@mock.patch("cloudinit.net.renderers.eni.available")
def test_search_returns_empty_on_none(self, m_eni_avail):
m_eni_avail.return_value = False
- found = renderers.search(priority=['eni'], first=False)
+ found = renderers.search(priority=["eni"], first=False)
self.assertEqual([], found)
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@@ -4987,16 +6253,16 @@ class TestNetRenderers(CiTestCase):
# available should only be called until one is found.
m_eni_avail.return_value = True
m_sysc_avail.side_effect = Exception("Should not call me")
- found = renderers.search(priority=['eni', 'sysconfig'], first=True)
- self.assertEqual(['eni'], [found[0]])
+ found = renderers.search(priority=["eni", "sysconfig"], first=True)[0]
+ self.assertEqual(["eni"], [found[0]])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
def test_select_positive(self, m_eni_avail, m_sysc_avail):
m_eni_avail.return_value = True
m_sysc_avail.return_value = False
- found = renderers.select(priority=['sysconfig', 'eni'])
- self.assertEqual('eni', found[0])
+ found = renderers.select(priority=["sysconfig", "eni"])
+ self.assertEqual("eni", found[0])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
@@ -5005,89 +6271,120 @@ class TestNetRenderers(CiTestCase):
m_eni_avail.return_value = False
m_sysc_avail.return_value = False
- self.assertRaises(net.RendererNotFoundError, renderers.select,
- priority=['sysconfig', 'eni'])
+ self.assertRaises(
+ net.RendererNotFoundError,
+ renderers.select,
+ priority=["sysconfig", "eni"],
+ )
@mock.patch("cloudinit.net.sysconfig.available_sysconfig")
- @mock.patch("cloudinit.util.get_linux_distro")
- def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail):
+ @mock.patch("cloudinit.util.system_info")
+ def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail):
m_avail.return_value = True
- distro_values = [
- ('opensuse', '', ''),
- ('opensuse-leap', '', ''),
- ('opensuse-tumbleweed', '', ''),
- ('sles', '', ''),
- ('centos', '', ''),
- ('fedora', '', ''),
- ('redhat', '', ''),
+ variants = [
+ "suse",
+ "centos",
+ "eurolinux",
+ "fedora",
+ "rhel",
]
- for (distro_name, distro_version, flavor) in distro_values:
- m_distro.return_value = (distro_name, distro_version, flavor)
+ for distro_name in variants:
+ m_info.return_value = {"variant": distro_name}
if hasattr(util.system_info, "cache_clear"):
util.system_info.cache_clear()
result = sysconfig.available()
self.assertTrue(result)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
+ def test_networkd_available(self, m_nwkd_avail):
+ m_nwkd_avail.return_value = True
+ found = renderers.search(priority=["networkd"], first=False)
+ self.assertEqual("networkd", found[0][0])
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetInterfaces(CiTestCase):
- _data = {'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'vlans': ['bond1.101'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
- 'bond1.101', 'lo', 'eth1'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bond1.101': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'lo': '00:00:00:00:00:00',
- 'greptap0': '00:00:00:00:00:00',
- 'eth1': 'aa:aa:aa:aa:aa:01',
- 'tun0': None},
- 'drivers': {'enp0s1': 'virtio_net',
- 'enp0s2': 'e1000',
- 'bond1': None,
- 'bond1.101': None,
- 'bridge1': None,
- 'bridge1-nic': None,
- 'lo': None,
- 'greptap0': None,
- 'eth1': 'mlx4_core',
- 'tun0': None}}
+ _data = {
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "vlans": ["bond1.101"],
+ "own_macs": [
+ "enp0s1",
+ "enp0s2",
+ "bridge1-nic",
+ "bridge1",
+ "bond1.101",
+ "lo",
+ "eth1",
+ ],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bond1.101": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "lo": "00:00:00:00:00:00",
+ "greptap0": "00:00:00:00:00:00",
+ "eth1": "aa:aa:aa:aa:aa:01",
+ "tun0": None,
+ },
+ "drivers": {
+ "enp0s1": "virtio_net",
+ "enp0s2": "e1000",
+ "bond1": None,
+ "bond1.101": None,
+ "bridge1": None,
+ "bridge1-nic": None,
+ "lo": None,
+ "greptap0": None,
+ "eth1": "mlx4_core",
+ "tun0": None,
+ },
+ }
data = {}
def _se_get_devicelist(self):
- return list(self.data['devices'])
+ return list(self.data["devices"])
def _se_device_driver(self, name):
- return self.data['drivers'][name]
+ return self.data["drivers"][name]
def _se_device_devid(self, name):
- return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name)
+ return "0x%s" % sorted(list(self.data["drivers"].keys())).index(name)
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_is_vlan(self, name):
- return name in self.data['vlans']
+ return name in self.data["vlans"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- self.data['devices'] = set(list(self.data['macs'].keys()))
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'is_vlan', 'device_driver',
- 'device_devid')
+ self.data["devices"] = set(list(self.data["macs"].keys()))
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "is_vlan",
+ "device_driver",
+ "device_devid",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
@@ -5095,30 +6392,31 @@ class TestGetInterfaces(CiTestCase):
self._mock_setup()
ret = net.get_interfaces()
- self.assertIn('enp0s1', self._se_get_devicelist())
- self.assertIn('eth1', self._se_get_devicelist())
- found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent]
+ self.assertIn("enp0s1", self._se_get_devicelist())
+ self.assertIn("eth1", self._se_get_devicelist())
+ found = [ent for ent in ret if "aa:aa:aa:aa:aa:01" in ent]
self.assertEqual(len(found), 2)
def test_gi_excludes_any_without_mac_address(self):
self._mock_setup()
ret = net.get_interfaces()
- self.assertIn('tun0', self._se_get_devicelist())
- found = [ent for ent in ret if 'tun0' in ent]
+ self.assertIn("tun0", self._se_get_devicelist())
+ found = [ent for ent in ret if "tun0" in ent]
self.assertEqual(len(found), 0)
def test_gi_excludes_stolen_macs(self):
self._mock_setup()
ret = net.get_interfaces()
- self.mocks['interface_has_own_mac'].assert_has_calls(
- [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ self.mocks["interface_has_own_mac"].assert_has_calls(
+ [mock.call("enp0s1"), mock.call("bond1")], any_order=True
+ )
expected = [
- ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'),
- ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'),
- ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'),
- ('lo', '00:00:00:00:00:00', None, '0x8'),
- ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'),
+ ("enp0s2", "aa:aa:aa:aa:aa:02", "e1000", "0x5"),
+ ("enp0s1", "aa:aa:aa:aa:aa:01", "virtio_net", "0x4"),
+ ("eth1", "aa:aa:aa:aa:aa:01", "mlx4_core", "0x6"),
+ ("lo", "00:00:00:00:00:00", None, "0x8"),
+ ("bridge1-nic", "aa:aa:aa:aa:aa:03", None, "0x3"),
]
self.assertEqual(sorted(expected), sorted(ret))
@@ -5127,24 +6425,29 @@ class TestGetInterfaces(CiTestCase):
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a bridge.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['drivers']['b1'] = None
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["drivers"]["b1"] = None
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces()
- self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret)
- self.mocks['is_bridge'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual([("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")], ret)
+ self.mocks["is_bridge"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
class TestInterfaceHasOwnMac(CiTestCase):
"""Test interface_has_own_mac. This is admittedly a bit whitebox."""
- @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ @mock.patch("cloudinit.net.read_sys_net_int", return_value=None)
def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int):
"""If nic does not have addr_assign_type, it is not "stolen".
@@ -5161,229 +6464,301 @@ class TestInterfaceHasOwnMac(CiTestCase):
"""
self.assertTrue(interface_has_own_mac("eth0"))
- @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ @mock.patch("cloudinit.net.read_sys_net_int", return_value=None)
def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int):
with self.assertRaises(ValueError):
interface_has_own_mac("eth0", True)
- @mock.patch('cloudinit.net.read_sys_net_int')
+ @mock.patch("cloudinit.net.read_sys_net_int")
def test_expected_values(self, m_read_sys_net_int):
msg = "address_assign_type=%d said to not have own mac"
for address_assign_type in (0, 1, 3):
m_read_sys_net_int.return_value = address_assign_type
self.assertTrue(
- interface_has_own_mac("eth0", msg % address_assign_type))
+ interface_has_own_mac("eth0", msg % address_assign_type)
+ )
m_read_sys_net_int.return_value = 2
self.assertFalse(interface_has_own_mac("eth0"))
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetInterfacesByMac(CiTestCase):
- _data = {'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'vlans': ['bond1.101'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
- 'bond1.101', 'lo'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bond1.101': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'lo': '00:00:00:00:00:00',
- 'greptap0': '00:00:00:00:00:00',
- 'tun0': None}}
+ _data = {
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "vlans": ["bond1.101"],
+ "own_macs": [
+ "enp0s1",
+ "enp0s2",
+ "bridge1-nic",
+ "bridge1",
+ "bond1.101",
+ "lo",
+ ],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bond1.101": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "lo": "00:00:00:00:00:00",
+ "greptap0": "00:00:00:00:00:00",
+ "tun0": None,
+ },
+ }
data = {}
def _se_get_devicelist(self):
- return list(self.data['devices'])
+ return list(self.data["devices"])
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_is_vlan(self, name):
- return name in self.data['vlans']
+ return name in self.data["vlans"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
- ib_hwaddr = self.data.get('ib_hwaddr', {})
+ ib_hwaddr = self.data.get("ib_hwaddr", {})
return ib_hwaddr.get(name, {}).get(ethernet_format)
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- self.data['devices'] = set(list(self.data['macs'].keys()))
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr')
+ self.data["devices"] = set(list(self.data["macs"].keys()))
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "is_vlan",
+ "get_ib_interface_hwaddr",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
def test_raise_exception_on_duplicate_macs(self):
self._mock_setup()
- self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1']
+ self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"]
self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
def test_excludes_any_without_mac_address(self):
self._mock_setup()
ret = net.get_interfaces_by_mac()
- self.assertIn('tun0', self._se_get_devicelist())
- self.assertNotIn('tun0', ret.values())
+ self.assertIn("tun0", self._se_get_devicelist())
+ self.assertNotIn("tun0", ret.values())
def test_excludes_stolen_macs(self):
self._mock_setup()
ret = net.get_interfaces_by_mac()
- self.mocks['interface_has_own_mac'].assert_has_calls(
- [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ self.mocks["interface_has_own_mac"].assert_has_calls(
+ [mock.call("enp0s1"), mock.call("bond1")], any_order=True
+ )
self.assertEqual(
- {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2',
- 'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'},
- ret)
+ {
+ "aa:aa:aa:aa:aa:01": "enp0s1",
+ "aa:aa:aa:aa:aa:02": "enp0s2",
+ "aa:aa:aa:aa:aa:03": "bridge1-nic",
+ "00:00:00:00:00:00": "lo",
+ },
+ ret,
+ )
def test_excludes_bridges(self):
self._mock_setup()
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a bridge.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces_by_mac()
- self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
- self.mocks['is_bridge'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret)
+ self.mocks["is_bridge"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
def test_excludes_vlans(self):
self._mock_setup()
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a vlan.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['bridges'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["bridges"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["vlans"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces_by_mac()
- self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
- self.mocks['is_vlan'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret)
+ self.mocks["is_vlan"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
def test_duplicates_of_empty_mac_are_ok(self):
"""Duplicate macs of 00:00:00:00:00:00 should be skipped."""
self._mock_setup()
empty_mac = "00:00:00:00:00:00"
- addnics = ('greptap1', 'lo', 'greptap2')
- self.data['macs'].update(dict((k, empty_mac) for k in addnics))
- self.data['devices'].update(set(addnics))
- self.data['own_macs'].extend(list(addnics))
+ addnics = ("greptap1", "lo", "greptap2")
+ self.data["macs"].update(dict((k, empty_mac) for k in addnics))
+ self.data["devices"].update(set(addnics))
+ self.data["own_macs"].extend(list(addnics))
ret = net.get_interfaces_by_mac()
- self.assertEqual('lo', ret[empty_mac])
+ self.assertEqual("lo", ret[empty_mac])
def test_skip_all_zeros(self):
"""Any mac of 00:... should be skipped."""
self._mock_setup()
emac1, emac2, emac4, emac6 = (
- '00', '00:00', '00:00:00:00', '00:00:00:00:00:00')
- addnics = {'empty1': emac1, 'emac2a': emac2, 'emac2b': emac2,
- 'emac4': emac4, 'emac6': emac6}
- self.data['macs'].update(addnics)
- self.data['devices'].update(set(addnics))
- self.data['own_macs'].extend(addnics.keys())
+ "00",
+ "00:00",
+ "00:00:00:00",
+ "00:00:00:00:00:00",
+ )
+ addnics = {
+ "empty1": emac1,
+ "emac2a": emac2,
+ "emac2b": emac2,
+ "emac4": emac4,
+ "emac6": emac6,
+ }
+ self.data["macs"].update(addnics)
+ self.data["devices"].update(set(addnics))
+ self.data["own_macs"].extend(addnics.keys())
ret = net.get_interfaces_by_mac()
- self.assertEqual('lo', ret['00:00:00:00:00:00'])
+ self.assertEqual("lo", ret["00:00:00:00:00:00"])
def test_ib(self):
- ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
- ib_addr_eth_format = '00:11:22:33:44:56'
+ ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56"
+ ib_addr_eth_format = "00:11:22:33:44:56"
self._mock_setup()
- self.data['devices'] = ['enp0s1', 'ib0']
- self.data['own_macs'].append('ib0')
- self.data['macs']['ib0'] = ib_addr
- self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format,
- False: ib_addr}}
+ self.data["devices"] = ["enp0s1", "ib0"]
+ self.data["own_macs"].append("ib0")
+ self.data["macs"]["ib0"] = ib_addr
+ self.data["ib_hwaddr"] = {
+ "ib0": {True: ib_addr_eth_format, False: ib_addr}
+ }
result = net.get_interfaces_by_mac()
- expected = {'aa:aa:aa:aa:aa:01': 'enp0s1',
- ib_addr_eth_format: 'ib0', ib_addr: 'ib0'}
+ expected = {
+ "aa:aa:aa:aa:aa:01": "enp0s1",
+ ib_addr_eth_format: "ib0",
+ ib_addr: "ib0",
+ }
self.assertEqual(expected, result)
class TestInterfacesSorting(CiTestCase):
-
def test_natural_order(self):
- data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2']
+ data = ["ens5", "ens6", "ens3", "ens20", "ens13", "ens2"]
self.assertEqual(
sorted(data, key=natural_sort_key),
- ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20'])
- data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2']
+ ["ens2", "ens3", "ens5", "ens6", "ens13", "ens20"],
+ )
+ data2 = ["enp2s0", "enp2s3", "enp0s3", "enp0s13", "enp0s8", "enp1s2"]
self.assertEqual(
sorted(data2, key=natural_sort_key),
- ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
+ ["enp0s3", "enp0s8", "enp0s13", "enp1s2", "enp2s0", "enp2s3"],
+ )
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetIBHwaddrsByInterface(CiTestCase):
- _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
- _ib_addr_eth_format = '00:11:22:33:44:56'
- _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
- 'bridge1-nic', 'tun0', 'ib0'],
- 'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'tun0': None,
- 'ib0': _ib_addr},
- 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format,
- False: _ib_addr}}}
+ _ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56"
+ _ib_addr_eth_format = "00:11:22:33:44:56"
+ _data = {
+ "devices": [
+ "enp0s1",
+ "enp0s2",
+ "bond1",
+ "bridge1",
+ "bridge1-nic",
+ "tun0",
+ "ib0",
+ ],
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "own_macs": ["enp0s1", "enp0s2", "bridge1-nic", "bridge1", "ib0"],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "tun0": None,
+ "ib0": _ib_addr,
+ },
+ "ib_hwaddr": {"ib0": {True: _ib_addr_eth_format, False: _ib_addr}},
+ }
data = {}
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'get_ib_interface_hwaddr')
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "get_ib_interface_hwaddr",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
def _se_get_devicelist(self):
- return self.data['devices']
+ return self.data["devices"]
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
- ib_hwaddr = self.data.get('ib_hwaddr', {})
+ ib_hwaddr = self.data.get("ib_hwaddr", {})
return ib_hwaddr.get(name, {}).get(ethernet_format)
def test_ethernet(self):
self._mock_setup()
- self.data['devices'].remove('ib0')
+ self.data["devices"].remove("ib0")
result = net.get_ib_hwaddrs_by_interface()
expected = {}
self.assertEqual(expected, result)
@@ -5391,7 +6766,7 @@ class TestGetIBHwaddrsByInterface(CiTestCase):
def test_ib(self):
self._mock_setup()
result = net.get_ib_hwaddrs_by_interface()
- expected = {'ib0': self._ib_addr}
+ expected = {"ib0": self._ib_addr}
self.assertEqual(expected, result)
@@ -5404,239 +6779,305 @@ def _gzip_data(data):
class TestRenameInterfaces(CiTestCase):
-
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_all(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
- ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"),
+ ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"),
]
current_info = {
- 'ens3': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:55',
- 'name': 'ens3',
- 'up': False},
- 'ens5': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:aa',
- 'name': 'ens5',
- 'up': False},
+ "ens3": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:55",
+ "name": "ens3",
+ "up": False,
+ },
+ "ens5": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:aa",
+ "name": "ens5",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "ens3", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "ens5", "name", "interface2"],
+ capture=True,
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_no_driver_no_device_id(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', None, None),
- ('00:11:22:33:44:aa', 'interface1', None, None),
+ ("00:11:22:33:44:55", "interface0", None, None),
+ ("00:11:22:33:44:aa", "interface1", None, None),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': None,
- 'driver': None,
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': None,
- 'driver': None,
- 'mac': '00:11:22:33:44:aa',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": None,
+ "driver": None,
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": None,
+ "driver": None,
+ "mac": "00:11:22:33:44:aa",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth0", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "interface1"],
+ capture=True,
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_all_bounce(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
- ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"),
+ ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"),
]
current_info = {
- 'ens3': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:55',
- 'name': 'ens3',
- 'up': True},
- 'ens5': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:aa',
- 'name': 'ens5',
- 'up': True},
+ "ens3": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:55",
+ "name": "ens3",
+ "up": True,
+ },
+ "ens5": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:aa",
+ "name": "ens5",
+ "up": True,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True),
- mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True),
- mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True)
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(["ip", "link", "set", "ens3", "down"], capture=True),
+ mock.call(
+ ["ip", "link", "set", "ens3", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(["ip", "link", "set", "ens5", "down"], capture=True),
+ mock.call(
+ ["ip", "link", "set", "ens5", "name", "interface2"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "interface0", "up"], capture=True
+ ),
+ mock.call(
+ ["ip", "link", "set", "interface2", "up"], capture=True
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_duplicate_macs(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- ])
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ ]
+ )
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_duplicate_macs_driver_no_devid(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", None),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", None),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- ])
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ ]
+ )
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_multi_mac_dups(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
- ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"),
+ ("00:11:22:33:44:55", "vf2", "mlx4_core", "0x7"),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
- 'eth2': {
- 'downable': True,
- 'device_id': '0x7',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth2',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
+ "eth2": {
+ "downable": True,
+ "device_id": "0x7",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth2",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ mock.call(
+ ["ip", "link", "set", "eth2", "name", "vf2"], capture=True
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_macs_case_insensitive(self, mock_subp):
"""_rename_interfaces must support upper or lower case macs."""
renames = [
- ('aa:aa:aa:aa:aa:aa', 'en0', None, None),
- ('BB:BB:BB:BB:BB:BB', 'en1', None, None),
- ('cc:cc:cc:cc:cc:cc', 'en2', None, None),
- ('DD:DD:DD:DD:DD:DD', 'en3', None, None),
+ ("aa:aa:aa:aa:aa:aa", "en0", None, None),
+ ("BB:BB:BB:BB:BB:BB", "en1", None, None),
+ ("cc:cc:cc:cc:cc:cc", "en2", None, None),
+ ("DD:DD:DD:DD:DD:DD", "en3", None, None),
]
current_info = {
- 'eth0': {'downable': True, 'mac': 'AA:AA:AA:AA:AA:AA',
- 'name': 'eth0', 'up': False},
- 'eth1': {'downable': True, 'mac': 'bb:bb:bb:bb:bb:bb',
- 'name': 'eth1', 'up': False},
- 'eth2': {'downable': True, 'mac': 'cc:cc:cc:cc:cc:cc',
- 'name': 'eth2', 'up': False},
- 'eth3': {'downable': True, 'mac': 'DD:DD:DD:DD:DD:DD',
- 'name': 'eth3', 'up': False},
+ "eth0": {
+ "downable": True,
+ "mac": "AA:AA:AA:AA:AA:AA",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "mac": "bb:bb:bb:bb:bb:bb",
+ "name": "eth1",
+ "up": False,
+ },
+ "eth2": {
+ "downable": True,
+ "mac": "cc:cc:cc:cc:cc:cc",
+ "name": "eth2",
+ "up": False,
+ },
+ "eth3": {
+ "downable": True,
+ "mac": "DD:DD:DD:DD:DD:DD",
+ "name": "eth3",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
expected = [
- mock.call(['ip', 'link', 'set', 'eth%d' % i, 'name', 'en%d' % i],
- capture=True)
- for i in range(len(renames))]
+ mock.call(
+ ["ip", "link", "set", "eth%d" % i, "name", "en%d" % i],
+ capture=True,
+ )
+ for i in range(len(renames))
+ ]
mock_subp.assert_has_calls(expected)
class TestNetworkState(CiTestCase):
-
def test_bcast_addr(self):
"""Test mask_and_ipv4_to_bcast_addr proper execution."""
bcast_addr = network_state.mask_and_ipv4_to_bcast_addr
- self.assertEqual("192.168.1.255",
- bcast_addr("255.255.255.0", "192.168.1.1"))
- self.assertEqual("128.42.7.255",
- bcast_addr("255.255.248.0", "128.42.5.4"))
- self.assertEqual("10.1.21.255",
- bcast_addr("255.255.255.0", "10.1.21.4"))
+ self.assertEqual(
+ "192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1")
+ )
+ self.assertEqual(
+ "128.42.7.255", bcast_addr("255.255.248.0", "128.42.5.4")
+ )
+ self.assertEqual(
+ "10.1.21.255", bcast_addr("255.255.255.0", "10.1.21.4")
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
new file mode 100644
index 00000000..3c29e2f7
--- /dev/null
+++ b/tests/unittests/test_net_activators.py
@@ -0,0 +1,262 @@
+from collections import namedtuple
+from unittest.mock import patch
+
+import pytest
+
+from cloudinit.net.activators import (
+ DEFAULT_PRIORITY,
+ IfUpDownActivator,
+ NetplanActivator,
+ NetworkdActivator,
+ NetworkManagerActivator,
+ NoActivatorException,
+ search_activator,
+ select_activator,
+)
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.safeyaml import load
+
+V1_CONFIG = """\
+version: 1
+config:
+- type: physical
+ name: eth0
+- type: physical
+ name: eth1
+"""
+
+V2_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+"""
+
+NETPLAN_CALL_LIST = [
+ ((["netplan", "apply"],), {}),
+]
+
+
+@pytest.fixture
+def available_mocks():
+ mocks = namedtuple("Mocks", "m_which, m_file")
+ with patch("cloudinit.subp.which", return_value=True) as m_which:
+ with patch("os.path.isfile", return_value=True) as m_file:
+ yield mocks(m_which, m_file)
+
+
+@pytest.fixture
+def unavailable_mocks():
+ mocks = namedtuple("Mocks", "m_which, m_file")
+ with patch("cloudinit.subp.which", return_value=False) as m_which:
+ with patch("os.path.isfile", return_value=False) as m_file:
+ yield mocks(m_which, m_file)
+
+
+class TestSearchAndSelect:
+ def test_defaults(self, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY
+
+ activator = select_activator()
+ assert activator == DEFAULT_PRIORITY[0]
+
+ def test_priority(self, available_mocks):
+ new_order = [NetplanActivator, NetworkManagerActivator]
+ resp = search_activator(priority=new_order)
+ assert resp == new_order
+
+ activator = select_activator(priority=new_order)
+ assert activator == new_order[0]
+
+ def test_target(self, available_mocks):
+ search_activator(target="/tmp")
+ assert "/tmp" == available_mocks.m_which.call_args[1]["target"]
+
+ select_activator(target="/tmp")
+ assert "/tmp" == available_mocks.m_which.call_args[1]["target"]
+
+ @patch(
+ "cloudinit.net.activators.IfUpDownActivator.available",
+ return_value=False,
+ )
+ def test_first_not_available(self, m_available, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY[1:]
+
+ resp = select_activator()
+ assert resp == DEFAULT_PRIORITY[1]
+
+ def test_priority_not_exist(self, available_mocks):
+ with pytest.raises(ValueError):
+ search_activator(priority=["spam", "eggs"])
+ with pytest.raises(ValueError):
+ select_activator(priority=["spam", "eggs"])
+
+ def test_none_available(self, unavailable_mocks):
+ resp = search_activator()
+ assert resp == []
+
+ with pytest.raises(NoActivatorException):
+ select_activator()
+
+
+IF_UP_DOWN_AVAILABLE_CALLS = [
+ (("ifquery",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+ (("ifup",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+ (("ifdown",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+]
+
+NETPLAN_AVAILABLE_CALLS = [
+ (("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}),
+]
+
+NETWORK_MANAGER_AVAILABLE_CALLS = [
+ (("nmcli",), {"target": None}),
+]
+
+NETWORKD_AVAILABLE_CALLS = [
+ (("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}),
+ (("systemctl",), {"search": ["/usr/sbin", "/bin"], "target": None}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, available_calls",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
+ (NetplanActivator, NETPLAN_AVAILABLE_CALLS),
+ (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+ (NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
+ ],
+)
+class TestActivatorsAvailable:
+ def test_available(self, activator, available_calls, available_mocks):
+ activator.available()
+ assert available_mocks.m_which.call_args_list == available_calls
+
+
+IF_UP_DOWN_BRING_UP_CALL_LIST = [
+ ((["ifup", "eth0"],), {}),
+ ((["ifup", "eth1"],), {}),
+]
+
+NETWORK_MANAGER_BRING_UP_CALL_LIST = [
+ ((["nmcli", "connection", "up", "ifname", "eth0"],), {}),
+ ((["nmcli", "connection", "up", "ifname", "eth1"],), {}),
+]
+
+NETWORKD_BRING_UP_CALL_LIST = [
+ ((["ip", "link", "set", "up", "eth0"],), {}),
+ ((["ip", "link", "set", "up", "eth1"],), {}),
+ ((["systemctl", "restart", "systemd-networkd", "systemd-resolved"],), {}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, expected_call_list",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST),
+ ],
+)
+class TestActivatorsBringUp:
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_up_interface("eth0")
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ index = 0
+ activator.bring_up_interfaces(["eth0", "eth1"])
+ for call in m_subp.call_args_list:
+ assert call == expected_call_list[index]
+ index += 1
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+ ((["ifdown", "eth0"],), {}),
+ ((["ifdown", "eth1"],), {}),
+]
+
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+ ((["nmcli", "connection", "down", "eth0"],), {}),
+ ((["nmcli", "connection", "down", "eth1"],), {}),
+]
+
+NETWORKD_BRING_DOWN_CALL_LIST = [
+ ((["ip", "link", "set", "down", "eth0"],), {}),
+ ((["ip", "link", "set", "down", "eth1"],), {}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, expected_call_list",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST),
+ ],
+)
+class TestActivatorsBringDown:
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interface("eth0")
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interfaces(["eth0", "eth1"])
+ assert expected_call_list == m_subp.call_args_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 414b4830..3facb2bb 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -1,19 +1,79 @@
-from cloudinit import net
+import os
-from cloudinit.tests.helpers import (CiTestCase, mock, readResource)
+import cloudinit.net
+import cloudinit.net.network_state
+from cloudinit import safeyaml
+from tests.unittests.helpers import CiTestCase, dir2dict, mock, readResource
SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+V1 = """
+config:
+- id: eno1
+ mac_address: 08:94:ef:51:ae:e0
+ mtu: 1470
+ name: eno1
+ subnets:
+ - address: 172.20.80.129/25
+ type: static
+ type: physical
+version: 1
+"""
class TestInterfacesByMac(CiTestCase):
-
- @mock.patch('cloudinit.subp.subp')
- @mock.patch('cloudinit.util.is_FreeBSD')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.util.is_FreeBSD")
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
- a = net.get_interfaces_by_mac()
- assert a == {'52:54:00:50:b7:0d': 'vtnet0',
- '80:00:73:63:5c:48': 're0.33',
- '02:14:39:0e:25:00': 'bridge0',
- '02:ff:60:8c:f3:72': 'vnet0:11'}
+ a = cloudinit.net.get_interfaces_by_mac()
+ assert a == {
+ "52:54:00:50:b7:0d": "vtnet0",
+ "80:00:73:63:5c:48": "re0.33",
+ "02:14:39:0e:25:00": "bridge0",
+ "02:ff:60:8c:f3:72": "vnet0:11",
+ }
+
+
+class TestFreeBSDRoundTrip(CiTestCase):
+ def _render_and_read(
+ self, network_config=None, state=None, netplan_path=None, target=None
+ ):
+ if target is None:
+ target = self.tmp_dir()
+ os.mkdir("%s/etc" % target)
+ with open("%s/etc/rc.conf" % target, "a") as fd:
+ fd.write("# dummy rc.conf\n")
+ with open("%s/etc/resolv.conf" % target, "a") as fd:
+ fd.write("# dummy resolv.conf\n")
+
+ if network_config:
+ ns = cloudinit.net.network_state.parse_net_config_data(
+ network_config
+ )
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = cloudinit.net.freebsd.Renderer()
+ renderer.render_network_state(ns, target=target)
+ return dir2dict(target)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_render_output_has_yaml(self, mock_subp):
+
+ entry = {
+ "yaml": V1,
+ }
+ network_config = safeyaml.load(entry["yaml"])
+ ns = cloudinit.net.network_state.parse_net_config_data(network_config)
+ files = self._render_and_read(state=ns)
+ assert files == {
+ "/etc/resolv.conf": "# dummy resolv.conf\n",
+ "/etc/rc.conf": (
+ "# dummy rc.conf\n"
+ "ifconfig_eno1="
+ "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
+ ),
+ }
diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py
new file mode 100644
index 00000000..aecce921
--- /dev/null
+++ b/tests/unittests/test_netinfo.py
@@ -0,0 +1,353 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests netinfo module functions and classes."""
+
+import json
+from copy import copy
+
+import pytest
+
+from cloudinit import subp
+from cloudinit.netinfo import (
+ _netdev_info_iproute_json,
+ netdev_info,
+ netdev_pformat,
+ route_pformat,
+)
+from tests.unittests.helpers import mock, readResource
+
+# Example ifconfig and route output
+SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
+SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
+SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
+SAMPLE_IPADDRSHOW_JSON = readResource("netinfo/sample-ipaddrshow-json")
+SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
+SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
+SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
+SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
+NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
+ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
+FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output")
+
+
+class TestNetInfo:
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_old_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering old nettools info."""
+ m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ assert NETDEV_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_new_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ assert NETDEV_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ print()
+ print(content)
+ print()
+ assert FREEBSD_NETDEV_OUT == content
+
+ @pytest.mark.parametrize(
+ "resource,is_json",
+ [(SAMPLE_IPADDRSHOW_OUT, False), (SAMPLE_IPADDRSHOW_JSON, True)],
+ )
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_iproute_pformat(self, m_subp, m_which, resource, is_json):
+        """netdev_pformat properly rendering ip route info (json or text)."""
+ m_subp.return_value = (resource, "")
+ if not is_json:
+ m_subp.side_effect = [subp.ProcessExecutionError, (resource, "")]
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ content = netdev_pformat()
+ new_output = copy(NETDEV_FORMATTED_OUT)
+ # ip route show describes global scopes on ipv4 addresses
+ # whereas ifconfig does not. Add proper global/host scope to output.
+ new_output = new_output.replace("| . | 50:7b", "| global | 50:7b")
+ new_output = new_output.replace(
+ "255.0.0.0 | . |", "255.0.0.0 | host |"
+ )
+ assert new_output == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_warn_on_missing_commands(self, m_subp, m_which, caplog):
+        """netdev_pformat warns when missing both 'ip' and 'ifconfig'."""
+        m_which.return_value = None  # Neither ip nor ifconfig found
+ content = netdev_pformat()
+ assert "\n" == content
+ log = caplog.records[0]
+ assert log.levelname == "WARNING"
+ assert log.msg == (
+ "Could not print networks: missing 'ip' and 'ifconfig' commands"
+ )
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_info_nettools_down(self, m_subp, m_which):
+ """test netdev_info using nettools and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/new-ifconfig-output-down"),
+ "",
+ )
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ assert netdev_info(".") == {
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ "lo": {
+ "ipv4": [{"ip": "127.0.0.1", "mask": "255.0.0.0"}],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ }
+
+ @pytest.mark.parametrize(
+ "resource,is_json",
+ [
+ ("netinfo/sample-ipaddrshow-output-down", False),
+ ("netinfo/sample-ipaddrshow-json-down", True),
+ ],
+ )
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_info_iproute_down(
+ self, m_subp, m_which, resource, is_json
+ ):
+ """Test netdev_info with ip and down interfaces."""
+ m_subp.return_value = (readResource(resource), "")
+ if not is_json:
+ m_subp.side_effect = [
+ subp.ProcessExecutionError,
+ (readResource(resource), ""),
+ ]
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ assert netdev_info(".") == {
+ "lo": {
+ "ipv4": [
+ {
+ "ip": "127.0.0.1",
+ "bcast": ".",
+ "mask": "255.0.0.0",
+ "scope": "host",
+ }
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ }
+
+ @mock.patch("cloudinit.netinfo.netdev_info")
+ def test_netdev_pformat_with_down(self, m_netdev_info):
+ """test netdev_pformat when netdev_info returns 'down' interfaces."""
+ m_netdev_info.return_value = {
+ "lo": {
+ "ipv4": [
+ {"ip": "127.0.0.1", "mask": "255.0.0.0", "scope": "host"}
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ }
+ assert (
+ readResource("netinfo/netdev-formatted-output-down")
+ == netdev_pformat()
+ )
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_nettools_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering nettools route info."""
+
+ def subp_netstat_route_selector(*args, **kwargs):
+ if args[0] == ["netstat", "--route", "--numeric", "--extend"]:
+ return (SAMPLE_ROUTE_OUT_V4, "")
+ if args[0] == ["netstat", "-A", "inet6", "--route", "--numeric"]:
+ return (SAMPLE_ROUTE_OUT_V6, "")
+ raise Exception("Unexpected subp call %s" % args[0])
+
+ m_subp.side_effect = subp_netstat_route_selector
+ m_which.side_effect = lambda x: x if x == "netstat" else None
+ content = route_pformat()
+ assert ROUTE_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_iproute_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering ip route info."""
+
+ def subp_iproute_selector(*args, **kwargs):
+ if ["ip", "-o", "route", "list"] == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V4, "")
+ v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"]
+ if v6cmd == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V6, "")
+ raise Exception("Unexpected subp call %s" % args[0])
+
+ m_subp.side_effect = subp_iproute_selector
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ content = route_pformat()
+ assert ROUTE_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_warn_on_missing_commands(self, m_subp, m_which, caplog):
+ """route_pformat warns when missing both ip and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+ content = route_pformat()
+ assert "\n" == content
+ log = caplog.records[0]
+ assert log.levelname == "WARNING"
+ assert log.msg == (
+ "Could not print routes: missing 'ip' and 'netstat' commands"
+ )
+ m_subp.assert_not_called()
+
+ @pytest.mark.parametrize(
+ "input,expected",
+ [
+ # Test hwaddr set when link_type is ether,
+ # Test up True when flags contains UP and LOWER_UP
+ (
+ [
+ {
+ "ifname": "eth0",
+ "link_type": "ether",
+ "address": "00:00:00:00:00:00",
+ "flags": ["LOOPBACK", "UP", "LOWER_UP"],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "00:00:00:00:00:00",
+ "ipv4": [],
+ "ipv6": [],
+ "up": True,
+ }
+ },
+ ),
+ # Test hwaddr not set when link_type is not ether
+ # Test up False when flags does not contain both UP and LOWER_UP
+ (
+ [
+ {
+ "ifname": "eth0",
+ "link_type": "none",
+ "address": "00:00:00:00:00:00",
+ "flags": ["LOOPBACK", "UP"],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "",
+ "ipv4": [],
+ "ipv6": [],
+ "up": False,
+ }
+ },
+ ),
+ (
+ [
+ {
+ "ifname": "eth0",
+ "addr_info": [
+ # Test for ipv4:
+ # ip set correctly
+ # mask set correctly
+ # bcast set correctly
+ # scope set correctly
+ {
+ "family": "inet",
+ "local": "10.0.0.1",
+ "broadcast": "10.0.0.255",
+ "prefixlen": 24,
+ "scope": "global",
+ },
+ # Test for ipv6:
+ # ip set correctly
+ # mask set correctly when no 'address' present
+ # scope6 set correctly
+ {
+ "family": "inet6",
+ "local": "fd12:3456:7890:1234::5678:9012",
+ "prefixlen": 64,
+ "scope": "global",
+ },
+ # Test for ipv6:
+ # mask not set when 'address' present
+ {
+ "family": "inet6",
+ "local": "fd12:3456:7890:1234::5678:9012",
+ "address": "fd12:3456:7890:1234::1",
+ "prefixlen": 64,
+ },
+ ],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "",
+ "ipv4": [
+ {
+ "ip": "10.0.0.1",
+ "mask": "255.255.255.0",
+ "bcast": "10.0.0.255",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [
+ {
+ "ip": "fd12:3456:7890:1234::5678:9012/64",
+ "scope6": "global",
+ },
+ {
+ "ip": "fd12:3456:7890:1234::5678:9012",
+ "scope6": "",
+ },
+ ],
+ "up": False,
+ }
+ },
+ ),
+ ],
+ )
+ def test_netdev_info_iproute_json(self, input, expected):
+ out = _netdev_info_iproute_json(json.dumps(input))
+ assert out == expected
+
+
+# vi: ts=4 expandtab
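
The iproute parametrized cases above simulate an `ip` binary without JSON support by making the first subp call raise ProcessExecutionError, so netinfo is expected to retry with plain-text output. A minimal sketch of that JSON-first fallback pattern (not cloud-init's actual implementation; the function name and command flags are illustrative):

from cloudinit import subp


def ip_addr_output():
    """Return raw `ip addr` output and whether it is JSON."""
    try:
        out, _err = subp.subp(["ip", "--json", "addr"])
        return out, True  # structured JSON output available
    except subp.ProcessExecutionError:
        out, _err = subp.subp(["ip", "addr", "show"])
        return out, False  # fall back to parsing the text format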
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index abbb29b8..83141263 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -1,46 +1,46 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase, populate_dir
-
import shutil
import tempfile
+from cloudinit import util
+from tests.unittests.helpers import TestCase, populate_dir
-class TestPathPrefix2Dict(TestCase):
+class TestPathPrefix2Dict(TestCase):
def setUp(self):
super(TestPathPrefix2Dict, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
def test_required_only(self):
- dirdata = {'f1': b'f1content', 'f2': b'f2content'}
+ dirdata = {"f1": b"f1content", "f2": b"f2content"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
+ ret = util.pathprefix2dict(self.tmp, required=["f1", "f2"])
self.assertEqual(dirdata, ret)
def test_required_missing(self):
- dirdata = {'f1': b'f1content'}
+ dirdata = {"f1": b"f1content"}
populate_dir(self.tmp, dirdata)
- kwargs = {'required': ['f1', 'f2']}
+ kwargs = {"required": ["f1", "f2"]}
self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
def test_no_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
+ dirdata = {"f1": b"f1c", "f2": b"f2c"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=None,
- optional=['f1', 'f2'])
+ ret = util.pathprefix2dict(
+ self.tmp, required=None, optional=["f1", "f2"]
+ )
self.assertEqual(dirdata, ret)
def test_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
+ dirdata = {"f1": b"f1c", "f2": b"f2c"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
+ ret = util.pathprefix2dict(self.tmp, required=["f1"], optional=["f2"])
self.assertEqual(dirdata, ret)
+
# vi: ts=4 expandtab
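
For reference, the behaviour these tests pin down can be summarised with a short usage sketch of util.pathprefix2dict; the temporary directory and file names below are hypothetical.

import os
import tempfile

from cloudinit import util

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "f1"), "wb") as f:
    f.write(b"f1content")

data = util.pathprefix2dict(tmp, required=["f1"], optional=["f2"])
assert data == {"f1": b"f1content"}  # missing optional files are skipped
# util.pathprefix2dict(tmp, required=["f1", "f2"])  # raises ValueError: f2 missing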
diff --git a/cloudinit/tests/test_persistence.py b/tests/unittests/test_persistence.py
index ec1152a9..ec1152a9 100644
--- a/cloudinit/tests/test_persistence.py
+++ b/tests/unittests/test_persistence.py
diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py
index 2b625026..28ee04ec 100644
--- a/tests/unittests/test_registry.py
+++ b/tests/unittests/test_registry.py
@@ -1,32 +1,33 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.registry import DictRegistry
-
-from cloudinit.tests.helpers import (mock, TestCase)
+from tests.unittests.helpers import TestCase, mock
class TestDictRegistry(TestCase):
-
def test_added_item_included_in_output(self):
registry = DictRegistry()
- item_key, item_to_register = 'test_key', mock.Mock()
+ item_key, item_to_register = "test_key", mock.Mock()
registry.register_item(item_key, item_to_register)
- self.assertEqual({item_key: item_to_register},
- registry.registered_items)
+ self.assertEqual(
+ {item_key: item_to_register}, registry.registered_items
+ )
def test_registry_starts_out_empty(self):
self.assertEqual({}, DictRegistry().registered_items)
def test_modifying_registered_items_isnt_exposed_to_other_callers(self):
registry = DictRegistry()
- registry.registered_items['test_item'] = mock.Mock()
+ registry.registered_items["test_item"] = mock.Mock()
self.assertEqual({}, registry.registered_items)
def test_keys_cannot_be_replaced(self):
registry = DictRegistry()
- item_key = 'test_key'
+ item_key = "test_key"
registry.register_item(item_key, mock.Mock())
- self.assertRaises(ValueError,
- registry.register_item, item_key, mock.Mock())
+ self.assertRaises(
+ ValueError, registry.register_item, item_key, mock.Mock()
+ )
+
# vi: ts=4 expandtab
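
A usage sketch of DictRegistry matching the behaviour asserted above; the key names are hypothetical.

from unittest import mock

from cloudinit.registry import DictRegistry

registry = DictRegistry()
registry.register_item("handler_a", mock.Mock())

items = registry.registered_items  # a copy: mutating it does not leak back
items["handler_b"] = mock.Mock()
assert "handler_b" not in registry.registered_items

# Re-registering an existing key is rejected.
try:
    registry.register_item("handler_a", mock.Mock())
except ValueError:
    pass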
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 495e2669..30fbd1a4 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -1,59 +1,96 @@
"""Tests for tools/render-cloudcfg"""
-import os
import sys
import pytest
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, templater, util
+from tests.unittests.helpers import cloud_init_project_dir
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
-DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
- "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+DISTRO_VARIANTS = [
+ "amazon",
+ "arch",
+ "centos",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "freebsd",
+ "gentoo",
+ "netbsd",
+ "openbsd",
+ "photon",
+ "rhel",
+ "suse",
+ "ubuntu",
+ "unknown",
+]
@pytest.mark.allow_subp_for(sys.executable)
class TestRenderCloudCfg:
- cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
- tmpl_path = os.path.realpath('config/cloud.cfg.tmpl')
+ cmd = [sys.executable, cloud_init_project_dir("tools/render-cloudcfg")]
+ tmpl_path = cloud_init_project_dir("config/cloud.cfg.tmpl")
- @pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
+ def test_variant_sets_distro_in_cloud_cfg_subp(self, tmpdir):
+ outfile = tmpdir.join("outcfg").strpath
+
+ subp.subp(self.cmd + ["--variant", "ubuntu", self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+ assert system_cfg["system_info"]["distro"] == "ubuntu"
+
+ @pytest.mark.parametrize("variant", (DISTRO_VARIANTS))
def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+        """Testing parametrized inputs with the imported function saves
+        ~0.5s per call versus invoking the tool via subp.
+        """
+ outfile = tmpdir.join("outcfg").strpath
+
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
- if variant == 'unknown':
- variant = 'ubuntu' # Unknown is defaulted to ubuntu
- assert system_cfg['system_info']['distro'] == variant
+ if variant == "unknown":
+            variant = "ubuntu"  # unknown defaults to ubuntu
+ assert system_cfg["system_info"]["distro"] == variant
- @pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
+ @pytest.mark.parametrize("variant", (DISTRO_VARIANTS))
def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+        """Testing parametrized inputs with the imported function saves
+        ~0.5s per call versus invoking the tool via subp.
+        """
+ outfile = tmpdir.join("outcfg").strpath
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
default_user_exceptions = {
- 'amazon': 'ec2-user', 'debian': 'ubuntu', 'unknown': 'ubuntu'}
- default_user = system_cfg['system_info']['default_user']['name']
+ "amazon": "ec2-user",
+ "debian": "ubuntu",
+ "unknown": "ubuntu",
+ }
+ default_user = system_cfg["system_info"]["default_user"]["name"]
assert default_user == default_user_exceptions.get(variant, variant)
- @pytest.mark.parametrize('variant,renderers', (
- ('freebsd', ['freebsd']), ('netbsd', ['netbsd']),
- ('openbsd', ['openbsd']), ('ubuntu', ['netplan', 'eni', 'sysconfig']))
+ @pytest.mark.parametrize(
+ "variant,renderers",
+ (
+ ("freebsd", ["freebsd"]),
+ ("netbsd", ["netbsd"]),
+ ("openbsd", ["openbsd"]),
+ ("ubuntu", ["netplan", "eni", "sysconfig"]),
+ ),
)
def test_variant_sets_network_renderer_priority_in_cloud_cfg(
self, variant, renderers, tmpdir
):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+        """Testing parametrized inputs with the imported function saves
+        ~0.5s per call versus invoking the tool via subp.
+        """
+ outfile = tmpdir.join("outcfg").strpath
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
- assert renderers == system_cfg['system_info']['network']['renderers']
+ assert renderers == system_cfg["system_info"]["network"]["renderers"]
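
The rewrite above swaps subp invocations of tools/render-cloudcfg for the imported templater.render_cloudcfg. A sketch of that in-process path, assuming a hypothetical output file:

from cloudinit import templater, util
from tests.unittests.helpers import cloud_init_project_dir

tmpl = cloud_init_project_dir("config/cloud.cfg.tmpl")
templater.render_cloudcfg("freebsd", tmpl, "/tmp/cloud.cfg")
with open("/tmp/cloud.cfg") as stream:
    cfg = util.load_yaml(stream.read())
assert cfg["system_info"]["distro"] == "freebsd"
assert cfg["system_info"]["network"]["renderers"] == ["freebsd"]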
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 9f11fd5c..f6dd96e0 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -5,109 +5,133 @@
from unittest import mock
from cloudinit import reporting
-from cloudinit.reporting import events
-from cloudinit.reporting import handlers
-
-from cloudinit.tests.helpers import TestCase
+from cloudinit.reporting import events, handlers
+from tests.unittests.helpers import TestCase
def _fake_registry():
- return mock.Mock(registered_items={'a': mock.MagicMock(),
- 'b': mock.MagicMock()})
+ return mock.Mock(
+ registered_items={"a": mock.MagicMock(), "b": mock.MagicMock()}
+ )
class TestReportStartEvent(TestCase):
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_report_start_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
- event_name, event_description = 'my_test_event', 'my description'
+ self, instantiated_handler_registry
+ ):
+ event_name, event_description = "my_test_event", "my description"
events.report_start_event(event_name, event_description)
- expected_string_representation = ': '.join(
- ['start', event_name, event_description])
- for _, handler in (
- instantiated_handler_registry.registered_items.items()):
+ expected_string_representation = ": ".join(
+ ["start", event_name, event_description]
+ )
+ for (
+ _,
+ handler,
+ ) in instantiated_handler_registry.registered_items.items():
self.assertEqual(1, handler.publish_event.call_count)
event = handler.publish_event.call_args[0][0]
self.assertEqual(expected_string_representation, event.as_string())
class TestReportFinishEvent(TestCase):
-
def _report_finish_event(self, result=events.status.SUCCESS):
- event_name, event_description = 'my_test_event', 'my description'
+ event_name, event_description = "my_test_event", "my description"
events.report_finish_event(
- event_name, event_description, result=result)
+ event_name, event_description, result=result
+ )
return event_name, event_description
def assertHandlersPassedObjectWithAsString(
- self, handlers, expected_as_string):
+ self, handlers, expected_as_string
+ ):
for _, handler in handlers.items():
self.assertEqual(1, handler.publish_event.call_count)
event = handler.publish_event.call_args[0][0]
self.assertEqual(expected_as_string, event.as_string())
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_report_finish_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event()
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.SUCCESS, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_reporting_successful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event(
- result=events.status.SUCCESS)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
+ result=events.status.SUCCESS
+ )
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.SUCCESS, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_reporting_unsuccessful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event(
- result=events.status.FAIL)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.FAIL, event_description])
+ result=events.status.FAIL
+ )
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.FAIL, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
def test_invalid_result_raises_attribute_error(self):
self.assertRaises(ValueError, self._report_finish_event, ("BOGUS",))
class TestReportingEvent(TestCase):
-
def test_as_string(self):
- event_type, name, description = 'test_type', 'test_name', 'test_desc'
+ event_type, name, description = "test_type", "test_name", "test_desc"
event = events.ReportingEvent(event_type, name, description)
- expected_string_representation = ': '.join(
- [event_type, name, description])
+ expected_string_representation = ": ".join(
+ [event_type, name, description]
+ )
self.assertEqual(expected_string_representation, event.as_string())
def test_as_dict(self):
- event_type, name, desc = 'test_type', 'test_name', 'test_desc'
+ event_type, name, desc = "test_type", "test_name", "test_desc"
event = events.ReportingEvent(event_type, name, desc)
- expected = {'event_type': event_type, 'name': name,
- 'description': desc, 'origin': 'cloudinit'}
+ expected = {
+ "event_type": event_type,
+ "name": name,
+ "description": desc,
+ "origin": "cloudinit",
+ }
# allow for timestamp to differ, but must be present
as_dict = event.as_dict()
- self.assertIn('timestamp', as_dict)
- del as_dict['timestamp']
+ self.assertIn("timestamp", as_dict)
+ del as_dict["timestamp"]
self.assertEqual(expected, as_dict)
@@ -115,145 +139,190 @@ class TestReportingEvent(TestCase):
class TestFinishReportingEvent(TestCase):
def test_as_has_result(self):
result = events.status.SUCCESS
- name, desc = 'test_name', 'test_desc'
+ name, desc = "test_name", "test_desc"
event = events.FinishReportingEvent(name, desc, result)
ret = event.as_dict()
- self.assertTrue('result' in ret)
- self.assertEqual(ret['result'], result)
+ self.assertTrue("result" in ret)
+ self.assertEqual(ret["result"], result)
+ def test_has_result_with_optional_post_files(self):
+ result = events.status.SUCCESS
+ name, desc, files = (
+ "test_name",
+ "test_desc",
+ ["/really/fake/path/install.log"],
+ )
+ event = events.FinishReportingEvent(
+ name, desc, result, post_files=files
+ )
+ ret = event.as_dict()
+ self.assertTrue("result" in ret)
+ self.assertTrue("files" in ret)
+ self.assertEqual(ret["result"], result)
+ posted_install_log = ret["files"][0]
+ self.assertTrue("path" in posted_install_log)
+ self.assertTrue("content" in posted_install_log)
+ self.assertTrue("encoding" in posted_install_log)
+ self.assertEqual(posted_install_log["path"], files[0])
+ self.assertEqual(posted_install_log["encoding"], "base64")
-class TestBaseReportingHandler(TestCase):
+class TestBaseReportingHandler(TestCase):
def test_base_reporting_handler_is_abstract(self):
regexp = r".*abstract.*publish_event.*"
self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler)
class TestLogHandler(TestCase):
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_appropriate_logger_used(self, getLogger):
- event_type, event_name = 'test_type', 'test_name'
- event = events.ReportingEvent(event_type, event_name, 'description')
+ event_type, event_name = "test_type", "test_name"
+ event = events.ReportingEvent(event_type, event_name, "description")
reporting.handlers.LogHandler().publish_event(event)
self.assertEqual(
- [mock.call(
- 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))],
- getLogger.call_args_list)
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ [
+ mock.call(
+ "cloudinit.reporting.{0}.{1}".format(
+ event_type, event_name
+ )
+ )
+ ],
+ getLogger.call_args_list,
+ )
+
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_single_log_message_at_info_published(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
+ event = events.ReportingEvent("type", "name", "description")
reporting.handlers.LogHandler().publish_event(event)
self.assertEqual(1, getLogger.return_value.log.call_count)
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_log_message_uses_event_as_string(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
+ event = events.ReportingEvent("type", "name", "description")
reporting.handlers.LogHandler(level="INFO").publish_event(event)
- self.assertIn(event.as_string(),
- getLogger.return_value.log.call_args[0][1])
+ self.assertIn(
+ event.as_string(), getLogger.return_value.log.call_args[0][1]
+ )
class TestDefaultRegisteredHandler(TestCase):
-
def test_log_handler_registered_by_default(self):
registered_items = (
- reporting.instantiated_handler_registry.registered_items)
+ reporting.instantiated_handler_registry.registered_items
+ )
for _, item in registered_items.items():
if isinstance(item, reporting.handlers.LogHandler):
break
else:
- self.fail('No reporting LogHandler registered by default.')
+ self.fail("No reporting LogHandler registered by default.")
class TestReportingConfiguration(TestCase):
-
- @mock.patch.object(reporting, 'instantiated_handler_registry')
+ @mock.patch.object(reporting, "instantiated_handler_registry")
def test_empty_configuration_doesnt_add_handlers(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
reporting.update_configuration({})
self.assertEqual(
- 0, instantiated_handler_registry.register_item.call_count)
+ 0, instantiated_handler_registry.register_item.call_count
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_looks_up_handler_by_type_and_adds_it(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
+ handler_name = "my_test_handler"
reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
+ {handler_name: {"type": handler_type_name}}
+ )
self.assertEqual(
{handler_name: handler_cls.return_value},
- reporting.instantiated_handler_registry.registered_items)
+ reporting.instantiated_handler_registry.registered_items,
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_uses_non_type_parts_of_config_dict_as_kwargs(
- self, available_handlers):
- handler_type_name = 'test_handler'
+ self, available_handlers
+ ):
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- extra_kwargs = {'foo': 'bar', 'bar': 'baz'}
+ extra_kwargs = {"foo": "bar", "bar": "baz"}
handler_config = extra_kwargs.copy()
- handler_config.update({'type': handler_type_name})
- handler_name = 'my_test_handler'
+ handler_config.update({"type": handler_type_name})
+ handler_name = "my_test_handler"
reporting.update_configuration({handler_name: handler_config})
self.assertEqual(
handler_cls.return_value,
reporting.instantiated_handler_registry.registered_items[
- handler_name])
- self.assertEqual([mock.call(**extra_kwargs)],
- handler_cls.call_args_list)
+ handler_name
+ ],
+ )
+ self.assertEqual(
+ [mock.call(**extra_kwargs)], handler_cls.call_args_list
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_handler_config_not_modified(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_config = {'type': handler_type_name, 'foo': 'bar'}
+ handler_config = {"type": handler_type_name, "foo": "bar"}
expected_handler_config = handler_config.copy()
- reporting.update_configuration({'my_test_handler': handler_config})
+ reporting.update_configuration({"my_test_handler": handler_config})
self.assertEqual(expected_handler_config, handler_config)
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_handlers_removed_if_falseish_specified(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
+ handler_name = "my_test_handler"
reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
+ {handler_name: {"type": handler_type_name}}
+ )
self.assertEqual(
- 1, len(reporting.instantiated_handler_registry.registered_items))
+ 1, len(reporting.instantiated_handler_registry.registered_items)
+ )
reporting.update_configuration({handler_name: None})
self.assertEqual(
- 0, len(reporting.instantiated_handler_registry.registered_items))
+ 0, len(reporting.instantiated_handler_registry.registered_items)
+ )
class TestReportingEventStack(TestCase):
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_start_and_finish_success(self, report_start, report_finish):
with events.ReportEventStack(name="myname", description="mydesc"):
pass
self.assertEqual(
- [mock.call('myname', 'mydesc')], report_start.call_args_list)
+ [mock.call("myname", "mydesc")], report_start.call_args_list
+ )
self.assertEqual(
- [mock.call('myname', 'mydesc', events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ [
+ mock.call(
+ "myname", "mydesc", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_finish_exception_defaults_fail(self, report_start, report_finish):
name = "myname"
desc = "mydesc"
@@ -265,31 +334,34 @@ class TestReportingEventStack(TestCase):
self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
self.assertEqual(
[mock.call(name, desc, events.status.FAIL, post_files=[])],
- report_finish.call_args_list)
+ report_finish.call_args_list,
+ )
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_result_on_exception_used(self, report_start, report_finish):
name = "myname"
desc = "mydesc"
try:
with events.ReportEventStack(
- name, desc, result_on_exception=events.status.WARN):
+ name, desc, result_on_exception=events.status.WARN
+ ):
raise ValueError("This didnt work")
except ValueError:
pass
self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
self.assertEqual(
[mock.call(name, desc, events.status.WARN, post_files=[])],
- report_finish.call_args_list)
+ report_finish.call_args_list,
+ )
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_child_fullname_respects_parent(self, report_start):
parent_name = "topname"
c1_name = "c1name"
c2_name = "c2name"
- c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name])
- c1_expected_fullname = '/'.join([parent_name, c1_name])
+ c2_expected_fullname = "/".join([parent_name, c1_name, c2_name])
+ c1_expected_fullname = "/".join([parent_name, c1_name])
parent = events.ReportEventStack(parent_name, "topdesc")
c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent)
@@ -299,8 +371,8 @@ class TestReportingEventStack(TestCase):
with c2:
report_start.assert_called_with(c2_expected_fullname, "c2desc")
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_child_result_bubbles_up(self, report_start, report_finish):
parent = events.ReportEventStack("topname", "topdesc")
child = events.ReportEventStack("c_name", "c_desc", parent=parent)
@@ -309,42 +381,53 @@ class TestReportingEventStack(TestCase):
child.result = events.status.WARN
report_finish.assert_called_with(
- "topname", "topdesc", events.status.WARN, post_files=[])
+ "topname", "topdesc", events.status.WARN, post_files=[]
+ )
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_message_used_in_finish(self, report_finish):
- with events.ReportEventStack("myname", "mydesc",
- message="mymessage"):
+ with events.ReportEventStack("myname", "mydesc", message="mymessage"):
pass
self.assertEqual(
- [mock.call("myname", "mymessage", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ [
+ mock.call(
+ "myname", "mymessage", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_message_updatable(self, report_finish):
with events.ReportEventStack("myname", "mydesc") as c:
c.message = "all good"
self.assertEqual(
- [mock.call("myname", "all good", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ [
+ mock.call(
+ "myname", "all good", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_start_event")
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_reporting_disabled_does_not_report_events(
- self, report_start, report_finish):
+ self, report_start, report_finish
+ ):
with events.ReportEventStack("a", "b", reporting_enabled=False):
pass
self.assertEqual(report_start.call_count, 0)
self.assertEqual(report_finish.call_count, 0)
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ @mock.patch("cloudinit.reporting.events.report_start_event")
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_reporting_child_default_to_parent(
- self, report_start, report_finish):
+ self, report_start, report_finish
+ ):
parent = events.ReportEventStack(
- "pname", "pdesc", reporting_enabled=False)
+ "pname", "pdesc", reporting_enabled=False
+ )
child = events.ReportEventStack("cname", "cdesc", parent=parent)
with parent:
with child:
@@ -353,8 +436,9 @@ class TestReportingEventStack(TestCase):
self.assertEqual(report_finish.call_count, 0)
def test_reporting_event_has_sane_repr(self):
- myrep = events.ReportEventStack("fooname", "foodesc",
- reporting_enabled=True).__repr__()
+ myrep = events.ReportEventStack(
+ "fooname", "foodesc", reporting_enabled=True
+ ).__repr__()
self.assertIn("fooname", myrep)
self.assertIn("foodesc", myrep)
self.assertIn("True", myrep)
@@ -368,4 +452,5 @@ class TestStatusAccess(TestCase):
def test_invalid_status_access_raises_value_error(self):
self.assertRaises(AttributeError, getattr, events.status, "BOGUS")
+
# vi: ts=4 expandtab
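
A usage sketch of ReportEventStack mirroring the behaviours tested above; the event name, descriptions and message are hypothetical, and handlers are assumed to have been configured via reporting.update_configuration.

from cloudinit.reporting import events

with events.ReportEventStack(
    "install", "installing packages", result_on_exception=events.status.WARN
) as ctx:
    ctx.message = "all packages installed"  # reported in the finish event
# A clean exit reports SUCCESS; an exception inside the block would report
# WARN here (FAIL is the default when result_on_exception is not given).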
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index 9324b78d..35ab0c58 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -1,27 +1,25 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import zlib
-
-from cloudinit.reporting import events, instantiated_handler_registry
-from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler
-
import json
import os
+import re
import struct
import time
-import re
+import zlib
from unittest import mock
from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.reporting import events, instantiated_handler_registry
+from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler
from cloudinit.sources.helpers import azure
+from tests.unittests.helpers import CiTestCase
class TestKvpEncoding(CiTestCase):
def test_encode_decode(self):
- kvp = {'key': 'key1', 'value': 'value1'}
+ kvp = {"key": "key1", "value": "value1"}
kvp_reporting = HyperVKvpReportingHandler()
- data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+ data = kvp_reporting._encode_kvp_item(kvp["key"], kvp["value"])
self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
decoded_kvp = kvp_reporting._decode_kvp_item(data)
self.assertEqual(kvp, decoded_kvp)
@@ -30,71 +28,72 @@ class TestKvpEncoding(CiTestCase):
class TextKvpReporter(CiTestCase):
def setUp(self):
super(TextKvpReporter, self).setUp()
- self.tmp_file_path = self.tmp_path('kvp_pool_file')
+ self.tmp_file_path = self.tmp_path("kvp_pool_file")
util.ensure_file(self.tmp_file_path)
def test_events_with_higher_incarnation_not_over_written(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
reporter.publish_event(
- events.ReportingEvent('foo', 'name1', 'description'))
+ events.ReportingEvent("foo", "name1", "description")
+ )
reporter.publish_event(
- events.ReportingEvent('foo', 'name2', 'description'))
+ events.ReportingEvent("foo", "name2", "description")
+ )
reporter.q.join()
self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
- reporter3 = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter3 = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter3.incarnation_no = reporter.incarnation_no - 1
reporter3.publish_event(
- events.ReportingEvent('foo', 'name3', 'description'))
+ events.ReportingEvent("foo", "name3", "description")
+ )
reporter3.q.join()
self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
def test_finish_event_result_is_logged(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter.publish_event(
- events.FinishReportingEvent('name2', 'description1',
- result=events.status.FAIL))
+ events.FinishReportingEvent(
+ "name2", "description1", result=events.status.FAIL
+ )
+ )
reporter.q.join()
- self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
+ self.assertIn("FAIL", list(reporter._iterate_kvps(0))[0]["value"])
def test_file_operation_issue(self):
os.remove(self.tmp_file_path)
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter.publish_event(
- events.FinishReportingEvent('name2', 'description1',
- result=events.status.FAIL))
+ events.FinishReportingEvent(
+ "name2", "description1", result=events.status.FAIL
+ )
+ )
reporter.q.join()
def test_event_very_long(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
- description = 'ab' * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ description = "ab" * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE
long_event = events.FinishReportingEvent(
- 'event_name',
- description,
- result=events.status.FAIL)
+ "event_name", description, result=events.status.FAIL
+ )
reporter.publish_event(long_event)
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(3, len(kvps))
# restore from the kvp to see the content are all there
- full_description = ''
+ full_description = ""
for i in range(len(kvps)):
- msg_slice = json.loads(kvps[i]['value'])
- self.assertEqual(msg_slice['msg_i'], i)
- full_description += msg_slice['msg']
+ msg_slice = json.loads(kvps[i]["value"])
+ self.assertEqual(msg_slice["msg_i"], i)
+ full_description += msg_slice["msg"]
self.assertEqual(description, full_description)
def test_not_truncate_kvp_file_modified_after_boot(self):
with open(self.tmp_file_path, "wb+") as f:
- kvp = {'key': 'key1', 'value': 'value1'}
+ kvp = {"key": "key1", "value": "value1"}
data = struct.pack(
"%ds%ds"
% (
@@ -118,11 +117,16 @@ class TextKvpReporter(CiTestCase):
def test_truncate_stale_kvp_file(self):
with open(self.tmp_file_path, "wb+") as f:
- kvp = {'key': 'key1', 'value': 'value1'}
- data = (struct.pack("%ds%ds" % (
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
+ kvp = {"key": "key1", "value": "value1"}
+ data = struct.pack(
+ "%ds%ds"
+ % (
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ kvp["key"].encode("utf-8"),
+ kvp["value"].encode("utf-8"),
+ )
f.write(data)
# set the time ways back to make it look like
@@ -137,8 +141,8 @@ class TextKvpReporter(CiTestCase):
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(0, len(kvps))
- @mock.patch('cloudinit.distros.uses_systemd')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.distros.uses_systemd")
+ @mock.patch("cloudinit.subp.subp")
def test_get_boot_telemetry(self, m_subp, m_sysd):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
datetime_pattern = (
@@ -149,8 +153,9 @@ class TextKvpReporter(CiTestCase):
# get_boot_telemetry makes two subp calls to systemctl. We provide
# a list of values that the subp calls should return
m_subp.side_effect = [
- ('UserspaceTimestampMonotonic=1844838', ''),
- ('InactiveExitTimestampMonotonic=3068203', '')]
+ ("UserspaceTimestampMonotonic=1844838", ""),
+ ("InactiveExitTimestampMonotonic=3068203", ""),
+ ]
m_sysd.return_value = True
reporter.publish_event(azure.get_boot_telemetry())
@@ -158,15 +163,13 @@ class TextKvpReporter(CiTestCase):
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if not re.search("kernel_start=" + datetime_pattern, evt_msg):
raise AssertionError("missing kernel_start timestamp")
if not re.search("user_start=" + datetime_pattern, evt_msg):
raise AssertionError("missing user_start timestamp")
- if not re.search("cloudinit_activation=" + datetime_pattern,
- evt_msg):
- raise AssertionError(
- "missing cloudinit_activation timestamp")
+ if not re.search("cloudinit_activation=" + datetime_pattern, evt_msg):
+ raise AssertionError("missing cloudinit_activation timestamp")
def test_get_system_info(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
@@ -176,7 +179,7 @@ class TextKvpReporter(CiTestCase):
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
# the most important information is cloudinit version,
# kernel_version, and the distro variant. It is ok if
@@ -191,12 +194,11 @@ class TextKvpReporter(CiTestCase):
def test_report_diagnostic_event_without_logger_func(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
diagnostic_msg = "test_diagnostic"
- reporter.publish_event(
- azure.report_diagnostic_event(diagnostic_msg))
+ reporter.publish_event(azure.report_diagnostic_event(diagnostic_msg))
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if diagnostic_msg not in evt_msg:
raise AssertionError("missing expected diagnostic message")
@@ -206,12 +208,14 @@ class TextKvpReporter(CiTestCase):
logger_func = mock.MagicMock()
diagnostic_msg = "test_diagnostic"
reporter.publish_event(
- azure.report_diagnostic_event(diagnostic_msg,
- logger_func=logger_func))
+ azure.report_diagnostic_event(
+ diagnostic_msg, logger_func=logger_func
+ )
+ )
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if diagnostic_msg not in evt_msg:
raise AssertionError("missing expected diagnostic message")
@@ -221,18 +225,18 @@ class TextKvpReporter(CiTestCase):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
instantiated_handler_registry.register_item("telemetry", reporter)
- event_desc = b'test_compressed'
- azure.report_compressed_event(
- "compressed event", event_desc)
+ event_desc = b"test_compressed"
+ azure.report_compressed_event("compressed event", event_desc)
self.validate_compressed_kvps(reporter, 1, [event_desc])
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
- @mock.patch('cloudinit.sources.helpers.azure.report_compressed_event')
- @mock.patch('cloudinit.sources.helpers.azure.report_diagnostic_event')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.sources.helpers.azure.report_compressed_event")
+ @mock.patch("cloudinit.sources.helpers.azure.report_diagnostic_event")
+ @mock.patch("cloudinit.subp.subp")
def test_push_log_to_kvp_exception_handling(self, m_subp, m_diag, m_com):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
@@ -240,7 +244,8 @@ class TextKvpReporter(CiTestCase):
log_file = self.tmp_path("cloud-init.log")
azure.MAX_LOG_TO_KVP_LENGTH = 100
azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
- 'log_pushed_to_kvp')
+ "log_pushed_to_kvp"
+ )
with open(log_file, "w") as f:
log_content = "A" * 50 + "B" * 100
f.write(log_content)
@@ -251,11 +256,12 @@ class TextKvpReporter(CiTestCase):
# exceptions will trigger diagnostic reporting calls
self.assertEqual(m_diag.call_count, 3)
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
- @mock.patch('cloudinit.subp.subp')
- @mock.patch.object(LogHandler, 'publish_event')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch.object(LogHandler, "publish_event")
def test_push_log_to_kvp(self, publish_event, m_subp):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
@@ -263,7 +269,8 @@ class TextKvpReporter(CiTestCase):
log_file = self.tmp_path("cloud-init.log")
azure.MAX_LOG_TO_KVP_LENGTH = 100
azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
- 'log_pushed_to_kvp')
+ "log_pushed_to_kvp"
+ )
with open(log_file, "w") as f:
log_content = "A" * 50 + "B" * 100
f.write(log_content)
@@ -275,20 +282,25 @@ class TextKvpReporter(CiTestCase):
azure.push_log_to_kvp(log_file)
# make sure dmesg is called every time
- m_subp.assert_called_with(
- ['dmesg'], capture=True, decode=False)
+ m_subp.assert_called_with(["dmesg"], capture=True, decode=False)
for call_arg in publish_event.call_args_list:
event = call_arg[0][0]
self.assertNotEqual(
- event.event_type, azure.COMPRESSED_EVENT_TYPE)
+ event.event_type, azure.COMPRESSED_EVENT_TYPE
+ )
self.validate_compressed_kvps(
- reporter, 2,
- [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode(),
- extra_content.encode()])
+ reporter,
+ 2,
+ [
+ log_content[-azure.MAX_LOG_TO_KVP_LENGTH :].encode(),
+ extra_content.encode(),
+ ],
+ )
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
def validate_compressed_kvps(self, reporter, count, values):
reporter.q.join()
@@ -296,7 +308,7 @@ class TextKvpReporter(CiTestCase):
compressed_count = 0
for i in range(len(kvps)):
kvp = kvps[i]
- kvp_value = kvp['value']
+ kvp_value = kvp["value"]
kvp_value_json = json.loads(kvp_value)
evt_msg = kvp_value_json["msg"]
evt_type = kvp_value_json["type"]
@@ -305,7 +317,8 @@ class TextKvpReporter(CiTestCase):
evt_msg_json = json.loads(evt_msg)
evt_encoding = evt_msg_json["encoding"]
evt_data = zlib.decompress(
- base64.decodebytes(evt_msg_json["data"].encode("ascii")))
+ base64.decodebytes(evt_msg_json["data"].encode("ascii"))
+ )
self.assertLess(compressed_count, len(values))
self.assertEqual(evt_data, values[compressed_count])
@@ -316,17 +329,21 @@ class TextKvpReporter(CiTestCase):
def test_unique_kvp_key(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
evt1 = events.ReportingEvent(
- "event_type", 'event_message',
- "event_description")
+ "event_type", "event_message", "event_description"
+ )
reporter.publish_event(evt1)
evt2 = events.ReportingEvent(
- "event_type", 'event_message',
- "event_description", timestamp=evt1.timestamp + 1)
+ "event_type",
+ "event_message",
+ "event_description",
+ timestamp=evt1.timestamp + 1,
+ )
reporter.publish_event(evt2)
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(2, len(kvps))
- self.assertNotEqual(kvps[0]["key"], kvps[1]["key"],
- "duplicate keys for KVP entries")
+ self.assertNotEqual(
+ kvps[0]["key"], kvps[1]["key"], "duplicate keys for KVP entries"
+ )
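
A minimal sketch of publishing through the Hyper-V KVP handler as these tests do; the kvp_file_path below is a hypothetical pool file rather than the real /var/lib/hyperv pool.

from cloudinit.reporting import events
from cloudinit.reporting.handlers import HyperVKvpReportingHandler

reporter = HyperVKvpReportingHandler(kvp_file_path="/tmp/kvp_pool_file")
reporter.publish_event(events.ReportingEvent("foo", "name1", "description"))
reporter.q.join()  # wait for the background writer thread to flush the record
assert len(list(reporter._iterate_kvps(0))) == 1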
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
deleted file mode 100644
index 53d3cd5a..00000000
--- a/tests/unittests/test_rh_subscription.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for registering RHEL subscription via rh_subscription."""
-
-import copy
-import logging
-
-from cloudinit.config import cc_rh_subscription
-from cloudinit import subp
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-SUBMGR = cc_rh_subscription.SubscriptionManager
-SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli'
-
-
-@mock.patch(SUB_MAN_CLI)
-class GoodTests(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(GoodTests, self).setUp()
- self.name = "cc_rh_subscription"
- self.cloud_init = None
- self.log = logging.getLogger("good_tests")
- self.args = []
- self.handle = cc_rh_subscription.handle
-
- self.config = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks'
- }}
- self.config_full = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'auto-attach': True,
- 'service-level': 'self-support',
- 'add-pool': ['pool1', 'pool2', 'pool3'],
- 'enable-repo': ['repo1', 'repo2', 'repo3'],
- 'disable-repo': ['repo4', 'repo5']
- }}
-
- def test_already_registered(self, m_sman_cli):
- '''
- Emulates a system that is already registered. Ensure it gets
- a non-ProcessExecution error from is_registered()
- '''
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assertIn('System is already registered', self.logs.getvalue())
-
- def test_simple_registration(self, m_sman_cli):
- '''
- Simple registration with username and password
- '''
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')]
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
- self.assertIn(mock.call(['register', '--username=scooby@do.com',
- '--password=scooby-snacks'],
- logstring_val=True),
- m_sman_cli.call_args_list)
- self.assertIn('rh_subscription plugin completed successfully',
- self.logs.getvalue())
- self.assertEqual(m_sman_cli.call_count, 2)
-
- @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
- def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
- cfg = copy.deepcopy(self.config)
- m_get_repos.return_value = ([], ['repo1'])
- cfg['rh_subscription'].update(
- {'enable-repo': ['repo1'], 'disable-repo': None})
- mysm = cc_rh_subscription.SubscriptionManager(cfg)
- self.assertEqual(True, mysm.update_repos())
- m_get_repos.assert_called_with()
- self.assertEqual(m_sman_cli.call_args_list,
- [mock.call(['repos', '--enable=repo1'])])
-
- def test_full_registration(self, m_sman_cli):
- '''
- Registration with auto-attach, service-level, adding pools,
- and enabling and disabling yum repos
- '''
- call_lists = []
- call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
- call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
- '--enable=repo3'])
- call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [
- subp.ProcessExecutionError,
- (reg, 'bar'),
- ('Service level set to: self-support', ''),
- ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
- ('Repo ID: repo1\nRepo ID: repo5\n', ''),
- ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''),
- ('', '')]
- self.handle(self.name, self.config_full, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 9)
- for call in call_lists:
- self.assertIn(mock.call(call), m_sman_cli.call_args_list)
- self.assertIn("rh_subscription plugin completed successfully",
- self.logs.getvalue())
-
-
-@mock.patch(SUB_MAN_CLI)
-class TestBadInput(CiTestCase):
- with_logs = True
- name = "cc_rh_subscription"
- cloud_init = None
- log = logging.getLogger("bad_tests")
- args = []
- SM = cc_rh_subscription.SubscriptionManager
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
-
- config_no_password = {'rh_subscription':
- {'username': 'scooby@do.com'
- }}
-
- config_no_key = {'rh_subscription':
- {'activation-key': '1234abcde',
- }}
-
- config_service = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'service-level': 'self-support'
- }}
-
- config_badpool = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'add-pool': 'not_a_list'
- }}
- config_badrepo = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'enable-repo': 'not_a_list'
- }}
- config_badkey = {'rh_subscription':
- {'activation-key': 'abcdef1234',
- 'fookey': 'bar',
- 'org': '123',
- }}
-
- def setUp(self):
- super(TestBadInput, self).setUp()
- self.handle = cc_rh_subscription.handle
-
- def assert_logged_warnings(self, warnings):
- logs = self.logs.getvalue()
- missing = [w for w in warnings if "WARNING: " + w not in logs]
- self.assertEqual([], missing, "Missing expected warnings.")
-
- def test_no_password(self, m_sman_cli):
- '''Attempt to register without the password key/value.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_no_password, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 0)
-
- def test_no_org(self, m_sman_cli):
- '''Attempt to register without the org key/value.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError]
- self.handle(self.name, self.config_no_key, self.cloud_init,
- self.log, self.args)
- m_sman_cli.assert_called_with(['identity'])
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'Unable to register system due to incomplete information.',
- 'Use either activationkey and org *or* userid and password',
- 'Registration failed or did not run completely',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_service_level_without_auto(self, m_sman_cli):
- '''Attempt to register using service-level without auto-attach key.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_service, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'The service-level key must be used in conjunction with ',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_pool_not_a_list(self, m_sman_cli):
- '''
- Register with pools that are not in the format of a list
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badpool, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 2)
- self.assert_logged_warnings((
- 'Pools must in the format of a list',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_repo_not_a_list(self, m_sman_cli):
- '''
- Register with repos that are not in the format of a list
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badrepo, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 2)
- self.assert_logged_warnings((
- 'Repo IDs must in the format of a list.',
- 'Unable to add or remove repos',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_bad_key_value(self, m_sman_cli):
- '''
- Attempt to register with a key that we don't know
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badkey, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'fookey is not a valid key for rh_subscription. Valid keys are:',
- 'rh_subscription plugin did not complete successfully'))
-
-# vi: ts=4 expandtab
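For reference, a minimal sketch of a well-formed counterpart to the bad-input configs above. Registration needs either username/password or activation-key/org (per the warning asserted in test_no_org), and the pool/repo keys must be lists; the auto-attach and disable-repo keys are assumptions inferred from the mocked 'attach --auto' and 'repos --disable=repo5' calls, not literal test data.

config_ok = {
    "rh_subscription": {
        "username": "scooby@do.com",
        "password": "scooby-snacks",      # or: "activation-key" plus "org"
        "auto-attach": True,              # assumed key behind 'attach --auto'
        "service-level": "self-support",  # only valid together with auto-attach
        "add-pool": ["pool1", "pool3"],   # must be a list, not a string
        "enable-repo": ["repo2", "repo3"],
        "disable-repo": ["repo5"],        # assumed key behind '--disable=repo5'
    }
}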
diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_runs/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
deleted file mode 100644
index ff27a280..00000000
--- a/tests/unittests/test_runs/test_merge_run.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-import shutil
-import tempfile
-
-from cloudinit.tests import helpers
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import safeyaml
-from cloudinit import stages
-from cloudinit import util
-
-
-class TestMergeRun(helpers.FilesystemMockingTestCase):
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
-
- def test_none_ds(self):
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.replicateTestRoot('simple_ubuntu', new_root)
- cfg = {
- 'datasource_list': ['None'],
- 'cloud_init_modules': ['write-files'],
- 'system_info': {'paths': {'run_dir': new_root}}
- }
- ud = helpers.readResource('user_data.1.txt')
- cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
- self._patchIn(new_root)
-
- # Now start verifying whats created
- initer = stages.Init()
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.datasource.userdata_raw = ud
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mirrors = initer.distro.get_option('package_mirrors')
- self.assertEqual(1, len(mirrors))
- mirror = mirrors[0]
- self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])
- mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
- self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
-
-# vi: ts=4 expandtab
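The deleted merge-run test drives the single-instance boot path by hand; a condensed sketch of that call sequence, using the names from the test above (the comments are paraphrase, not cloud-init documentation):

init = stages.Init()
init.read_cfg()                   # read the cloud.cfg written into the fake root
init.initialize()                 # create the /var/lib/cloud directory layout
init.fetch()                      # select a datasource (DataSourceNone here)
init.datasource.userdata_raw = ud
init.instancify()                 # bind this boot to an instance-id
init.update()                     # persist user-data/metadata for the instance
init.cloudify().run(
    "consume_data", init.consume_data, args=[PER_INSTANCE], freq=PER_INSTANCE
)
which_ran, failures = stages.Modules(init).run_section("cloud_init_modules")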
diff --git a/cloudinit/tests/test_simpletable.py b/tests/unittests/test_simpletable.py
index a12a62a0..ee7eb0b4 100644
--- a/cloudinit/tests/test_simpletable.py
+++ b/tests/unittests/test_simpletable.py
@@ -10,17 +10,22 @@ reimplement the entire library, only the minimal parts we actually use.
"""
from cloudinit.simpletable import SimpleTable
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
# Examples rendered by cloud-init using PrettyTable
-NET_DEVICE_FIELDS = (
- 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address')
+NET_DEVICE_FIELDS = ("Device", "Up", "Address", "Mask", "Scope", "Hw-Address")
NET_DEVICE_ROWS = (
- ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'),
- ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link',
- '0a:1f:07:15:98:70'),
- ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'),
- ('lo', True, '::1/128', '.', 'host', '.'),
+ ("ens3", True, "172.31.4.203", "255.255.240.0", ".", "0a:1f:07:15:98:70"),
+ (
+ "ens3",
+ True,
+ "fe80::81f:7ff:fe15:9870/64",
+ ".",
+ "link",
+ "0a:1f:07:15:98:70",
+ ),
+ ("lo", True, "127.0.0.1", "255.0.0.0", ".", "."),
+ ("lo", True, "::1/128", ".", "host", "."),
)
NET_DEVICE_TABLE = """\
+--------+------+----------------------------+---------------+-------+-------------------+
@@ -32,11 +37,17 @@ NET_DEVICE_TABLE = """\
| lo | True | ::1/128 | . | host | . |
+--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501
ROUTE_IPV4_FIELDS = (
- 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags')
+ "Route",
+ "Destination",
+ "Gateway",
+ "Genmask",
+ "Interface",
+ "Flags",
+)
ROUTE_IPV4_ROWS = (
- ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'),
- ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'),
- ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'),
+ ("0", "0.0.0.0", "172.31.0.1", "0.0.0.0", "ens3", "UG"),
+ ("1", "169.254.0.0", "0.0.0.0", "255.255.0.0", "ens3", "U"),
+ ("2", "172.31.0.0", "0.0.0.0", "255.255.240.0", "ens3", "U"),
)
ROUTE_IPV4_TABLE = """\
+-------+-------------+------------+---------------+-----------+-------+
@@ -47,11 +58,14 @@ ROUTE_IPV4_TABLE = """\
| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U |
+-------+-------------+------------+---------------+-----------+-------+"""
-AUTHORIZED_KEYS_FIELDS = (
- 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment')
+AUTHORIZED_KEYS_FIELDS = ("Keytype", "Fingerprint (md5)", "Options", "Comment")
AUTHORIZED_KEYS_ROWS = (
- ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-',
- 'ajorgens'),
+ (
+ "ssh-rsa",
+ "24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36",
+ "-",
+ "ajorgens",
+ ),
)
AUTHORIZED_KEYS_TABLE = """\
+---------+-------------------------------------------------+---------+----------+
@@ -63,7 +77,7 @@ AUTHORIZED_KEYS_TABLE = """\
# from prettytable import PrettyTable
# pt = PrettyTable(('HEADER',))
# print(pt)
-NO_ROWS_FIELDS = ('HEADER',)
+NO_ROWS_FIELDS = ("HEADER",)
NO_ROWS_TABLE = """\
+--------+
| HEADER |
@@ -72,7 +86,6 @@ NO_ROWS_TABLE = """\
class TestSimpleTable(CiTestCase):
-
def test_no_rows(self):
"""An empty table is rendered as PrettyTable would have done it."""
table = SimpleTable(NO_ROWS_FIELDS)
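For context, the pattern these fixtures support: build a SimpleTable from a field tuple, feed it the row tuples, and compare the rendering against the captured PrettyTable output. add_row and get_string are the minimal PrettyTable-style surface the module reimplements; treat the exact method names as assumptions, since only the constructor appears in the hunks above.

table = SimpleTable(NET_DEVICE_FIELDS)
for row in NET_DEVICE_ROWS:
    table.add_row(row)            # one tuple per printed row
assert table.get_string() == NET_DEVICE_TABLE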
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 88a111e3..d614350e 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,25 +1,65 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
from collections import namedtuple
+from functools import partial
from unittest.mock import patch
-from cloudinit import ssh_util
-from cloudinit.tests import helpers as test_helpers
-from cloudinit import util
+from cloudinit import ssh_util, util
+from tests.unittests import helpers as test_helpers
# https://stackoverflow.com/questions/11351032/
FakePwEnt = namedtuple(
- 'FakePwEnt',
- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid'])
+ "FakePwEnt",
+ [
+ "pw_name",
+ "pw_passwd",
+ "pw_uid",
+ "pw_gid",
+ "pw_gecos",
+ "pw_dir",
+ "pw_shell",
+ ],
+)
FakePwEnt.__new__.__defaults__ = tuple(
- "UNSET_%s" % n for n in FakePwEnt._fields)
+ "UNSET_%s" % n for n in FakePwEnt._fields
+)
+
+
+def mock_get_owner(updated_permissions, value):
+ try:
+ return updated_permissions[value][0]
+    except KeyError:
+ return util.get_owner(value)
+
+
+def mock_get_group(updated_permissions, value):
+ try:
+ return updated_permissions[value][1]
+    except KeyError:
+ return util.get_group(value)
+
+
+def mock_get_user_groups(username):
+ return username
+
+
+def mock_get_permissions(updated_permissions, value):
+ try:
+ return updated_permissions[value][2]
+    except KeyError:
+ return util.get_permissions(value)
+
+
+def mock_getpwnam(users, username):
+ return users[username]
# Do not use these public keys, most of them are fetched from
# the testdata for OpenSSH, and their private keys are available
# https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata
VALID_CONTENT = {
- 'dsa': (
+ "dsa": (
"AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
"W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
"A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
@@ -31,12 +71,12 @@ VALID_CONTENT = {
"JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
"5z7u2rVAlDw=="
),
- 'ecdsa': (
+ "ecdsa": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
"J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
"Ql7lc2leWL7CY="
),
- 'rsa': (
+ "rsa": (
"AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz"
"emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD"
"c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q"
@@ -44,11 +84,10 @@ VALID_CONTENT = {
"YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
"/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
),
- 'ed25519': (
- "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb"
- "8lnDd"
+ "ed25519": (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb8lnDd"
),
- 'ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp256-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHAyNTYtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"gQIfwT/+UX68/hlKsdKuaOuAVB6ftTg03SlP/uH4OBEwAAAAIbmlzdHAyNTYAAA"
"BBBEjA0gjJmPM6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXw"
@@ -63,12 +102,12 @@ VALID_CONTENT = {
"2tM3QXkDcwdP0SxSEW5yy4XV5oAAAAhANNMm1cdVlAt3hmycQgdD82zPlg5YvVO"
"iN0SQTbgVD8i"
),
- 'ecdsa-sha2-nistp256': (
+ "ecdsa-sha2-nistp256": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEjA0gjJmPM"
"6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXwHvNxplYBwdPlk"
"2zEecvf9Cs2BM="
),
- 'ecdsa-sha2-nistp384-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp384-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHAzODQtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"grnSvDsK1EnCZndO1IyGWcGkVgVSkPWi/XO2ybPFyLVUAAAAIbmlzdHAzODQAAA"
"BhBAaYSQs+8TT0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaY"
@@ -85,12 +124,12 @@ VALID_CONTENT = {
"RVYqYQgAAADAiit0UCMDAUbjD+R2x4LvU3x/t8G3sdqDLRNfMRpjZpvcS8AwC+Y"
"VFVSQNn0AyzW0="
),
- 'ecdsa-sha2-nistp384': (
+ "ecdsa-sha2-nistp384": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAaYSQs+8TT"
"0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaYByhXtAJiPOMqL"
"U5h0eb3sCtM3ek4NvjXFTGTqPrrxJI6q0OsgrtkGE7UM9ZsfMm7q6BOA=="
),
- 'ecdsa-sha2-nistp521-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp521-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHA1MjEtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"gGmRzkkMvRFk1V5U3m3mQ2nfW20SJVXk1NKnT5iZGDcEAAAAIbmlzdHA1MjEAAA"
"CFBAHosAOHAI1ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94C"
@@ -109,13 +148,13 @@ VALID_CONTENT = {
"AAAQgEzkIpX3yKXPaPcK17mNx40ujEDitm4ARmbhAge0sFhZtf7YIgI55b6vkI8"
"JvMJkzQCBF1cpNOaIpVh1nFZNBphMQ=="
),
- 'ecdsa-sha2-nistp521': (
+ "ecdsa-sha2-nistp521": (
"AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAHosAOHAI1"
"ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94CQ8yyNHcby87zF"
"ZHdv5BSKyZ/cyREAAeiAcSakop9VS3+bUfZpEIqwBZXarwUjnRnxprkcQ0rfCCd"
"agkGZr/OA7DemK2D8tKLTHsKoEEWNImo6/pXDkFxA=="
),
- 'sk-ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -124,12 +163,12 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'sk-ecdsa-sha2-nistp256@openssh.com': (
+ "sk-ecdsa-sha2-nistp256@openssh.com": (
"AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHA"
"yNTYAAABBBIELQJ2DgvaX1yQlKFokfWM2suuaCFI2qp0eJodHyg6O4ifxc3XpRK"
"d1OS8dNYQtE/YjdXSrA+AOnMF5ns2Nkx4AAAAEc3NoOg=="
),
- 'sk-ssh-ed25519-cert-v01@openssh.com': (
+ "sk-ssh-ed25519-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -138,11 +177,11 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'sk-ssh-ed25519@openssh.com': (
+ "sk-ssh-ed25519@openssh.com": (
"AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAICFo/k5LU8863u66YC9"
"eUO2170QduohPURkQnbLa/dczAAAABHNzaDo="
),
- 'ssh-dss-cert-v01@openssh.com': (
+ "ssh-dss-cert-v01@openssh.com": (
"AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgdTlbNU9Hn9Qng3F"
"HxwH971bxCIoq1ern/QWFFDWXgmYAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0c"
"Fn1zYd/JGvtabKnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4"
@@ -159,7 +198,7 @@ VALID_CONTENT = {
"+F7SMGQAAAFMAAAALc3NoLWVkMjU1MTkAAABAh/z1LIdNL1b66tQ8t9DY9BTB3B"
"QKpTKmc7ezyFKLwl96yaIniZwD9Ticdbe/8i/Li3uCFE3EAt8NAIv9zff8Bg=="
),
- 'ssh-dss': (
+ "ssh-dss": (
"AAAAB3NzaC1kc3MAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0cFn1zYd/JGvtab"
"KnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4yLB+6vCtHcJF7"
"rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DDjMF0k5emWKCsa"
@@ -171,7 +210,7 @@ VALID_CONTENT = {
"GIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1UuyQMcUtb34+I0u9"
"Ycnyhp2mSFsQt"
),
- 'ssh-ed25519-cert-v01@openssh.com': (
+ "ssh-ed25519-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -180,11 +219,10 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'ssh-ed25519': (
- "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34"
- "XtIwZ"
+ "ssh-ed25519": (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34XtIwZ"
),
- 'ssh-rsa-cert-v01@openssh.com': (
+ "ssh-rsa-cert-v01@openssh.com": (
"AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAg98LhS2EHxLOWCLo"
"pZPwHdg/RJXusnkOqQXSc9R7aITkAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGh"
"EZzpoojjEW5y8+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yM"
@@ -195,13 +233,13 @@ VALID_CONTENT = {
"he0jBkAAABTAAAAC3NzaC1lZDI1NTE5AAAAQI3QGlUCzC07KorupxpDkkGy6tni"
"aZ8EvBflzvv+itXWNchGvfUeHmVT6aX0sRqehdz/lR+GmXRoZBhofwh0qAM="
),
- 'ssh-rsa': (
+ "ssh-rsa": (
"AAAAB3NzaC1yc2EAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGhEZzpoojjEW5y8"
"+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yMrW6wb84gbq8C3"
"1Xoe9EORcIUuGSvDKdNSM1SjlhDquRblDFB8kToqXyx1lqrXecXylxIUOL0jE+u"
"0rU1967pDJx+w=="
),
- 'ssh-xmss-cert-v01@openssh.com': (
+ "ssh-xmss-cert-v01@openssh.com": (
"AAAAHXNzaC14bXNzLWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIM2UD0IH+Igsekq"
"xjTO5f36exX4WGRMCtDGPjwfbXblxAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0gxMA"
"AAAEDI83/K5JMOy0BMJgQypRdz35ApAnoQinMJ8ZMoZPaEJF8Z4rANQlfzaAXum"
@@ -267,7 +305,7 @@ VALID_CONTENT = {
"rNYClh8fQEQ8XuOCDpomMWu58YOTfbZNMDWs/Ou7RfCjX+VNwjPShDK9joMwWKc"
"Jy3QalZbaoWtcyyvXxR2sqhVR9F7Cmasq4="
),
- 'ssh-xmss@openssh.com': (
+ "ssh-xmss@openssh.com": (
"AAAAFHNzaC14bXNzQG9wZW5zc2guY29tAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0g"
"xMAAAAECqptWnK94d+Sj2xcdTu8gz+75lawZoLSZFqC5IhbYuT/Z3oBZCim6yt+"
"HAmk6MKldl3Fg+74v4sR/SII0I0Jv/"
@@ -278,19 +316,25 @@ KEY_TYPES = list(VALID_CONTENT.keys())
TEST_OPTIONS = (
"no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
- 'command="echo \'Please login as the user \"ubuntu\" rather than the'
- 'user \"root\".\';echo;sleep 10"')
+ 'command="echo \'Please login as the user "ubuntu" rather than the'
+ 'user "root".\';echo;sleep 10"'
+)
class TestAuthKeyLineParser(test_helpers.CiTestCase):
-
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
parser = ssh_util.AuthKeyLineParser()
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((ktype, content, comment,))
+ comment = "user-%s@host" % ktype
+ line = " ".join(
+ (
+ ktype,
+ content,
+ comment,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -303,7 +347,12 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
parser = ssh_util.AuthKeyLineParser()
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- line = ' '.join((ktype, content,))
+ line = " ".join(
+ (
+ ktype,
+ content,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -317,8 +366,15 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
options = TEST_OPTIONS
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((options, ktype, content, comment,))
+ comment = "user-%s@host" % ktype
+ line = " ".join(
+ (
+ options,
+ ktype,
+ content,
+ comment,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -330,7 +386,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
# test key line with key type and base64 only
parser = ssh_util.AuthKeyLineParser()
- baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host"))
+ baseline = " ".join(("rsa", VALID_CONTENT["rsa"], "user@host"))
myopts = "no-port-forwarding,no-agent-forwarding"
key = parser.parse("allowedopt" + " " + baseline)
@@ -341,59 +397,62 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
def test_parse_invalid_keytype(self):
parser = ssh_util.AuthKeyLineParser()
- key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']]))
+ key = parser.parse(" ".join(["badkeytype", VALID_CONTENT["rsa"]]))
self.assertFalse(key.valid())
class TestUpdateAuthorizedKeys(test_helpers.CiTestCase):
-
def test_new_keys_replace(self):
"""new entries with the same base64 should replace old."""
orig_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
- ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")),
+ " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")),
+ ]
new_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")),
+ ]
- expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+ expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n"
parser = ssh_util.AuthKeyLineParser()
found = ssh_util.update_authorized_keys(
[parser.parse(p) for p in orig_entries],
- [parser.parse(p) for p in new_entries])
+ [parser.parse(p) for p in new_entries],
+ )
self.assertEqual(expected, found)
def test_new_invalid_keys_are_ignored(self):
"""new entries that are invalid should be skipped."""
orig_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
- ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")),
+ " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")),
+ ]
new_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')),
- 'xxx-invalid-thing1',
- 'xxx-invalid-blob2'
+ " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")),
+ "xxx-invalid-thing1",
+ "xxx-invalid-blob2",
]
- expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+ expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n"
parser = ssh_util.AuthKeyLineParser()
found = ssh_util.update_authorized_keys(
[parser.parse(p) for p in orig_entries],
- [parser.parse(p) for p in new_entries])
+ [parser.parse(p) for p in new_entries],
+ )
self.assertEqual(expected, found)
class TestParseSSHConfig(test_helpers.CiTestCase):
-
def setUp(self):
- self.load_file_patch = patch('cloudinit.ssh_util.util.load_file')
+ self.load_file_patch = patch("cloudinit.ssh_util.util.load_file")
self.load_file = self.load_file_patch.start()
- self.isfile_patch = patch('cloudinit.ssh_util.os.path.isfile')
+ self.isfile_patch = patch("cloudinit.ssh_util.os.path.isfile")
self.isfile = self.isfile_patch.start()
self.isfile.return_value = True
@@ -404,60 +463,61 @@ class TestParseSSHConfig(test_helpers.CiTestCase):
def test_not_a_file(self):
self.isfile.return_value = False
self.load_file.side_effect = IOError
- ret = ssh_util.parse_ssh_config('not a real file')
+ ret = ssh_util.parse_ssh_config("not a real file")
self.assertEqual([], ret)
def test_empty_file(self):
- self.load_file.return_value = ''
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = ""
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual([], ret)
def test_comment_line(self):
- comment_line = '# This is a comment'
+ comment_line = "# This is a comment"
self.load_file.return_value = comment_line
- ret = ssh_util.parse_ssh_config('some real file')
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
self.assertEqual(comment_line, ret[0].line)
def test_blank_lines(self):
- lines = ['', '\t', ' ']
- self.load_file.return_value = '\n'.join(lines)
- ret = ssh_util.parse_ssh_config('some real file')
+ lines = ["", "\t", " "]
+ self.load_file.return_value = "\n".join(lines)
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(len(lines), len(ret))
for line in ret:
- self.assertEqual('', line.line)
+ self.assertEqual("", line.line)
def test_lower_case_config(self):
- self.load_file.return_value = 'foo bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "foo bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
def test_upper_case_config(self):
- self.load_file.return_value = 'Foo Bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "Foo Bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('Bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("Bar", ret[0].value)
def test_lower_case_with_equals(self):
- self.load_file.return_value = 'foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "foo=bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
def test_upper_case_with_equals(self):
- self.load_file.return_value = 'Foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "Foo=bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
class TestUpdateSshConfigLines(test_helpers.CiTestCase):
"""Test the update_ssh_config_lines method."""
+
exlines = [
"#PasswordAuthentication yes",
"UsePAM yes",
@@ -476,8 +536,8 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
def test_new_option_added(self):
"""A single update of non-existing option."""
lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
- result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'})
- self.assertEqual(['MyKey'], result)
+ result = ssh_util.update_ssh_config_lines(lines, {"MyKey": "MyVal"})
+ self.assertEqual(["MyKey"], result)
self.check_line(lines[-1], "MyKey", "MyVal")
def test_commented_out_not_updated_but_appended(self):
@@ -487,6 +547,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
self.assertEqual([self.pwauth], result)
self.check_line(lines[-1], self.pwauth, "no")
+ def test_option_without_value(self):
+ """Implementation only accepts key-value pairs."""
+ extended_exlines = self.exlines.copy()
+ denyusers_opt = "DenyUsers"
+ extended_exlines.append(denyusers_opt)
+ lines = ssh_util.parse_ssh_config_lines(list(extended_exlines))
+ self.assertNotIn(denyusers_opt, str(lines))
+
def test_single_option_updated(self):
"""A single update should have change made and line updated."""
opt, val = ("UsePAM", "no")
@@ -497,8 +565,12 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
def test_multiple_updates_with_add(self):
"""Verify multiple updates some added some changed, some not."""
- updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval",
- "AcceptEnv": "LANG ADD LC_*"}
+ updates = {
+ "UsePAM": "no",
+ "X11Forwarding": "no",
+ "NewOpt": "newval",
+ "AcceptEnv": "LANG ADD LC_*",
+ }
lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
result = ssh_util.update_ssh_config_lines(lines, updates)
self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result))
@@ -523,7 +595,7 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
class TestUpdateSshConfig(test_helpers.CiTestCase):
- cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""])
+ cfgdata = "\n".join(["#Option val", "MyKey ORIG_VAL", ""])
def test_modified(self):
mycfg = self.tmp_path("ssh_config_1")
@@ -533,7 +605,7 @@ class TestUpdateSshConfig(test_helpers.CiTestCase):
found = util.load_file(mycfg)
self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found)
# assert there is a newline at end of file (LP: #1677205)
- self.assertEqual('\n', found[-1])
+ self.assertEqual("\n", found[-1])
def test_not_modified(self):
mycfg = self.tmp_path("ssh_config_2")
@@ -550,76 +622,949 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
self.assertEqual(
["/opt/bobby/keys"],
ssh_util.render_authorizedkeysfile_paths(
- "/opt/%u/keys", "/home/bobby", "bobby"))
+ "/opt/%u/keys", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_user_file(self):
+ self.assertEqual(
+ ["/opt/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_user_file2(self):
+ self.assertEqual(
+ ["/opt/bobby/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u/%u", "/home/bobby", "bobby"
+ ),
+ )
def test_multiple(self):
self.assertEqual(
["/keys/path1", "/keys/path2"],
ssh_util.render_authorizedkeysfile_paths(
- "/keys/path1 /keys/path2", "/home/bobby", "bobby"))
+ "/keys/path1 /keys/path2", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_multiple2(self):
+ self.assertEqual(
+ ["/keys/path1", "/keys/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/keys/path1 /keys/%u", "/home/bobby", "bobby"
+ ),
+ )
def test_relative(self):
self.assertEqual(
["/home/bobby/.secret/keys"],
ssh_util.render_authorizedkeysfile_paths(
- ".secret/keys", "/home/bobby", "bobby"))
+ ".secret/keys", "/home/bobby", "bobby"
+ ),
+ )
def test_home(self):
self.assertEqual(
["/homedirs/bobby/.keys"],
ssh_util.render_authorizedkeysfile_paths(
- "%h/.keys", "/homedirs/bobby", "bobby"))
+ "%h/.keys", "/homedirs/bobby", "bobby"
+ ),
+ )
+
+ def test_all(self):
+ self.assertEqual(
+ [
+ "/homedirs/bobby/.keys",
+ "/homedirs/bobby/.secret/keys",
+ "/keys/path1",
+ "/opt/bobby/keys",
+ ],
+ ssh_util.render_authorizedkeysfile_paths(
+ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys",
+ "/homedirs/bobby",
+ "bobby",
+ ),
+ )
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
+ def create_fake_users(
+ self,
+ names,
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ ):
+ homes = []
+
+ root = "/tmp/root"
+ fpw = FakePwEnt(pw_name="root", pw_dir=root)
+ users["root"] = fpw
+
+ for name in names:
+ home = "/tmp/home/" + name
+ fpw = FakePwEnt(pw_name=name, pw_dir=home)
+ users[name] = fpw
+ homes.append(home)
+
+ m_get_permissions.side_effect = partial(
+ mock_get_permissions, mock_permissions
+ )
+ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions)
+ m_get_group.side_effect = partial(mock_get_group, mock_permissions)
+ m_getpwnam.side_effect = partial(mock_getpwnam, users)
+ return homes
+
+ def create_user_authorized_file(self, home, filename, content_key, keys):
+ user_ssh_folder = "%s/.ssh" % home
+ # /tmp/home/<user>/.ssh/authorized_keys = content_key
+ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_global_authorized_file(self, filename, content_key, keys):
+ authorized_keys = self.tmp_path(filename, dir="/tmp")
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_sshd_config(self, authorized_keys_files):
+ sshd_config = self.tmp_path("sshd_config", dir="/tmp")
+ util.write_file(
+ sshd_config, "AuthorizedKeysFile " + authorized_keys_files
+ )
+ return sshd_config
+
+ def execute_and_check(
+ self, user, sshd_config, solution, keys, delete_keys=True
+ ):
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ user, sshd_config
+ )
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(auth_key_fn, solution)
+ for path, key in keys.items():
+ if path == solution:
+ self.assertTrue(VALID_CONTENT[key] in content)
+ else:
+ self.assertFalse(VALID_CONTENT[key] in content)
+
+ if delete_keys and os.path.isdir("/tmp/home/"):
+ util.delete_dir_contents("/tmp/home/")
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s" % (authorized_keys, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/sshd_config
+ options = "%s %s" % (user_keys, authorized_keys)
+ sshd_config = self.create_sshd_config(options)
- self.assertEqual(authorized_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ user_keys,
+ authorized_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys2", "rsa", keys
+ )
+
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys3", "dsa", keys
+ )
+
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
+ )
+
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ authorized_keys,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_global_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+ # /tmp/etc/ssh/authorized_keys = rsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "rsa", keys
+ )
+
+ options = "%s" % authorized_keys_global
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home
+ self.execute_and_check(user_bobby, sshd_config, default, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_standard(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = rsa
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ options = ".ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_custom(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys
+ )
+
+ options = ".ssh/authorized_keys2"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ "/tmp/home/suzie/.ssh/user_keys3": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, "user_keys3", "dsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys2", "ecdsa", keys
+ )
+
+ options = "%s %s %%h/.ssh/authorized_keys2" % (
+ authorized_keys_global,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, user_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files_badguy(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/badguy": ("root", "root", 0o755),
+ "/tmp/home/badguy/home": ("root", "root", 0o755),
+ "/tmp/home/badguy/home/bobby": ("root", "root", 0o655),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ home_bobby, *_ = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, "user_keys3", "dsa", keys
+ )
+
+ # /tmp/home/badguy/home/bobby = ""
+ authorized_keys2 = self.tmp_path("home/bobby", dir="/tmp/home/badguy")
+ util.write_file(authorized_keys2, "")
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys2", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %%h/.ssh/authorized_keys2 %s %s" % (
+ authorized_keys2,
+ authorized_keys_global,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_unaccessible_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/etc": ("root", "root", 0o755),
+ "/tmp/etc/ssh": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys": ("root", "root", 0o700),
+ "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ "/tmp/home/badguy": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh/authorized_keys": (
+ "badguy",
+ "badguy",
+ 0o600,
+ ),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # assume here that we can bypass userkeys, despite permissions
+ self.create_global_authorized_file(
+ "etc/ssh/userkeys/bobby", "dsa", keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ self.create_global_authorized_file(
+ "etc/ssh/userkeys/badguy", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_accessible_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/etc": ("root", "root", 0o755),
+ "/tmp/etc/ssh": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ "/tmp/home/badguy": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh/authorized_keys": (
+ "badguy",
+ "badguy",
+ 0o600,
+ ),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # assume here that we can bypass userkeys, despite permissions
+ authorized_keys = self.create_global_authorized_file(
+ "etc/ssh/userkeys/bobby", "dsa", keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ authorized_keys2 = self.create_global_authorized_file(
+ "etc/ssh/userkeys/badguy", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ default = "%s/.ssh/authorized_keys" % home_suzie
+ self.execute_and_check(user_suzie, sshd_config, default, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file_inverted(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home_bobby
+ self.execute_and_check(
+ user_bobby, sshd_config, default, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_user_files(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ authorized_keys,
+ authorized_keys2,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
# vi: ts=4 expandtab
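The MultipleSshAuthorizedKeysFile cases above hinge on how ssh_util.render_authorizedkeysfile_paths expands sshd_config AuthorizedKeysFile entries; a short recap, with the values lifted from the test_all assertion:

# %u expands to the user name, %h to the home directory, and bare relative
# paths are resolved under the home directory.
ssh_util.render_authorizedkeysfile_paths(
    "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", "/homedirs/bobby", "bobby"
)
# == ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys",
#     "/keys/path1", "/opt/bobby/keys"]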
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
new file mode 100644
index 00000000..3214410b
--- /dev/null
+++ b/tests/unittests/test_stages.py
@@ -0,0 +1,568 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests related to cloudinit.stages module."""
+import os
+import stat
+
+import pytest
+
+from cloudinit import sources, stages
+from cloudinit.event import EventScope, EventType
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.util import write_file
+from tests.unittests.helpers import CiTestCase, mock
+
+TEST_INSTANCE_ID = "i-testing"
+
+
+class FakeDataSource(sources.DataSource):
+ def __init__(
+ self, paths=None, userdata=None, vendordata=None, network_config=""
+ ):
+ super(FakeDataSource, self).__init__({}, None, paths=paths)
+ self.metadata = {"instance-id": TEST_INSTANCE_ID}
+ self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
+ self._network_config = None
+ if network_config: # Permit for None value to setup attribute
+ self._network_config = network_config
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+ def _get_data(self):
+ return True
+
+
+class TestInit(CiTestCase):
+ with_logs = True
+ allowed_subp = False
+
+ def setUp(self):
+ super(TestInit, self).setUp()
+ self.tmpdir = self.tmp_dir()
+ self.init = stages.Init()
+ # Setup fake Paths for Init to reference
+ self.init._cfg = {
+ "system_info": {
+ "distro": "ubuntu",
+ "paths": {"cloud_dir": self.tmpdir, "run_dir": self.tmpdir},
+ }
+ }
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self._real_is_new_instance = self.init.is_new_instance
+ self.init.is_new_instance = mock.Mock(return_value=True)
+
+ def test_wb__find_networking_config_disabled(self):
+ """find_networking_config returns no config when disabled."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath("data"), "upgraded-network"
+ )
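+ # Mere presence of the (empty) marker file is enough to disable networking.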
+ write_file(disable_file, "")
+ self.assertEqual(
+ (None, disable_file), self.init._find_networking_config()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_kernel(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {"config": "disabled"}
+ m_initramfs.return_value = {"config": ["fake_initrd"]}
+ self.assertEqual(
+ (None, NetworkConfigSource.cmdline),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by cmdline\n", self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_initrd(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = {"config": "disabled"}
+ self.assertEqual(
+ (None, NetworkConfigSource.initramfs),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by initramfs\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_datasrc(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by datasource cfg."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {},
+ } # system config doesn't disable
+
+ self.init.datasource = FakeDataSource(
+ network_config={"config": "disabled"}
+ )
+ self.assertEqual(
+ (None, NetworkConfigSource.ds), self.init._find_networking_config()
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by ds\n", self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_sysconfig(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by system config."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": "disabled"},
+ }
+ self.assertEqual(
+ (None, NetworkConfigSource.system_cfg),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by system_cfg\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_uses_datasrc_order(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ # cmdline and initramfs, which would normally be preferred over other
+ # sources, disable networking; in this case, though, the DS moves them
+ # later so its own config is preferred
+ m_cmdline.return_value = {"config": "disabled"}
+ m_initramfs.return_value = {"config": "disabled"}
+
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.ds,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ "invalid_src",
+ NetworkConfigSource.ds,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+ self.assertIn(
+ "WARNING: data source specifies an invalid network"
+ " cfg_source: invalid_src",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.fallback,
+ NetworkConfigSource.ds,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+ self.assertIn(
+ "WARNING: data source specifies an unavailable network"
+ " cfg_source: fallback",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_kernel(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {"config": ["fakekernel"]}
+ m_cmdline.return_value = expected_cfg
+ m_initramfs.return_value = {"config": ["fake_initrd"]}
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": ["fakesys_config"]},
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.cmdline),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_initramfs(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {"config": ["fake_initrd"]}
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = expected_cfg
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": ["fakesys_config"]},
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.initramfs),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_system_cfg(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns system config when present."""
+ m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
+ expected_cfg = {"config": ["fakesys_config"]}
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": expected_cfg,
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.system_cfg),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_datasrc_cfg(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns datasource net config if present."""
+ m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
+ # No system config for network in setUp
+ expected_cfg = {"config": ["fakedatasource"]}
+ self.init.datasource = FakeDataSource(network_config=expected_cfg)
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_fallback(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns fallback config if not defined."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # no initramfs network config
+ # Neither datasource nor system_info disable or provide network
+
+ fake_cfg = {
+ "config": [{"type": "physical", "name": "eth9"}],
+ "version": 1,
+ }
+
+ def fake_generate_fallback():
+ return fake_cfg
+
+ # Monkey patch distro which gets cached on self.init
+ distro = self.init.distro
+ distro.generate_fallback_config = fake_generate_fallback
+ self.assertEqual(
+ (fake_cfg, NetworkConfigSource.fallback),
+ self.init._find_networking_config(),
+ )
+ self.assertNotIn("network config disabled", self.logs.getvalue())
+
+ def test_apply_network_config_disabled(self):
+ """Log when network is disabled by upgraded-network."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath("data"), "upgraded-network"
+ )
+
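+ # Stub the lookup so it reports networking disabled by the marker file.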
+ def fake_network_config():
+ return (None, disable_file)
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.assertIn(
+ "INFO: network config is disabled by %s" % disable_file,
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ def test_apply_network_on_new_instance(self, m_ubuntu, m_macs):
+ """Call distro apply_network_config methods on is_new_instance."""
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True
+ )
+
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ def test_apply_network_on_same_instance_id(self, m_ubuntu):
+ """Only call distro.apply_network_config_names on same instance id."""
+ self.init.is_new_instance = self._real_is_new_instance
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath("data"), "instance-id"
+ )
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+ # CiTestCase doesn't work with pytest.mark.parametrize, and moving this
+ # functionality to a separate class is more cumbersome than it'd be worth
+ # at the moment, so use this as a simple setup
+ def _apply_network_setup(self, m_macs):
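+ # Simulate a later boot of an existing instance: persist its instance-id
+ # and force is_new_instance() to report False.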
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath("data"), "instance-id"
+ )
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
+
+ self.init._find_networking_config = fake_network_config
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self.init.is_new_instance = mock.Mock(return_value=False)
+ return net_cfg
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}},
+ )
+ def test_apply_network_allowed_when_default_boot(self, m_ubuntu, m_macs):
+ """Apply network if datasource permits BOOT event."""
+ net_cfg = self._apply_network_setup(m_macs)
+
+ self.init.apply_network_config(True)
+ assert (
+ mock.call(net_cfg)
+ == self.init.distro.apply_network_config_names.call_args_list[-1]
+ )
+ assert (
+ mock.call(net_cfg, bring_up=True)
+ == self.init.distro.apply_network_config.call_args_list[-1]
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_disabled_when_no_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network if datasource has no BOOT event."""
+ self._apply_network_setup(m_macs)
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_allowed_with_userdata_overrides(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if userdata overrides default config"""
+ net_cfg = self._apply_network_setup(m_macs)
+ self.init._cfg = {"updates": {"network": {"when": ["boot"]}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_disabled_when_unsupported(self, m_ubuntu, m_macs):
+ """Don't apply network config if unsupported.
+
+ Shouldn't work even when specified as userdata
+ """
+ self._apply_network_setup(m_macs)
+
+ self.init._cfg = {"updates": {"network": {"when": ["boot"]}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+
+class TestInit_InitializeFilesystem:
+ """Tests for cloudinit.stages.Init._initialize_filesystem.
+
+ TODO: Expand these tests to cover all of _initialize_filesystem's behavior.
+ """
+
+ @pytest.fixture
+ def init(self, paths):
+ """A fixture which yields a stages.Init instance with paths and cfg set
+
+ As it is replaced with a mock, consumers of this fixture can set
+ `init._cfg` if the default empty dict configuration is not appropriate.
+ """
+ with mock.patch("cloudinit.stages.util.ensure_dirs"):
+ init = stages.Init()
+ init._cfg = {}
+ init._paths = paths
+ yield init
+
+ @mock.patch("cloudinit.stages.util.ensure_file")
+ def test_ensure_file_not_called_if_no_log_file_configured(
+ self, m_ensure_file, init
+ ):
+ """If no log file is configured, we should not ensure its existence."""
+ init._cfg = {}
+
+ init._initialize_filesystem()
+
+ assert 0 == m_ensure_file.call_count
+
+ def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir):
+ """If a log file is configured, we should ensure its existence."""
+ log_file = tmpdir.join("cloud-init.log")
+ init._cfg = {"def_log_file": str(log_file)}
+
+ init._initialize_filesystem()
+
+ assert log_file.exists()
+ # Assert we create it 0o640 by default if it doesn't already exist
+ assert 0o640 == stat.S_IMODE(log_file.stat().mode)
+
+ def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
+ """If the log file already exists, we should not modify its permissions
+
+ See https://bugs.launchpad.net/cloud-init/+bug/1900837.
+ """
+ # Use a mode that will never be made the default so this test will
+ # always be valid
+ mode = 0o606
+ log_file = tmpdir.join("cloud-init.log")
+ log_file.ensure()
+ log_file.chmod(mode)
+ init._cfg = {"def_log_file": str(log_file)}
+
+ init._initialize_filesystem()
+
+ assert mode == stat.S_IMODE(log_file.stat().mode)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py
new file mode 100644
index 00000000..7cd1339b
--- /dev/null
+++ b/tests/unittests/test_subp.py
@@ -0,0 +1,353 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.subp utility functions"""
+
+import json
+import os
+import stat
+import sys
+from unittest import mock
+
+from cloudinit import subp, util
+from tests.unittests.helpers import CiTestCase, get_top_level_dir
+
+BASH = subp.which("bash")
+BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name"
+
+
+class TestPrependBaseCommands(CiTestCase):
+
+ with_logs = True
+
+ def test_prepend_base_command_errors_on_neither_string_nor_list(self):
+ """Raise an error for each command which is not a string or list."""
+ orig_commands = ["ls", 1, {"not": "gonna work"}, ["basecmd", "list"]]
+ with self.assertRaises(TypeError) as context_manager:
+ subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual(
+ "Invalid basecmd config. These commands are not a string or"
+ " list:\n1\n{'not': 'gonna work'}",
+ str(context_manager.exception),
+ )
+
+ def test_prepend_base_command_warns_on_non_base_string_commands(self):
+ """Warn on each non-base for commands of type string."""
+ orig_commands = [
+ "ls",
+ "basecmd list",
+ "touch /blah",
+ "basecmd install x",
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual(
+ "WARNING: Non-basecmd commands in basecmd config:\n"
+ "ls\ntouch /blah\n",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(orig_commands, fixed_commands)
+
+ def test_prepend_base_command_prepends_on_non_base_list_commands(self):
+ """Prepend 'basecmd' for each non-basecmd command of type list."""
+ orig_commands = [
+ ["ls"],
+ ["basecmd", "list"],
+ ["basecmda", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ expected = [
+ ["basecmd", "ls"],
+ ["basecmd", "list"],
+ ["basecmd", "basecmda", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual("", self.logs.getvalue())
+ self.assertEqual(expected, fixed_commands)
+
+ def test_prepend_base_command_removes_first_item_when_none(self):
+ """Remove the first element of a non-basecmd when it is None."""
+ orig_commands = [
+ [None, "ls"],
+ ["basecmd", "list"],
+ [None, "touch", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ expected = [
+ ["ls"],
+ ["basecmd", "list"],
+ ["touch", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual("", self.logs.getvalue())
+ self.assertEqual(expected, fixed_commands)
+
+
+class TestSubp(CiTestCase):
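+ # CiTestCase restricts real subprocess execution to the commands named
+ # in allowed_subp.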
+ allowed_subp = [
+ BASH,
+ "cat",
+ CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND,
+ sys.executable,
+ ]
+
+ stdin2err = [BASH, "-c", "cat >&2"]
+ stdin2out = ["cat"]
+ utf8_invalid = b"ab\xaadef"
+ utf8_valid = b"start \xc3\xa9 end"
+ utf8_valid_2 = b"d\xc3\xa9j\xc8\xa7"
+ printenv = [BASH, "-c", 'for n in "$@"; do echo "$n=${!n}"; done', "--"]
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa. So does /usr/bin/printf
+ # but by using bash, we remove dependency on another program.
+ return [BASH, "-c", 'printf "$@"', "printf"] + list(args)
+
+ def test_subp_handles_bytestrings(self):
+ """subp can run a bytestring command if shell is True."""
+ tmp_file = self.tmp_path("test.out")
+ cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd.encode("utf-8"), shell=True)
+ self.assertEqual("", out)
+ self.assertEqual("", _err)
+ self.assertEqual("HI MOM\n", util.load_file(tmp_file))
+
+ def test_subp_handles_strings(self):
+ """subp can run a string command if shell is True."""
+ tmp_file = self.tmp_path("test.out")
+ cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd, shell=True)
+ self.assertEqual("", out)
+ self.assertEqual("", _err)
+ self.assertEqual("HI MOM\n", util.load_file(tmp_file))
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = subp.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode("utf-8"))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = subp.subp(
+ self.stdin2out, capture=True, decode=False, data=self.utf8_valid
+ )
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = subp.subp(
+ self.printf_cmd("abc\\xaadef"), capture=True, decode="ignore"
+ )
+ self.assertEqual(out, "abcdef")
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = subp.subp(
+ self.stdin2out, capture=True, decode="strict", data=self.utf8_valid
+ )
+ self.assertEqual(out, self.utf8_valid.decode("utf-8"))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = subp.subp(
+ self.stdin2out, capture=True, data=self.utf8_invalid
+ )
+ expected = self.utf8_invalid.decode("utf-8", "replace")
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {
+ "args": self.stdin2out,
+ "capture": True,
+ "decode": "strict",
+ "data": self.utf8_invalid,
+ }
+ self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
+
+ def test_subp_capture_stderr(self):
+ data = b"hello world"
+ (out, err) = subp.subp(
+ self.stdin2err,
+ capture=True,
+ decode=False,
+ data=data,
+ update_env={"LC_ALL": "C"},
+ )
+ self.assertEqual(err, data)
+ self.assertEqual(out, b"")
+
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={"FOO": "BAR"}):
+ out, _err = subp.subp(self.printenv + ["FOO"], capture=True)
+ self.assertEqual("FOO=BAR", out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, _err = subp.subp(
+ self.printenv + ["FOO", "HOME", "K1", "K2"],
+ capture=True,
+ env={"FOO": "BAR"},
+ update_env={"HOME": "/myhome", "K2": "V2"},
+ )
+ self.assertEqual(
+ ["FOO=BAR", "HOME=/myhome", "K1=", "K2=V2"], out.splitlines()
+ )
+
+ def test_subp_update_env(self):
+ extra = {"FOO": "BAR", "HOME": "/root", "K1": "V1"}
+ with mock.patch.dict("os.environ", values=extra):
+ out, _err = subp.subp(
+ self.printenv + ["FOO", "HOME", "K1", "K2"],
+ capture=True,
+ update_env={"HOME": "/myhome", "K2": "V2"},
+ )
+
+ self.assertEqual(
+ ["FOO=BAR", "HOME=/myhome", "K1=V1", "K2=V2"], out.splitlines()
+ )
+
+ def test_subp_warn_missing_shebang(self):
+ """Warn on no #! in script"""
+ noshebang = self.tmp_path("noshebang")
+ util.write_file(noshebang, "true\n")
+
+ print("os is %s" % os)
+ os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(
+ subp.ProcessExecutionError,
+ r"Missing #! in script\?",
+ subp.subp,
+ (noshebang,),
+ )
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b"hello world"
+ (out, err) = subp.subp(
+ self.stdin2err,
+ capture=True,
+ combine_capture=True,
+ decode=False,
+ data=data,
+ )
+ self.assertEqual(b"", err)
+ self.assertEqual(data, out)
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = subp.subp(self.stdin2out, data=b"", capture=False)
+ self.assertIsNone(err)
+ self.assertIsNone(out)
+
+ def test_exception_has_out_err_are_bytes_if_decode_false(self):
+ """Raised exc should have stderr, stdout as bytes if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=False)
+ self.assertTrue(isinstance(cm.exception.stdout, bytes))
+ self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+ def test_exception_has_out_err_are_bytes_if_decode_true(self):
+ """Raised exc should have stderr, stdout as string if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=True)
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual(
+ "/target/my/path/", subp.target_path("/target/", "//my/path/")
+ )
+ self.assertEqual(
+ "/target/my/path/", subp.target_path("/target/", "///my/path/")
+ )
+
+ def test_c_lang_can_take_utf8_args(self):
+ """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+ When python starts up, its default encoding gets set based on
+ the value of LC_CTYPE. If no system locale is set, the default
+ encoding for both python2 and python3 in some paths will end up
+ being ascii.
+
+ Attempts to use setlocale or patching (or changing) os.environ
+ in the current environment seem to not be effective.
+
+ This test starts up a python with LC_CTYPE set to C so that
+ the default encoding will be set to ascii. In such an environment
+ Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """
+ python_prog = "\n".join(
+ [
+ "import json, sys",
+ 'sys.path.insert(0, "{}")'.format(get_top_level_dir()),
+ "from cloudinit.subp import subp",
+ "data = sys.stdin.read()",
+ "cmd = json.loads(data)",
+ "subp(cmd, capture=False)",
+ "",
+ ]
+ )
+ cmd = [
+ BASH,
+ "-c",
+ 'echo -n "$@"',
+ "--",
+ self.utf8_valid.decode("utf-8"),
+ ]
+ python_subp = [sys.executable, "-c", python_prog]
+
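+ # Run the helper under LC_CTYPE=C; it reads the JSON-encoded command
+ # from stdin and re-executes it via subp().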
+ out, _err = subp.subp(
+ python_subp,
+ update_env={"LC_CTYPE": "C"},
+ data=json.dumps(cmd).encode("utf-8"),
+ decode=False,
+ )
+ self.assertEqual(self.utf8_valid, out)
+
+ def test_bogus_command_logs_status_messages(self):
+ """status_cb gets status messages logs on bogus commands provided."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BOGUS_COMMAND], status_cb=status_cb)
+
+ expected = [
+ "Begin run command: {cmd}\n".format(cmd=BOGUS_COMMAND),
+ "ERROR: End run command: invalid command provided\n",
+ ]
+ self.assertEqual(expected, logs)
+
+ def test_command_logs_exit_codes_to_status_cb(self):
+ """status_cb gets status messages containing command exit code."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BASH, "-c", "exit 2"], status_cb=status_cb)
+ subp.subp([BASH, "-c", "exit 0"], status_cb=status_cb)
+
+ expected = [
+ "Begin run command: %s -c exit 2\n" % BASH,
+ "ERROR: End run command: exit(2)\n",
+ "Begin run command: %s -c exit 0\n" % BASH,
+ "End run command: exit(0)\n",
+ ]
+ self.assertEqual(expected, logs)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py
new file mode 100644
index 00000000..e91f389b
--- /dev/null
+++ b/tests/unittests/test_temp_utils.py
@@ -0,0 +1,135 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.temp_utils"""
+
+import os
+
+from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
+from tests.unittests.helpers import CiTestCase, wrap_and_call
+
+
+class TestTempUtils(CiTestCase):
+ def test_mkdtemp_default_non_root(self):
+ """mkdtemp creates a dir under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
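+ # wrap_and_call patches the listed names on cloudinit.temp_utils only
+ # for the duration of the mkdtemp() call.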
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)
+
+ def test_mkdtemp_default_non_root_needs_exe(self):
+ """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ needs_exe=True,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/var/tmp/cloud-init"}], calls)
+
+ def test_mkdtemp_default_root(self):
+ """mkdtemp creates a dir under /run/cloud-init for the privileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)
+
+ def test_mkstemp_default_non_root(self):
+ """mkstemp creates secure tempfile under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)
+
+ def test_mkstemp_default_root(self):
+ """mkstemp creates a secure tempfile in /run/cloud-init for root."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)
+
+ def test_tempdir_error_suppression(self):
+ """test tempdir suppresses errors during directory removal."""
+
+ with self.assertRaises(OSError):
+ with tempdir(prefix="cloud-init-dhcp-") as tdir:
+ os.rmdir(tdir)
+ # As a result, the directory is already gone,
+ # so shutil.rmtree should raise OSError
+
+ with tempdir(
+ rmtree_ignore_errors=True, prefix="cloud-init-dhcp-"
+ ) as tdir:
+ os.rmdir(tdir)
+ # Since the directory is already gone, shutil.rmtree would raise
+ # OSError, but we suppress that
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index cba09830..c1fec27c 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -4,14 +4,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers as test_helpers
import textwrap
from cloudinit import templater
from cloudinit.util import load_file, write_file
+from tests.unittests import helpers as test_helpers
try:
import Cheetah
+
HAS_CHEETAH = True
c = Cheetah # make pyflakes and pylint happy, as Cheetah is not used here
except ImportError:
@@ -22,32 +23,36 @@ class TestTemplates(test_helpers.CiTestCase):
with_logs = True
- jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n'
- jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8')
+ jinja_utf8 = b"It\xe2\x80\x99s not ascii, {{name}}\n"
+ jinja_utf8_rbob = b"It\xe2\x80\x99s not ascii, bob\n".decode("utf-8")
@staticmethod
def add_header(renderer, data):
"""Return text (py2 unicode/py3 str) with template header."""
if isinstance(data, bytes):
- data = data.decode('utf-8')
+ data = data.decode("utf-8")
return "## template: %s\n" % renderer + data
def test_render_basic(self):
- in_data = textwrap.dedent("""
+ in_data = textwrap.dedent(
+ """
${b}
c = d
- """)
+ """
+ )
in_data = in_data.strip()
- expected_data = textwrap.dedent("""
+ expected_data = textwrap.dedent(
+ """
2
c = d
- """)
- out_data = templater.basic_render(in_data, {'b': 2})
+ """
+ )
+ out_data = templater.basic_render(in_data, {"b": 2})
self.assertEqual(expected_data.strip(), out_data)
- @test_helpers.skipIf(not HAS_CHEETAH, 'cheetah renderer not available')
+ @test_helpers.skipIf(not HAS_CHEETAH, "cheetah renderer not available")
def test_detection(self):
blob = "## template:cheetah"
@@ -60,28 +65,28 @@ class TestTemplates(test_helpers.CiTestCase):
self.assertIn("cheetah", template_type)
self.assertEqual(blob, contents)
- blob = '##template:something-new'
+ blob = "##template:something-new"
self.assertRaises(ValueError, templater.detect_template, blob)
def test_render_cheetah(self):
- blob = '''## template:cheetah
-$a,$b'''
+ blob = """## template:cheetah
+$a,$b"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_jinja(self):
- blob = '''## template:jinja
-{{a}},{{b}}'''
+ blob = """## template:jinja
+{{a}},{{b}}"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_default(self):
- blob = '''$a,$b'''
+ blob = """$a,$b"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_basic_deeper(self):
- hn = 'myfoohost.yahoo.com'
+ hn = "myfoohost.yahoo.com"
expected_data = "h=%s\nc=d\n" % hn
in_data = "h=$hostname.canonical_name\nc=d\n"
params = {
@@ -96,59 +101,69 @@ $a,$b'''
hn = "myfoohost"
in_data = "h=$hostname\nc=d\n"
expected_data = "h=%s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
+ out_data = templater.basic_render(in_data, {"hostname": hn})
self.assertEqual(expected_data, out_data)
def test_render_basic_parens(self):
hn = "myfoohost"
in_data = "h = ${hostname}\nc=d\n"
expected_data = "h = %s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
+ out_data = templater.basic_render(in_data, {"hostname": hn})
self.assertEqual(expected_data, out_data)
def test_render_basic2(self):
mirror = "mymirror"
codename = "zany"
in_data = "deb $mirror $codename-updates main contrib non-free"
- ex_data = "deb %s %s-updates main contrib non-free" % (mirror,
- codename)
-
- out_data = templater.basic_render(in_data,
- {'mirror': mirror,
- 'codename': codename})
+ ex_data = "deb %s %s-updates main contrib non-free" % (
+ mirror,
+ codename,
+ )
+
+ out_data = templater.basic_render(
+ in_data, {"mirror": mirror, "codename": codename}
+ )
self.assertEqual(ex_data, out_data)
def test_jinja_nonascii_render_to_string(self):
"""Test jinja render_to_string with non-ascii content."""
self.assertEqual(
templater.render_string(
- self.add_header("jinja", self.jinja_utf8), {"name": "bob"}),
- self.jinja_utf8_rbob)
+ self.add_header("jinja", self.jinja_utf8), {"name": "bob"}
+ ),
+ self.jinja_utf8_rbob,
+ )
def test_jinja_nonascii_render_undefined_variables_to_default_py3(self):
"""Test py3 jinja render_to_string with undefined variable default."""
self.assertEqual(
templater.render_string(
- self.add_header("jinja", self.jinja_utf8), {}),
- self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name'))
+ self.add_header("jinja", self.jinja_utf8), {}
+ ),
+ self.jinja_utf8_rbob.replace("bob", "CI_MISSING_JINJA_VAR/name"),
+ )
def test_jinja_nonascii_render_to_file(self):
"""Test jinja render_to_file of a filename with non-ascii content."""
tmpl_fn = self.tmp_path("j-render-to-file.template")
out_fn = self.tmp_path("j-render-to-file.out")
- write_file(filename=tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ filename=tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"})
- result = load_file(out_fn, decode=False).decode('utf-8')
+ result = load_file(out_fn, decode=False).decode("utf-8")
self.assertEqual(result, self.jinja_utf8_rbob)
def test_jinja_nonascii_render_from_file(self):
"""Test jinja render_from_file with non-ascii content."""
tmpl_fn = self.tmp_path("j-render-from-file.template")
- write_file(tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
result = templater.render_from_file(tmpl_fn, {"name": "bob"})
self.assertEqual(result, self.jinja_utf8_rbob)
@@ -156,14 +171,18 @@ $a,$b'''
def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self):
"""Test jinja render_from_file will fallback to basic renderer."""
tmpl_fn = self.tmp_path("j-render-from-file.template")
- write_file(tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
result = templater.render_from_file(tmpl_fn, {"name": "bob"})
self.assertEqual(result, self.jinja_utf8.decode())
self.assertIn(
- 'WARNING: Jinja not available as the selected renderer for desired'
- ' template, reverting to the basic renderer.',
- self.logs.getvalue())
+ "WARNING: Jinja not available as the selected renderer for desired"
+ " template, reverting to the basic renderer.",
+ self.logs.getvalue(),
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_upgrade.py b/tests/unittests/test_upgrade.py
index f79a2536..d7a721a2 100644
--- a/cloudinit/tests/test_upgrade.py
+++ b/tests/unittests/test_upgrade.py
@@ -19,7 +19,7 @@ import pathlib
import pytest
from cloudinit.stages import _pkl_load
-from cloudinit.tests.helpers import resourceLocation
+from tests.unittests.helpers import resourceLocation
class TestUpgrade:
@@ -43,3 +43,10 @@ class TestUpgrade:
def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl):
"""We always expect Networking.blacklist_drivers to be initialised."""
assert previous_obj_pkl.distro.networking.blacklist_drivers is None
+
+ def test_paths_has_run_dir_attribute(self, previous_obj_pkl):
+ assert previous_obj_pkl.paths.run_dir is not None
+
+ def test_vendordata_exists(self, previous_obj_pkl):
+ assert previous_obj_pkl.vendordata2 is None
+ assert previous_obj_pkl.vendordata2_raw is None
diff --git a/cloudinit/tests/test_url_helper.py b/tests/unittests/test_url_helper.py
index 364ec822..85810e00 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -1,53 +1,63 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.url_helper import (
- NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
- retry_on_url_exc)
-from cloudinit.tests.helpers import CiTestCase, mock, skipIf
-from cloudinit import util
-from cloudinit import version
+import logging
import httpretty
import requests
+from cloudinit import util, version
+from cloudinit.url_helper import (
+ NOT_FOUND,
+ REDACTED,
+ UrlError,
+ oauth_headers,
+ read_file_or_url,
+ retry_on_url_exc,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipIf
try:
import oauthlib
+
assert oauthlib # avoid pyflakes error F401: import unused
_missing_oauthlib_dep = False
except ImportError:
_missing_oauthlib_dep = True
-M_PATH = 'cloudinit.url_helper.'
+M_PATH = "cloudinit.url_helper."
class TestOAuthHeaders(CiTestCase):
-
def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self):
"""oauth_headers raises a NotImplemented error when oauth absent."""
- with mock.patch.dict('sys.modules', {'oauthlib': None}):
+ with mock.patch.dict("sys.modules", {"oauthlib": None}):
with self.assertRaises(NotImplementedError) as context_manager:
oauth_headers(1, 2, 3, 4, 5)
self.assertEqual(
- 'oauth support is not available',
- str(context_manager.exception))
+ "oauth support is not available", str(context_manager.exception)
+ )
@skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency")
- @mock.patch('oauthlib.oauth1.Client')
+ @mock.patch("oauthlib.oauth1.Client")
def test_oauth_headers_calls_oathlibclient_when_available(self, m_client):
"""oauth_headers calls oaut1.hClient.sign with the provided url."""
+
class fakeclient(object):
def sign(self, url):
# The first and 3rd item of the client.sign tuple are ignored
- return ('junk', url, 'junk2')
+ return ("junk", url, "junk2")
m_client.return_value = fakeclient()
return_value = oauth_headers(
- 'url', 'consumer_key', 'token_key', 'token_secret',
- 'consumer_secret')
- self.assertEqual('url', return_value)
+ "url",
+ "consumer_key",
+ "token_key",
+ "token_secret",
+ "consumer_secret",
+ )
+ self.assertEqual("url", return_value)
class TestReadFileOrUrl(CiTestCase):
@@ -58,42 +68,45 @@ class TestReadFileOrUrl(CiTestCase):
"""Test that str(result.contents) on file is text version of contents.
It should not be "b'data'", but just "'data'" """
tmpf = self.tmp_path("myfile1")
- data = b'This is my file content\n'
+ data = b"This is my file content\n"
util.write_file(tmpf, data, omode="wb")
result = read_file_or_url("file://%s" % tmpf)
self.assertEqual(result.contents, data)
- self.assertEqual(str(result), data.decode('utf-8'))
+ self.assertEqual(str(result), data.decode("utf-8"))
@httpretty.activate
def test_read_file_or_url_str_from_url(self):
"""Test that str(result.contents) on url is text version of contents.
It should not be "b'data'", but just "'data'" """
- url = 'http://hostname/path'
- data = b'This is my url content\n'
+ url = "http://hostname/path"
+ data = b"This is my url content\n"
httpretty.register_uri(httpretty.GET, url, data)
result = read_file_or_url(url)
self.assertEqual(result.contents, data)
- self.assertEqual(str(result), data.decode('utf-8'))
+ self.assertEqual(str(result), data.decode("utf-8"))
@httpretty.activate
def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
"""Headers are redacted from logs but unredacted in requests."""
- url = 'http://hostname/path'
- headers = {'sensitive': 'sekret', 'server': 'blah'}
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
httpretty.register_uri(httpretty.GET, url)
+ # By default, httpretty will log our request along with the header,
+ # so if we don't change this the secret will show up in the logs
+ logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
- read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+ read_file_or_url(url, headers=headers, headers_redact=["sensitive"])
logs = self.logs.getvalue()
for k in headers.keys():
self.assertEqual(headers[k], httpretty.last_request().headers[k])
self.assertIn(REDACTED, logs)
- self.assertNotIn('sekret', logs)
+ self.assertNotIn("sekret", logs)
@httpretty.activate
def test_read_file_or_url_str_from_url_redacts_noheaders(self):
"""When no headers_redact, header values are in logs and requests."""
- url = 'http://hostname/path'
- headers = {'sensitive': 'sekret', 'server': 'blah'}
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
httpretty.register_uri(httpretty.GET, url)
read_file_or_url(url, headers=headers)
@@ -101,21 +114,27 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(headers[k], httpretty.last_request().headers[k])
logs = self.logs.getvalue()
self.assertNotIn(REDACTED, logs)
- self.assertIn('sekret', logs)
+ self.assertIn("sekret", logs)
- @mock.patch(M_PATH + 'readurl')
+ @mock.patch(M_PATH + "readurl")
def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
"""read_file_or_url passes all params through to readurl."""
- url = 'http://hostname/path'
- response = 'This is my url content\n'
+ url = "http://hostname/path"
+ response = "This is my url content\n"
m_readurl.return_value = response
- params = {'url': url, 'timeout': 1, 'retries': 2,
- 'headers': {'somehdr': 'val'},
- 'data': 'data', 'sec_between': 1,
- 'ssl_details': {'cert_file': '/path/cert.pem'},
- 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'}
+ params = {
+ "url": url,
+ "timeout": 1,
+ "retries": 2,
+ "headers": {"somehdr": "val"},
+ "data": "data",
+ "sec_between": 1,
+ "ssl_details": {"cert_file": "/path/cert.pem"},
+ "headers_cb": "headers_cb",
+ "exception_cb": "exception_cb",
+ }
self.assertEqual(response, read_file_or_url(**params))
- params.pop('url') # url is passed in as a positional arg
+ params.pop("url") # url is passed in as a positional arg
self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)
def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
@@ -125,7 +144,7 @@ class TestReadFileOrUrl(CiTestCase):
retries: 0, additional headers None beyond default, method: GET,
data: None, check_status: True and allow_redirects: True
"""
- url = 'http://hostname/path'
+ url = "http://hostname/path"
m_response = mock.MagicMock()
@@ -133,20 +152,26 @@ class TestReadFileOrUrl(CiTestCase):
@classmethod
def request(cls, **kwargs):
self.assertEqual(
- {'url': url, 'allow_redirects': True, 'method': 'GET',
- 'headers': {
- 'User-Agent': 'Cloud-Init/%s' % (
- version.version_string())}},
- kwargs)
+ {
+ "url": url,
+ "allow_redirects": True,
+ "method": "GET",
+ "headers": {
+ "User-Agent": "Cloud-Init/%s"
+ % (version.version_string())
+ },
+ },
+ kwargs,
+ )
return m_response
- with mock.patch(M_PATH + 'requests.Session') as m_session:
- error = requests.exceptions.HTTPError('broke')
+ with mock.patch(M_PATH + "requests.Session") as m_session:
+ error = requests.exceptions.HTTPError("broke")
m_session.side_effect = [error, FakeSession()]
# assert no retries and check_status == True
with self.assertRaises(UrlError) as context_manager:
response = read_file_or_url(url)
- self.assertEqual('broke', str(context_manager.exception))
+ self.assertEqual("broke", str(context_manager.exception))
# assert default headers, method, url and allow_redirects True
# Success on 2nd call with FakeSession
response = read_file_or_url(url)
@@ -154,21 +179,22 @@ class TestReadFileOrUrl(CiTestCase):
class TestRetryOnUrlExc(CiTestCase):
-
def test_do_not_retry_non_urlerror(self):
"""When exception is not UrlError return False."""
- myerror = IOError('something unexcpected')
- self.assertFalse(retry_on_url_exc(msg='', exc=myerror))
+ myerror = IOError("something unexcpected")
+ self.assertFalse(retry_on_url_exc(msg="", exc=myerror))
def test_perform_retries_on_not_found(self):
"""When exception is UrlError with a 404 status code return True."""
- myerror = UrlError(cause=RuntimeError(
- 'something was not found'), code=NOT_FOUND)
- self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+ myerror = UrlError(
+ cause=RuntimeError("something was not found"), code=NOT_FOUND
+ )
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
def test_perform_retries_on_timeout(self):
"""When exception is a requests.Timout return True."""
- myerror = UrlError(cause=requests.Timeout('something timed out'))
- self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+ myerror = UrlError(cause=requests.Timeout("something timed out"))
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 857629f1..3765511b 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,23 +1,1339 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit.util"""
+
+import base64
import io
+import json
import logging
import os
+import platform
import re
import shutil
import stat
import tempfile
+from textwrap import dedent
+from unittest import mock
+
import pytest
import yaml
-from unittest import mock
-from cloudinit import subp
-from cloudinit import importer, util
-from cloudinit.tests import helpers
+from cloudinit import importer, subp, util
+from tests.unittests import helpers
+from tests.unittests.helpers import CiTestCase
+
+LOG = logging.getLogger(__name__)
+
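+# Sample /proc/<pid>/mountinfo lines consumed by the parse_mount_info tests.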
+MOUNT_INFO = [
+ "68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64",
+ "153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2",
+]
+
+OS_RELEASE_SLES = dedent(
+ """\
+ NAME="SLES"
+ VERSION="12-SP3"
+ VERSION_ID="12.3"
+ PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
+ ID="sles"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:suse:sles:12:sp3"
+"""
+)
+
+OS_RELEASE_OPENSUSE = dedent(
+ """\
+ NAME="openSUSE Leap"
+ VERSION="42.3"
+ ID=opensuse
+ ID_LIKE="suse"
+ VERSION_ID="42.3"
+ PRETTY_NAME="openSUSE Leap 42.3"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:leap:42.3"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_L15 = dedent(
+ """\
+ NAME="openSUSE Leap"
+ VERSION="15.0"
+ ID="opensuse-leap"
+ ID_LIKE="suse opensuse"
+ VERSION_ID="15.0"
+ PRETTY_NAME="openSUSE Leap 15.0"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:leap:15.0"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_TW = dedent(
+ """\
+ NAME="openSUSE Tumbleweed"
+ ID="opensuse-tumbleweed"
+ ID_LIKE="opensuse suse"
+ VERSION_ID="20180920"
+ PRETTY_NAME="openSUSE Tumbleweed"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_CENTOS = dedent(
+ """\
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+"""
+)
+
+OS_RELEASE_REDHAT_7 = dedent(
+ """\
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+"""
+)
+
+OS_RELEASE_ALMALINUX_8 = dedent(
+ """\
+ NAME="AlmaLinux"
+ VERSION="8.3 (Purple Manul)"
+ ID="almalinux"
+ ID_LIKE="rhel centos fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA"
+ HOME_URL="https://almalinux.org/"
+ BUG_REPORT_URL="https://bugs.almalinux.org/"
+
+ ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8"
+ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
+"""
+)
+
+OS_RELEASE_EUROLINUX_7 = dedent(
+ """\
+ VERSION="7.9 (Minsk)"
+ ID="eurolinux"
+ ID_LIKE="rhel scientific centos fedora"
+ VERSION_ID="7.9"
+ PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+ HOME_URL="http://www.euro-linux.com/"
+ BUG_REPORT_URL="mailto:support@euro-linux.com"
+ REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+"""
+)
+
+OS_RELEASE_EUROLINUX_8 = dedent(
+ """\
+ NAME="EuroLinux"
+ VERSION="8.4 (Vaduz)"
+ ID="eurolinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+ HOME_URL="https://www.euro-linux.com/"
+ BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_MIRACLELINUX_8 = dedent(
+ """\
+ NAME="MIRACLE LINUX"
+ VERSION="8.4 (Peony)"
+ ID="miraclelinux"
+ ID_LIKE="rhel fedora"
+ PLATFORM_ID="platform:el8"
+ VERSION_ID="8"
+ PRETTY_NAME="MIRACLE LINUX 8.4 (Peony)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:cybertrust_japan:miracle_linux:8"
+ HOME_URL="https://www.cybertrust.co.jp/miracle-linux/"
+ DOCUMENTATION_URL="https://www.miraclelinux.com/support/miraclelinux8"
+ BUG_REPORT_URL="https://bugzilla.asianux.com/"
+ MIRACLELINUX_SUPPORT_PRODUCT="MIRACLE LINUX"
+ MIRACLELINUX_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_ROCKY_8 = dedent(
+ """\
+ NAME="Rocky Linux"
+ VERSION="8.3 (Green Obsidian)"
+ ID="rocky"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:rocky:rocky:8"
+ HOME_URL="https://rockylinux.org/"
+ BUG_REPORT_URL="https://bugs.rockylinux.org/"
+ ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+ ROCKY_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_VIRTUOZZO_8 = dedent(
+ """\
+ NAME="Virtuozzo Linux"
+ VERSION="8"
+ ID="virtuozzo"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Virtuozzo Linux"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+ HOME_URL="https://www.vzlinux.org"
+ BUG_REPORT_URL="https://bugs.openvz.org"
+"""
+)
+
+OS_RELEASE_CLOUDLINUX_8 = dedent(
+ """\
+ NAME="CloudLinux"
+ VERSION="8.4 (Valery Rozhdestvensky)"
+ ID="cloudlinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server"
+ HOME_URL="https://www.cloudlinux.com/"
+ BUG_REPORT_URL="https://www.cloudlinux.com/support"
+"""
+)
+
+OS_RELEASE_OPENEULER_20 = dedent(
+ """\
+ NAME="openEuler"
+ VERSION="20.03 (LTS-SP2)"
+ ID="openEuler"
+ VERSION_ID="20.03"
+ PRETTY_NAME="openEuler 20.03 (LTS-SP2)"
+ ANSI_COLOR="0;31"
+"""
+)
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+ "Red Hat Enterprise Linux Server release 6.10 (Santiago)"
+)
+REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)"
+REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)"
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
+REDHAT_RELEASE_MIRACLELINUX_8 = "MIRACLE LINUX release 8.4 (Peony)"
+REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)"
+REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8"
+REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)"
+OS_RELEASE_DEBIAN = dedent(
+ """\
+ PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+ NAME="Debian GNU/Linux"
+ VERSION_ID="9"
+ VERSION="9 (stretch)"
+ ID=debian
+ HOME_URL="https://www.debian.org/"
+ SUPPORT_URL="https://www.debian.org/support"
+ BUG_REPORT_URL="https://bugs.debian.org/"
+"""
+)
+
+OS_RELEASE_UBUNTU = dedent(
+ """\
+ NAME="Ubuntu"\n
+ # comment test
+ VERSION="16.04.3 LTS (Xenial Xerus)"\n
+ ID=ubuntu\n
+ ID_LIKE=debian\n
+ PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+ VERSION_ID="16.04"\n
+ HOME_URL="http://www.ubuntu.com/"\n
+ SUPPORT_URL="http://help.ubuntu.com/"\n
+ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+ VERSION_CODENAME=xenial\n
+ UBUNTU_CODENAME=xenial\n
+"""
+)
+
+OS_RELEASE_PHOTON = """\
+ NAME="VMware Photon OS"
+ VERSION="4.0"
+ ID=photon
+ VERSION_ID=4.0
+ PRETTY_NAME="VMware Photon OS/Linux"
+ ANSI_COLOR="1;34"
+ HOME_URL="https://vmware.github.io/photon/"
+ BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+"""
+
+
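+# Minimal cloud stand-in for get_hostname tests: records calls and returns
+# canned hostname/fqdn values.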
+class FakeCloud(object):
+ def __init__(self, hostname, fqdn):
+ self.hostname = hostname
+ self.fqdn = fqdn
+ self.calls = []
+
+ def get_hostname(self, fqdn=None, metadata_only=None):
+ myargs = {}
+ if fqdn is not None:
+ myargs["fqdn"] = fqdn
+ if metadata_only is not None:
+ myargs["metadata_only"] = metadata_only
+ self.calls.append(myargs)
+ if fqdn:
+ return self.fqdn
+ return self.hostname
+
+
+class TestUtil(CiTestCase):
+ def test_parse_mount_info_no_opts_no_arg(self):
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+
+ def test_parse_mount_info_no_opts_arg(self):
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG, False)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+
+ def test_parse_mount_info_with_opts(self):
+ result = util.parse_mount_info("/", MOUNT_INFO, LOG, True)
+ self.assertEqual(("/dev/sda1", "btrfs", "/", "ro,relatime"), result)
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_mount_is_rw(self, m_mount_info):
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "rw,relatime")
+ is_rw = util.mount_is_read_write("/")
+ self.assertEqual(is_rw, True)
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_mount_is_ro(self, m_mount_info):
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "ro,relatime")
+ is_rw = util.mount_is_read_write("/")
+ self.assertEqual(is_rw, False)
+
+
+class TestUptime(CiTestCase):
+ @mock.patch("cloudinit.util.boottime")
+ @mock.patch("cloudinit.util.os.path.exists")
+ @mock.patch("cloudinit.util.time.time")
+ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
+ boottime = 1000.0
+ uptime = 10.0
+ m_boottime.return_value = boottime
+ m_time.return_value = boottime + uptime
+ m_exists.return_value = False
+ result = util.uptime()
+ self.assertEqual(str(uptime), result)
+
+
+class TestShellify(CiTestCase):
+ def test_input_dict_raises_type_error(self):
+ self.assertRaisesRegex(
+ TypeError,
+ "Input.*was.*dict.*xpected",
+ util.shellify,
+ {"mykey": "myval"},
+ )
+
+ def test_input_str_raises_type_error(self):
+ self.assertRaisesRegex(
+ TypeError, "Input.*was.*str.*xpected", util.shellify, "foobar"
+        )
+
+    def test_value_with_int_raises_type_error(self):
+        self.assertRaisesRegex(
+            TypeError, "shellify.*int", util.shellify, ["foo", 1]
+        )
-class FakeSelinux(object):
+
+    def test_supports_strings_and_lists(self):
+ self.assertEqual(
+ "\n".join(
+ [
+ "#!/bin/sh",
+ "echo hi mom",
+ "'echo' 'hi dad'",
+ "'echo' 'hi' 'sis'",
+ "",
+ ]
+ ),
+ util.shellify(
+ ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")]
+ ),
+ )
+
+ def test_supports_comments(self):
+ self.assertEqual(
+ "\n".join(["#!/bin/sh", "echo start", "echo end", ""]),
+ util.shellify(["echo start", None, "echo end"]),
+ )
+
+
+class TestGetHostnameFqdn(CiTestCase):
+ def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
+ """When cfg only has the fqdn key, derive hostname and fqdn from it."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"fqdn": "myhost.domain.com"}, cloud=None
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
+ """When cfg has both fqdn and hostname keys, return them."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None
+ )
+ self.assertEqual("other", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
+ """When cfg has only hostname key which represents a fqdn, use that."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"hostname": "myhost.domain.com"}, cloud=None
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
+ """When cfg has a hostname without a '.' query cloud.get_hostname."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"hostname": "myhost"}, cloud=mycloud
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": False}], mycloud.calls
+ )
+
+ def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
+        """When cfg has neither hostname nor fqdn, call cloud.get_hostname."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
+ self.assertEqual("cloudhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": False}, {"metadata_only": False}],
+ mycloud.calls,
+ )
+
+ def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
+ """Calls to cloud.get_hostname pass the metadata_only parameter."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ _hn, _fqdn = util.get_hostname_fqdn(
+ cfg={}, cloud=mycloud, metadata_only=True
+ )
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": True}, {"metadata_only": True}],
+ mycloud.calls,
+ )
+
+
+class TestBlkid(CiTestCase):
+ ids = {
+ "id01": "1111-1111",
+ "id02": "22222222-2222",
+ "id03": "33333333-3333",
+ "id04": "44444444-4444",
+ "id05": "55555555-5555-5555-5555-555555555555",
+ "id06": "66666666-6666-6666-6666-666666666666",
+ "id07": "52894610484658920398",
+ "id08": "86753098675309867530",
+ "id09": "99999999-9999-9999-9999-999999999999",
+ }
+
+ blkid_out = dedent(
+ """\
+ /dev/loop0: TYPE="squashfs"
+ /dev/loop1: TYPE="squashfs"
+ /dev/loop2: TYPE="squashfs"
+ /dev/loop3: TYPE="squashfs"
+ /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}"
+ /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}"
+ /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}"
+ /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """
+ """TYPE="zfs_member" PARTUUID="{id09}"
+ /dev/loop4: TYPE="squashfs"
+ """
+ )
+
+ maxDiff = None
+
+ def _get_expected(self):
+ return {
+ "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"},
+ "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"},
+ "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"},
+ "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"},
+ "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"},
+ "/dev/sda1": {
+ "DEVNAME": "/dev/sda1",
+ "TYPE": "vfat",
+ "UUID": self.ids["id01"],
+ "PARTUUID": self.ids["id02"],
+ },
+ "/dev/sda2": {
+ "DEVNAME": "/dev/sda2",
+ "TYPE": "ext4",
+ "UUID": self.ids["id03"],
+ "PARTUUID": self.ids["id04"],
+ },
+ "/dev/sda3": {
+ "DEVNAME": "/dev/sda3",
+ "TYPE": "ext4",
+ "UUID": self.ids["id05"],
+ "PARTUUID": self.ids["id06"],
+ },
+ "/dev/sda4": {
+ "DEVNAME": "/dev/sda4",
+ "TYPE": "zfs_member",
+ "LABEL": "default",
+ "UUID": self.ids["id07"],
+ "UUID_SUB": self.ids["id08"],
+ "PARTUUID": self.ids["id09"],
+ },
+ }
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_functional_blkid(self, m_subp):
+ m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ self.assertEqual(self._get_expected(), util.blkid())
+ m_subp.assert_called_with(
+ ["blkid", "-o", "full"], capture=True, decode="replace"
+ )
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_blkid_no_cache_uses_no_cache(self, m_subp):
+ """blkid should turn off cache if disable_cache is true."""
+ m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ self.assertEqual(self._get_expected(), util.blkid(disable_cache=True))
+ m_subp.assert_called_with(
+ ["blkid", "-o", "full", "-c", "/dev/null"],
+ capture=True,
+ decode="replace",
+ )
+
+
+@mock.patch("cloudinit.subp.subp")
+class TestUdevadmSettle(CiTestCase):
+ def test_with_no_params(self, m_subp):
+ """called with no parameters."""
+ util.udevadm_settle()
+        m_subp.assert_called_once_with(["udevadm", "settle"])
+
+ def test_with_exists_and_not_exists(self, m_subp):
+ """with exists=file where file does not exist should invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.udevadm_settle(exists=mydev)
+        m_subp.assert_called_once_with(
+            ["udevadm", "settle", "--exit-if-exists=%s" % mydev]
+        )
+
+ def test_with_exists_and_file_exists(self, m_subp):
+ """with exists=file where file does exist should not invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.write_file(mydev, "foo\n")
+ util.udevadm_settle(exists=mydev)
+ self.assertIsNone(m_subp.call_args)
+
+ def test_with_timeout_int(self, m_subp):
+ """timeout can be an integer."""
+ timeout = 9
+ util.udevadm_settle(timeout=timeout)
+        m_subp.assert_called_once_with(
+            ["udevadm", "settle", "--timeout=%s" % timeout]
+        )
+
+ def test_with_timeout_string(self, m_subp):
+ """timeout can be a string."""
+ timeout = "555"
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ["udevadm", "settle", "--timeout=%s" % timeout]
+ )
+
+ def test_with_exists_and_timeout(self, m_subp):
+ """test call with both exists and timeout."""
+ mydev = self.tmp_path("mydev")
+ timeout = "3"
+        util.udevadm_settle(exists=mydev, timeout=timeout)
+        m_subp.assert_called_once_with(
+ [
+ "udevadm",
+ "settle",
+ "--exit-if-exists=%s" % mydev,
+ "--timeout=%s" % timeout,
+ ]
+ )
+
+ def test_subp_exception_raises_to_caller(self, m_subp):
+ m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+ self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch("os.path.exists")
+class TestGetLinuxDistro(CiTestCase):
+ def setUp(self):
+ # python2 has no lru_cache, and therefore, no cache_clear()
+ if hasattr(util.get_linux_distro, "cache_clear"):
+ util.get_linux_distro.cache_clear()
+
+ @classmethod
+ def os_release_exists(self, path):
+ """Side effect function"""
+ if path == "/etc/os-release":
+ return 1
+
+ @classmethod
+ def redhat_release_exists(self, path):
+ """Side effect function"""
+ if path == "/etc/redhat-release":
+            return 1
+
+    @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file has
+ the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_SLES
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("sles", "12.3", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file does not
+ have the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_UBUNTU
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("ubuntu", "16.04", "xenial"), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.release")
+ @mock.patch("cloudinit.util._parse_redhat_release")
+ def test_get_linux_freebsd(
+ self,
+ m_parse_redhat_release,
+ m_platform_release,
+ m_platform_system,
+ m_path_exists,
+ ):
+ """Verify we get the correct name and release name on FreeBSD."""
+ m_path_exists.return_value = False
+ m_platform_release.return_value = "12.0-RELEASE-p10"
+ m_platform_system.return_value = "FreeBSD"
+ m_parse_redhat_release.return_value = {}
+ util.is_BSD.cache_clear()
+ dist = util.get_linux_distro()
+ self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_centos6(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS 6."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "6.10", "Final"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+ """Verify the correct release info on CentOS 7 without os-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+ m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "7.5.1804", "Core"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 6 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "6.10", "Santiago"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on COPR CentOS."""
+ m_os_release.return_value = OS_RELEASE_CENTOS
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "7", "Core"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_rhrelease(
+ self, m_os_release, m_path_exists
+ ):
+ """Verify miraclelinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("miracle", "8.4", "Peony"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_osrelease(
+ self, m_os_release, m_path_exists
+ ):
+ """Verify miraclelinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("miraclelinux", "8", "Peony"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_debian(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on Debian."""
+ m_os_release.return_value = OS_RELEASE_DEBIAN
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("debian", "9", "stretch"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_openeuler(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on openEuler."""
+ m_os_release.return_value = OS_RELEASE_OPENEULER_20
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ prior to openSUSE Leap 15.
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("opensuse", "42.3", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ for openSUSE Leap 15.0 and later.
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ for openSUSE Tumbleweed
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(
+ ("opensuse-tumbleweed", "20180920", platform.machine()), dist
+ )
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on PhotonOS"""
+ m_os_release.return_value = OS_RELEASE_PHOTON
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_no_data(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get no information if os-release does not exist"""
+ m_platform_dist.return_value = ("", "", "")
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("", "", ""), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_no_impl(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get an empty tuple when no information exists and
+ Exceptions are not propagated"""
+ m_platform_dist.side_effect = Exception()
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("", "", ""), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_plat_data(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get the correct platform information"""
+ m_platform_dist.return_value = ("foo", "1.1", "aarch64")
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("foo", "1.1", "aarch64"), dist)
+
+
+class TestGetVariant:
+ @pytest.mark.parametrize(
+ "info, expected_variant",
+ [
+ ({"system": "Linux", "dist": ("almalinux",)}, "almalinux"),
+ ({"system": "linux", "dist": ("alpine",)}, "alpine"),
+ ({"system": "linux", "dist": ("arch",)}, "arch"),
+ ({"system": "linux", "dist": ("centos",)}, "centos"),
+ ({"system": "linux", "dist": ("cloudlinux",)}, "cloudlinux"),
+ ({"system": "linux", "dist": ("debian",)}, "debian"),
+ ({"system": "linux", "dist": ("eurolinux",)}, "eurolinux"),
+ ({"system": "linux", "dist": ("fedora",)}, "fedora"),
+ ({"system": "linux", "dist": ("openEuler",)}, "openeuler"),
+ ({"system": "linux", "dist": ("photon",)}, "photon"),
+ ({"system": "linux", "dist": ("rhel",)}, "rhel"),
+ ({"system": "linux", "dist": ("rocky",)}, "rocky"),
+ ({"system": "linux", "dist": ("suse",)}, "suse"),
+ ({"system": "linux", "dist": ("virtuozzo",)}, "virtuozzo"),
+ ({"system": "linux", "dist": ("ubuntu",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("linuxmint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("mint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("redhat",)}, "rhel"),
+ ({"system": "linux", "dist": ("opensuse",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-tumbleweed",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-leap",)}, "suse"),
+ ({"system": "linux", "dist": ("sles",)}, "suse"),
+ ({"system": "linux", "dist": ("sle_hpc",)}, "suse"),
+ ({"system": "linux", "dist": ("my_distro",)}, "linux"),
+ ({"system": "Windows", "dist": ("dontcare",)}, "windows"),
+ ({"system": "Darwin", "dist": ("dontcare",)}, "darwin"),
+ ({"system": "Freebsd", "dist": ("dontcare",)}, "freebsd"),
+ ({"system": "Netbsd", "dist": ("dontcare",)}, "netbsd"),
+ ({"system": "Openbsd", "dist": ("dontcare",)}, "openbsd"),
+ ({"system": "Dragonfly", "dist": ("dontcare",)}, "dragonfly"),
+ ],
+ )
+ def test_get_variant(self, info, expected_variant):
+ """Verify we get the correct variant name"""
+ assert util._get_variant(info) == expected_variant
+
+
+class TestJsonDumps(CiTestCase):
+ def test_is_str(self):
+ """json_dumps should return a string."""
+ self.assertTrue(isinstance(util.json_dumps({"abc": "123"}), str))
+
+ def test_utf8(self):
+ smiley = "\\ud83d\\ude03"
+ self.assertEqual(
+ {"smiley": smiley}, json.loads(util.json_dumps({"smiley": smiley}))
+ )
+
+ def test_non_utf8(self):
+ blob = b"\xba\x03Qx-#y\xea"
+ self.assertEqual(
+ {"blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8")},
+ json.loads(util.json_dumps({"blob": blob})),
+ )
+
+
+@mock.patch("os.path.exists")
+class TestIsLXD(CiTestCase):
+ def test_is_lxd_true_on_sock_device(self, m_exists):
+ """When lxd's /dev/lxd/sock exists, is_lxd returns true."""
+ m_exists.return_value = True
+ self.assertTrue(util.is_lxd())
+ m_exists.assert_called_once_with("/dev/lxd/sock")
+
+ def test_is_lxd_false_when_sock_device_absent(self, m_exists):
+ """When lxd's /dev/lxd/sock is absent, is_lxd returns false."""
+ m_exists.return_value = False
+ self.assertFalse(util.is_lxd())
+ m_exists.assert_called_once_with("/dev/lxd/sock")
+
+
+class TestReadCcFromCmdline:
+ if hasattr(pytest, "param"):
+ random_string = pytest.param(
+ CiTestCase.random_string(), None, id="random_string"
+ )
+ else:
+ random_string = (CiTestCase.random_string(), None)
+
+ @pytest.mark.parametrize(
+ "cmdline,expected_cfg",
+ [
+ # Return None if cmdline has no cc:<YAML>end_cc content.
+ random_string,
+ # Return None if YAML content is empty string.
+ ("foo cc: end_cc bar", None),
+ # Return expected dictionary without trailing end_cc marker.
+ ("foo cc: ssh_pwauth: true", {"ssh_pwauth": True}),
+ # Return expected dictionary w escaped newline and no end_cc.
+ ("foo cc: ssh_pwauth: true\\n", {"ssh_pwauth": True}),
+ # Return expected dictionary of yaml between cc: and end_cc.
+ ("foo cc: ssh_pwauth: true end_cc bar", {"ssh_pwauth": True}),
+ # Return dict with list value w escaped newline, no end_cc.
+ (
+ "cc: ssh_import_id: [smoser, kirkland]\\n",
+ {"ssh_import_id": ["smoser", "kirkland"]},
+ ),
+ # Parse urlencoded brackets in yaml content.
+ (
+ "cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc",
+ {"ssh_import_id": ["smoser", "kirkland"]},
+ ),
+ # Parse complete urlencoded yaml content.
+ (
+ "cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc",
+ {"ssh_import_id": ["user1", "user2"]},
+ ),
+ # Parse nested dictionary in yaml content.
+ (
+ "cc: ntp: {enabled: true, ntp_client: myclient} end_cc",
+ {"ntp": {"enabled": True, "ntp_client": "myclient"}},
+ ),
+ # Parse single mapping value in yaml content.
+ ("cc: ssh_import_id: smoser end_cc", {"ssh_import_id": "smoser"}),
+ # Parse multiline content with multiple mapping and nested lists.
+ (
+ "cc: ssh_import_id: [smoser, bob]\\n"
+ "runcmd: [ [ ls, -l ], echo hi ] end_cc",
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # Parse multiline encoded content w/ mappings and nested lists.
+ (
+ "cc: ssh_import_id: %5Bsmoser, bob%5D\\n"
+ "runcmd: [ [ ls, -l ], echo hi ] end_cc",
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # test encoded escaped newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ (
+ "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn"
+ "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C"
+ "%20echo%20hi%20%5D" + " end_cc"
+ ),
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # test encoded newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ (
+ "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A"
+ "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C"
+ "%20echo%20hi%20%5D" + " end_cc"
+ ),
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # Parse and merge multiple yaml content sections.
+ (
+ "cc:ssh_import_id: [smoser, bob] end_cc "
+ "cc: runcmd: [ [ ls, -l ] ] end_cc",
+ {"ssh_import_id": ["smoser", "bob"], "runcmd": [["ls", "-l"]]},
+ ),
+ # Parse and merge multiple encoded yaml content sections.
+ (
+ "cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc "
+ "cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc",
+ {"ssh_import_id": ["smoser"], "runcmd": [["ls", "-l"]]},
+ ),
+ ],
+ )
+ def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+ assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
+class TestMountCb:
+ """Tests for ``util.mount_cb``.
+
+ These tests consider the "unit" under test to be ``util.mount_cb`` and
+ ``util.unmounter``, which is only used by ``mount_cb``.
+
+ TODO: Test default mtype determination
+ TODO: Test the if/else branch that actually performs the mounting operation
+ """
+
+ @pytest.fixture
+ def already_mounted_device_and_mountdict(self):
+ """Mock an already-mounted device, and yield (device, mount dict)"""
+ device = "/dev/fake0"
+ mountpoint = "/mnt/fake"
+ with mock.patch("cloudinit.util.subp.subp"):
+ with mock.patch("cloudinit.util.mounts") as m_mounts:
+ mounts = {device: {"mountpoint": mountpoint}}
+ m_mounts.return_value = mounts
+ yield device, mounts[device]
+
+ @pytest.fixture
+ def already_mounted_device(self, already_mounted_device_and_mountdict):
+ """already_mounted_device_and_mountdict, but return only the device"""
+ return already_mounted_device_and_mountdict[0]
+
+ @pytest.mark.parametrize(
+ "mtype,expected",
+ [
+ # While the filesystem is called iso9660, the mount type is cd9660
+ ("iso9660", "cd9660"),
+ # vfat is generally called "msdos" on BSD
+ ("vfat", "msdos"),
+ # judging from man pages, only FreeBSD has this alias
+ ("msdosfs", "msdos"),
+ # Test happy path
+ ("ufs", "ufs"),
+ ],
+ )
+ @mock.patch("cloudinit.util.is_Linux", autospec=True)
+ @mock.patch("cloudinit.util.is_BSD", autospec=True)
+ @mock.patch("cloudinit.util.subp.subp")
+ @mock.patch("cloudinit.temp_utils.tempdir", autospec=True)
+ def test_normalize_mtype_on_bsd(
+ self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected
+ ):
+ m_is_BSD.return_value = True
+ m_is_Linux.return_value = False
+ m_tmpdir.return_value.__enter__ = mock.Mock(
+ autospec=True, return_value="/tmp/fake"
+ )
+ m_tmpdir.return_value.__exit__ = mock.Mock(
+ autospec=True, return_value=True
+ )
+ callback = mock.Mock(autospec=True)
+
+ util.mount_cb("/dev/fake0", callback, mtype=mtype)
+ assert (
+ mock.call(
+ [
+ "mount",
+ "-o",
+ "ro",
+ "-t",
+ expected,
+ "/dev/fake0",
+ "/tmp/fake",
+ ],
+ update_env=None,
+ )
+ in m_subp.call_args_list
+ )
+
+ @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+ def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+ with pytest.raises(TypeError):
+ util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+ @mock.patch("cloudinit.util.subp.subp")
+ def test_already_mounted_does_not_mount_or_umount_anything(
+ self, m_subp, already_mounted_device
+ ):
+ util.mount_cb(already_mounted_device, mock.Mock())
+
+ assert 0 == m_subp.call_count
+
+ @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+ def test_already_mounted_calls_callback(
+ self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+ ):
+ device, mount_dict = already_mounted_device_and_mountdict
+ mountpoint = mount_dict["mountpoint"]
+ mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+ callback = mock.Mock()
+ util.mount_cb(device, callback)
+
+ # The mountpoint passed to callback should always have a trailing
+ # slash, regardless of the input
+ assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+ def test_already_mounted_calls_callback_with_data(
+ self, already_mounted_device
+ ):
+ callback = mock.Mock()
+ util.mount_cb(
+ already_mounted_device, callback, data=mock.sentinel.data
+ )
+
+ assert [
+ mock.call(mock.ANY, mock.sentinel.data)
+ ] == callback.call_args_list
+
+
+@mock.patch("cloudinit.util.write_file")
+class TestEnsureFile:
+ """Tests for ``cloudinit.util.ensure_file``."""
+
+ def test_parameters_passed_through(self, m_write_file):
+ """Test the parameters in the signature are passed to write_file."""
+ util.ensure_file(
+ mock.sentinel.path,
+ mode=mock.sentinel.mode,
+ preserve_mode=mock.sentinel.preserve_mode,
+ )
+
+ assert 1 == m_write_file.call_count
+ args, kwargs = m_write_file.call_args
+ assert (mock.sentinel.path,) == args
+ assert mock.sentinel.mode == kwargs["mode"]
+ assert mock.sentinel.preserve_mode == kwargs["preserve_mode"]
+
+ @pytest.mark.parametrize(
+ "kwarg,expected",
+ [
+ # Files should be world-readable by default
+ ("mode", 0o644),
+ # The previous behaviour of not preserving mode should be retained
+ ("preserve_mode", False),
+ ],
+ )
+ def test_defaults(self, m_write_file, kwarg, expected):
+ """Test that ensure_file defaults appropriately."""
+ util.ensure_file(mock.sentinel.path)
+
+ assert 1 == m_write_file.call_count
+ _args, kwargs = m_write_file.call_args
+ assert expected == kwargs[kwarg]
+
+ def test_static_parameters_are_passed(self, m_write_file):
+        """Test that the static write_file parameters are passed correctly."""
+ util.ensure_file(mock.sentinel.path)
+
+ assert 1 == m_write_file.call_count
+ _args, kwargs = m_write_file.call_args
+ assert "" == kwargs["content"]
+ assert "ab" == kwargs["omode"]
+
+
+@mock.patch("cloudinit.util.grp.getgrnam")
+@mock.patch("cloudinit.util.os.setgid")
+@mock.patch("cloudinit.util.os.umask")
+class TestRedirectOutputPreexecFn:
+ """This tests specifically the preexec_fn used in redirect_output."""
+
+ @pytest.fixture(params=["outfmt", "errfmt"])
+ def preexec_fn(self, request):
+ """A fixture to gather the preexec_fn used by redirect_output.
+
+ This enables simpler direct testing of it, and parameterises any tests
+ using it to cover both the stdout and stderr code paths.
+ """
+ test_string = "| piped output to invoke subprocess"
+ if request.param == "outfmt":
+ args = (test_string, None)
+ elif request.param == "errfmt":
+ args = (None, test_string)
+ with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+ util.redirect_output(*args)
+
+ assert 1 == m_popen.call_count
+ _args, kwargs = m_popen.call_args
+ assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen"
+ return kwargs["preexec_fn"]
+
+ def test_preexec_fn_sets_umask(
+ self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn
+ ):
+ """preexec_fn should set a mask that avoids world-readable files."""
+ preexec_fn()
+
+ assert [mock.call(0o037)] == m_os_umask.call_args_list
+
+ def test_preexec_fn_sets_group_id_if_adm_group_present(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should setgrp to adm if present, so files are owned by them."""
+ fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid)
+ m_getgrnam.return_value = fake_group
+
+ preexec_fn()
+
+ assert [mock.call("adm")] == m_getgrnam.call_args_list
+ assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list
+
+ def test_preexec_fn_handles_absent_adm_group_gracefully(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should handle an absent adm group gracefully."""
+ m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'")
+
+ preexec_fn()
+
+ assert 0 == m_setgid.call_count
+
+
+class FakeSelinux(object):
def __init__(self, match_what):
self.match_what = match_what
self.restored = []
@@ -141,7 +1457,7 @@ class TestWriteFile(helpers.TestCase):
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- open(path, 'w').close()
+ open(path, "w").close()
os.chmod(path, 0o666)
util.write_file(path, contents, preserve_mode=True)
@@ -175,15 +1491,16 @@ class TestWriteFile(helpers.TestCase):
fake_se = FakeSelinux(my_file)
- with mock.patch.object(importer, 'import_module',
- return_value=fake_se) as mockobj:
+ with mock.patch.object(
+ importer, "import_module", return_value=fake_se
+ ) as mockobj:
with util.SeLinuxGuard(my_file) as is_on:
self.assertTrue(is_on)
self.assertEqual(1, len(fake_se.restored))
self.assertEqual(my_file, fake_se.restored[0])
- mockobj.assert_called_once_with('selinux')
+ mockobj.assert_called_once_with("selinux")
class TestDeleteDirContents(helpers.TestCase):
@@ -254,15 +1571,16 @@ class TestDeleteDirContents(helpers.TestCase):
class TestKeyValStrings(helpers.TestCase):
def test_keyval_str_to_dict(self):
- expected = {'1': 'one', '2': 'one+one', 'ro': True}
+ expected = {"1": "one", "2": "one+one", "ro": True}
cmdline = "1=one ro 2=one+one"
self.assertEqual(expected, util.keyval_str_to_dict(cmdline))
class TestGetCmdline(helpers.TestCase):
def test_cmdline_reads_debug_env(self):
- with mock.patch.dict("os.environ",
- values={'DEBUG_PROC_CMDLINE': 'abcd 123'}):
+ with mock.patch.dict(
+ "os.environ", values={"DEBUG_PROC_CMDLINE": "abcd 123"}
+ ):
ret = util.get_cmdline()
self.assertEqual("abcd 123", ret)
@@ -272,59 +1590,75 @@ class TestLoadYaml(helpers.CiTestCase):
with_logs = True
def test_simple(self):
- mydata = {'1': "one", '2': "two"}
+ mydata = {"1": "one", "2": "two"}
self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata)
def test_nonallowed_returns_default(self):
- '''Any unallowed types result in returning default; log the issue.'''
+ """Any unallowed types result in returning default; log the issue."""
# for now, anything not in the allowed list just returns the default.
- myyaml = yaml.dump({'1': "one"})
- self.assertEqual(util.load_yaml(blob=myyaml,
- default=self.mydefault,
- allowed=(str,)),
- self.mydefault)
+ myyaml = yaml.dump({"1": "one"})
+ self.assertEqual(
+ util.load_yaml(
+ blob=myyaml, default=self.mydefault, allowed=(str,)
+ ),
+ self.mydefault,
+ )
regex = re.compile(
- r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but'
- r' got dict')
- self.assertTrue(regex.search(self.logs.getvalue()),
- msg='Missing expected yaml load error')
+ r"Yaml load allows \(<(class|type) \'str\'>,\) root types, but"
+ r" got dict"
+ )
+ self.assertTrue(
+ regex.search(self.logs.getvalue()),
+ msg="Missing expected yaml load error",
+ )
def test_bogus_scan_error_returns_default(self):
- '''On Yaml scan error, load_yaml returns the default and logs issue.'''
+ """On Yaml scan error, load_yaml returns the default and logs issue."""
badyaml = "1\n 2:"
- self.assertEqual(util.load_yaml(blob=badyaml,
- default=self.mydefault),
- self.mydefault)
+ self.assertEqual(
+ util.load_yaml(blob=badyaml, default=self.mydefault),
+ self.mydefault,
+ )
self.assertIn(
- 'Failed loading yaml blob. Invalid format at line 2 column 3:'
+ "Failed loading yaml blob. Invalid format at line 2 column 3:"
' "mapping values are not allowed here',
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_bogus_parse_error_returns_default(self):
- '''On Yaml parse error, load_yaml returns default and logs issue.'''
+ """On Yaml parse error, load_yaml returns default and logs issue."""
badyaml = "{}}"
- self.assertEqual(util.load_yaml(blob=badyaml,
- default=self.mydefault),
- self.mydefault)
+ self.assertEqual(
+ util.load_yaml(blob=badyaml, default=self.mydefault),
+ self.mydefault,
+ )
self.assertIn(
- 'Failed loading yaml blob. Invalid format at line 1 column 3:'
- " \"expected \'<document start>\', but found \'}\'",
- self.logs.getvalue())
+ "Failed loading yaml blob. Invalid format at line 1 column 3:"
+ " \"expected '<document start>', but found '}'",
+ self.logs.getvalue(),
+ )
def test_unsafe_types(self):
# should not load complex types
- unsafe_yaml = yaml.dump((1, 2, 3,))
- self.assertEqual(util.load_yaml(blob=unsafe_yaml,
- default=self.mydefault),
- self.mydefault)
+ unsafe_yaml = yaml.dump(
+ (
+ 1,
+ 2,
+ 3,
+ )
+ )
+ self.assertEqual(
+ util.load_yaml(blob=unsafe_yaml, default=self.mydefault),
+ self.mydefault,
+ )
def test_python_unicode(self):
# complex type of python/unicode is explicitly allowed
- myobj = {'1': "FOOBAR"}
+ myobj = {"1": "FOOBAR"}
safe_yaml = yaml.dump(myobj)
- self.assertEqual(util.load_yaml(blob=safe_yaml,
- default=self.mydefault),
- myobj)
+ self.assertEqual(
+ util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj
+ )
def test_none_returns_default(self):
"""If yaml.load returns None, then default should be returned."""
@@ -332,168 +1666,177 @@ class TestLoadYaml(helpers.CiTestCase):
mdef = self.mydefault
self.assertEqual(
[(b, self.mydefault) for b in blobs],
- [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs])
+ [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs],
+ )
class TestMountinfoParsing(helpers.ResourceUsingTestCase):
def test_invalid_mountinfo(self):
- line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
- "rw,errors=remount-ro,data=ordered")
+ line = (
+ "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
+ "rw,errors=remount-ro,data=ordered"
+ )
elements = line.split()
for i in range(len(elements) + 1):
- lines = [' '.join(elements[0:i])]
+ lines = [" ".join(elements[0:i])]
if i < 10:
expected = None
else:
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
def test_precise_ext4_root(self):
- lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines()
+ lines = helpers.readResource("mountinfo_precise_ext4.txt").splitlines()
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))
- expected = ('/dev/md0', 'ext4', '/boot')
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+ expected = ("/dev/md0", "ext4", "/boot")
+ self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/home", lines))
+ self.assertEqual(expected, util.parse_mount_info("/home/me", lines))
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
+ expected = ("tmpfs", "tmpfs", "/run")
+ self.assertEqual(expected, util.parse_mount_info("/run", lines))
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+ expected = ("none", "tmpfs", "/run/lock")
+ self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))
def test_raring_btrfs_root(self):
- lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines()
+ lines = helpers.readResource("mountinfo_raring_btrfs.txt").splitlines()
- expected = ('/dev/vda1', 'btrfs', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+ expected = ("/dev/vda1", "btrfs", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))
- expected = ('/dev/vda1', 'btrfs', '/home')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+ expected = ("/dev/vda1", "btrfs", "/home")
+ self.assertEqual(expected, util.parse_mount_info("/home", lines))
+ self.assertEqual(expected, util.parse_mount_info("/home/me", lines))
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
+ expected = ("tmpfs", "tmpfs", "/run")
+ self.assertEqual(expected, util.parse_mount_info("/run", lines))
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+ expected = ("none", "tmpfs", "/run/lock")
+ self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- helpers.readResource('zpool_status_simple.txt'), ''
+ helpers.readResource("zpool_status_simple.txt"),
+ "",
)
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
- self.assertEqual('gpt/system', ret)
+ ret = util.get_device_info_from_zpool("vmzroot")
+ self.assertEqual("gpt/system", ret)
self.assertIsNotNone(ret)
- m_os.path.exists.assert_called_with('/dev/zfs')
+ m_os.path.exists.assert_called_with("/dev/zfs")
- @mock.patch('cloudinit.util.os')
+ @mock.patch("cloudinit.util.os")
def test_get_device_info_from_zpool_no_dev_zfs(self, m_os):
# mock /dev/zfs missing
m_os.path.exists.return_value = False
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
"""Handle case where there is no zpool command"""
# mock /dev/zfs exists
m_os.path.exists.return_value = True
m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd")
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- helpers.readResource('zpool_status_simple.txt'), 'error'
+ helpers.readResource("zpool_status_simple.txt"),
+ "error",
)
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_parse_mount_with_ext(self, mount_out):
mount_out.return_value = (
- helpers.readResource('mount_parse_ext.txt'), '')
+ helpers.readResource("mount_parse_ext.txt"),
+ "",
+ )
# this one is valid and exists in mount_parse_ext.txt
- ret = util.parse_mount('/var')
- self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret)
+ ret = util.parse_mount("/var")
+ self.assertEqual(("/dev/mapper/vg00-lv_var", "ext4", "/var"), ret)
# another one that is valid and exists
- ret = util.parse_mount('/')
- self.assertEqual(('/dev/mapper/vg00-lv_root', 'ext4', '/'), ret)
+ ret = util.parse_mount("/")
+ self.assertEqual(("/dev/mapper/vg00-lv_root", "ext4", "/"), ret)
# this one exists in mount_parse_ext.txt
- ret = util.parse_mount('/sys/kernel/debug')
+ ret = util.parse_mount("/sys/kernel/debug")
self.assertIsNone(ret)
# this one does not even exist in mount_parse_ext.txt
- ret = util.parse_mount('/not/existing/mount')
+ ret = util.parse_mount("/not/existing/mount")
self.assertIsNone(ret)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_parse_mount_with_zfs(self, mount_out):
mount_out.return_value = (
- helpers.readResource('mount_parse_zfs.txt'), '')
+ helpers.readResource("mount_parse_zfs.txt"),
+ "",
+ )
# this one is valid and exists in mount_parse_zfs.txt
- ret = util.parse_mount('/var')
- self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret)
+ ret = util.parse_mount("/var")
+ self.assertEqual(("vmzroot/ROOT/freebsd/var", "zfs", "/var"), ret)
# this one is the root, valid and also exists in mount_parse_zfs.txt
- ret = util.parse_mount('/')
- self.assertEqual(('vmzroot/ROOT/freebsd', 'zfs', '/'), ret)
+ ret = util.parse_mount("/")
+ self.assertEqual(("vmzroot/ROOT/freebsd", "zfs", "/"), ret)
# this one does not even exist in mount_parse_ext.txt
- ret = util.parse_mount('/not/existing/mount')
+ ret = util.parse_mount("/not/existing/mount")
self.assertIsNone(ret)
class TestIsX86(helpers.CiTestCase):
-
def test_is_x86_matches_x86_types(self):
"""is_x86 returns True if CPU architecture matches."""
- matched_arches = ['x86_64', 'i386', 'i586', 'i686']
+ matched_arches = ["x86_64", "i386", "i586", "i686"]
for arch in matched_arches:
self.assertTrue(
- util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch
+ )
def test_is_x86_unmatched_types(self):
"""is_x86 returns Fale on non-intel x86 architectures."""
- unmatched_arches = ['ia64', '9000/800', 'arm64v71']
+ unmatched_arches = ["ia64", "9000/800", "arm64v71"]
for arch in unmatched_arches:
self.assertFalse(
- util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
+ )
- @mock.patch('cloudinit.util.os.uname')
+ @mock.patch("cloudinit.util.os.uname")
def test_is_x86_calls_uname_for_architecture(self, m_uname):
"""is_x86 returns True if platform from uname matches."""
- m_uname.return_value = [0, 1, 2, 3, 'x86_64']
+ m_uname.return_value = [0, 1, 2, 3, "x86_64"]
self.assertTrue(util.is_x86())
class TestGetConfigLogfiles(helpers.CiTestCase):
-
def test_empty_cfg_returns_empty_list(self):
"""An empty config passed to get_config_logfiles returns empty list."""
self.assertEqual([], util.get_config_logfiles(None))
@@ -502,39 +1845,56 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
def test_default_log_file_present(self):
"""When default_log_file is set get_config_logfiles finds it."""
self.assertEqual(
- ['/my.log'],
- util.get_config_logfiles({'def_log_file': '/my.log'}))
+ ["/my.log"], util.get_config_logfiles({"def_log_file": "/my.log"})
+ )
def test_output_logs_parsed_when_teeing_files(self):
"""When output configuration is parsed when teeing files."""
self.assertEqual(
- ['/himom.log', '/my.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '|tee -a /himom.log'}})))
+ ["/himom.log", "/my.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": "|tee -a /himom.log"},
+ }
+ )
+ ),
+ )
def test_output_logs_parsed_when_redirecting(self):
"""When output configuration is parsed when redirecting to a file."""
self.assertEqual(
- ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>/test.log'}})))
+ ["/my.log", "/test.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": ">/test.log"},
+ }
+ )
+ ),
+ )
def test_output_logs_parsed_when_appending(self):
"""When output configuration is parsed when appending to a file."""
self.assertEqual(
- ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>> /test.log'}})))
+ ["/my.log", "/test.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": ">> /test.log"},
+ }
+ )
+ ),
+ )
class TestMultiLog(helpers.FilesystemMockingTestCase):
-
def _createConsole(self, root):
- os.mkdir(os.path.join(root, 'dev'))
- open(os.path.join(root, 'dev', 'console'), 'a').close()
+ os.mkdir(os.path.join(root, "dev"))
+ open(os.path.join(root, "dev", "console"), "a").close()
def setUp(self):
super(TestMultiLog, self).setUp()
@@ -548,60 +1908,64 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
self.patchStdoutAndStderr(self.stdout, self.stderr)
def test_stderr_used_by_default(self):
- logged_string = 'test stderr output'
+ logged_string = "test stderr output"
util.multi_log(logged_string)
self.assertEqual(logged_string, self.stderr.getvalue())
def test_stderr_not_used_if_false(self):
- util.multi_log('should not see this', stderr=False)
- self.assertEqual('', self.stderr.getvalue())
+ util.multi_log("should not see this", stderr=False)
+ self.assertEqual("", self.stderr.getvalue())
def test_logs_go_to_console_by_default(self):
self._createConsole(self.root)
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string)
- self.assertEqual(logged_string, open('/dev/console').read())
+ self.assertEqual(logged_string, open("/dev/console").read())
def test_logs_dont_go_to_stdout_if_console_exists(self):
self._createConsole(self.root)
- util.multi_log('something')
- self.assertEqual('', self.stdout.getvalue())
+ util.multi_log("something")
+ self.assertEqual("", self.stdout.getvalue())
def test_logs_go_to_stdout_if_console_does_not_exist(self):
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string)
self.assertEqual(logged_string, self.stdout.getvalue())
+ def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self):
+ util.multi_log("something", fallback_to_stdout=False)
+ self.assertEqual("", self.stdout.getvalue())
+
def test_logs_go_to_log_if_given(self):
log = mock.MagicMock()
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string, log=log)
- self.assertEqual([((mock.ANY, logged_string), {})],
- log.log.call_args_list)
+ self.assertEqual(
+ [((mock.ANY, logged_string), {})], log.log.call_args_list
+ )
def test_newlines_stripped_from_log_call(self):
log = mock.MagicMock()
- expected_string = 'something very important'
- util.multi_log('{0}\n'.format(expected_string), log=log)
+ expected_string = "something very important"
+ util.multi_log("{0}\n".format(expected_string), log=log)
self.assertEqual((mock.ANY, expected_string), log.log.call_args[0])
def test_log_level_defaults_to_debug(self):
log = mock.MagicMock()
- util.multi_log('message', log=log)
+ util.multi_log("message", log=log)
self.assertEqual((logging.DEBUG, mock.ANY), log.log.call_args[0])
def test_given_log_level_used(self):
log = mock.MagicMock()
log_level = mock.Mock()
- util.multi_log('message', log=log, log_level=log_level)
+ util.multi_log("message", log=log, log_level=log_level)
self.assertEqual((log_level, mock.ANY), log.log.call_args[0])
class TestMessageFromString(helpers.TestCase):
-
def test_unicode_not_messed_up(self):
- roundtripped = util.message_from_string(u'\n').as_string()
- self.assertNotIn('\x00', roundtripped)
+ roundtripped = util.message_from_string("\n").as_string()
+ self.assertNotIn("\x00", roundtripped)
class TestReadSeeded(helpers.TestCase):
@@ -614,12 +1978,13 @@ class TestReadSeeded(helpers.TestCase):
ud = b"userdatablob"
vd = b"vendordatablob"
helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud,
- 'vendor-data': vd})
+ self.tmp,
+ {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd},
+ )
sdir = self.tmp + os.path.sep
(found_md, found_ud, found_vd) = util.read_seeded(sdir)
- self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_md, {"key1": "val1"})
self.assertEqual(found_ud, ud)
self.assertEqual(found_vd, vd)
@@ -634,157 +1999,189 @@ class TestReadSeededWithoutVendorData(helpers.TestCase):
ud = b"userdatablob"
vd = None
helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
+ self.tmp, {"meta-data": "key1: val1", "user-data": ud}
+ )
sdir = self.tmp + os.path.sep
(found_md, found_ud, found_vd) = util.read_seeded(sdir)
- self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_md, {"key1": "val1"})
self.assertEqual(found_ud, ud)
self.assertEqual(found_vd, vd)
class TestEncode(helpers.TestCase):
"""Test the encoding functions"""
+
def test_decode_binary_plain_text_with_hex(self):
- blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd'
+ blob = "BOOTABLE_FLAG=\x80init=/bin/systemd"
text = util.decode_binary(blob)
self.assertEqual(text, blob)
class TestProcessExecutionError(helpers.TestCase):
- template = ('{description}\n'
- 'Command: {cmd}\n'
- 'Exit code: {exit_code}\n'
- 'Reason: {reason}\n'
- 'Stdout: {stdout}\n'
- 'Stderr: {stderr}')
- empty_attr = '-'
- empty_description = 'Unexpected error while running command.'
+ template = (
+ "{description}\n"
+ "Command: {cmd}\n"
+ "Exit code: {exit_code}\n"
+ "Reason: {reason}\n"
+ "Stdout: {stdout}\n"
+ "Stderr: {stderr}"
+ )
+ empty_attr = "-"
+ empty_description = "Unexpected error while running command."
def test_pexec_error_indent_text(self):
error = subp.ProcessExecutionError()
- msg = 'abc\ndef'
- formatted = 'abc\n{0}def'.format(' ' * 4)
+ msg = "abc\ndef"
+ formatted = "abc\n{0}def".format(" " * 4)
self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
- self.assertEqual(error._indent_text(msg.encode(), indent_level=4),
- formatted.encode())
+ self.assertEqual(
+ error._indent_text(msg.encode(), indent_level=4),
+ formatted.encode(),
+ )
self.assertIsInstance(
- error._indent_text(msg.encode()), type(msg.encode()))
+ error._indent_text(msg.encode()), type(msg.encode())
+ )
def test_pexec_error_type(self):
self.assertIsInstance(subp.ProcessExecutionError(), IOError)
def test_pexec_error_empty_msgs(self):
error = subp.ProcessExecutionError()
- self.assertTrue(all(attr == self.empty_attr for attr in
- (error.stderr, error.stdout, error.reason)))
+ self.assertTrue(
+ all(
+ attr == self.empty_attr
+ for attr in (error.stderr, error.stdout, error.reason)
+ )
+ )
self.assertEqual(error.description, self.empty_description)
- self.assertEqual(str(error), self.template.format(
- description=self.empty_description, exit_code=self.empty_attr,
- reason=self.empty_attr, stdout=self.empty_attr,
- stderr=self.empty_attr, cmd=self.empty_attr))
+ self.assertEqual(
+ str(error),
+ self.template.format(
+ description=self.empty_description,
+ exit_code=self.empty_attr,
+ reason=self.empty_attr,
+ stdout=self.empty_attr,
+ stderr=self.empty_attr,
+ cmd=self.empty_attr,
+ ),
+ )
def test_pexec_error_single_line_msgs(self):
- stdout_msg = 'out out'
- stderr_msg = 'error error'
- cmd = 'test command'
+ stdout_msg = "out out"
+ stderr_msg = "error error"
+ cmd = "test command"
exit_code = 3
error = subp.ProcessExecutionError(
- stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
- self.assertEqual(str(error), self.template.format(
- description=self.empty_description, stdout=stdout_msg,
- stderr=stderr_msg, exit_code=str(exit_code),
- reason=self.empty_attr, cmd=cmd))
+ stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd
+ )
+ self.assertEqual(
+ str(error),
+ self.template.format(
+ description=self.empty_description,
+ stdout=stdout_msg,
+ stderr=stderr_msg,
+ exit_code=str(exit_code),
+ reason=self.empty_attr,
+ cmd=cmd,
+ ),
+ )
def test_pexec_error_multi_line_msgs(self):
# make sure bytes are converted and handled properly when formatting
- stdout_msg = 'multi\nline\noutput message'.encode()
- stderr_msg = 'multi\nline\nerror message\n\n\n'
+ stdout_msg = "multi\nline\noutput message".encode()
+ stderr_msg = "multi\nline\nerror message\n\n\n"
error = subp.ProcessExecutionError(
- stdout=stdout_msg, stderr=stderr_msg)
+ stdout=stdout_msg, stderr=stderr_msg
+ )
self.assertEqual(
str(error),
- '\n'.join((
- '{description}',
- 'Command: {empty_attr}',
- 'Exit code: {empty_attr}',
- 'Reason: {empty_attr}',
- 'Stdout: multi',
- ' line',
- ' output message',
- 'Stderr: multi',
- ' line',
- ' error message',
- )).format(description=self.empty_description,
- empty_attr=self.empty_attr))
+ "\n".join(
+ (
+ "{description}",
+ "Command: {empty_attr}",
+ "Exit code: {empty_attr}",
+ "Reason: {empty_attr}",
+ "Stdout: multi",
+ " line",
+ " output message",
+ "Stderr: multi",
+ " line",
+ " error message",
+ )
+ ).format(
+ description=self.empty_description, empty_attr=self.empty_attr
+ ),
+ )
class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
def test_id_in_os_release_quoted(self):
"""os-release containing ID="ubuntu-core" is snappy."""
- orcontent = '\n'.join(['ID="ubuntu-core"', ''])
+ orcontent = "\n".join(['ID="ubuntu-core"', ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
def test_id_in_os_release(self):
"""os-release containing ID=ubuntu-core is snappy."""
- orcontent = '\n'.join(['ID=ubuntu-core', ''])
+ orcontent = "\n".join(["ID=ubuntu-core", ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_bad_content_in_os_release_no_effect(self, m_cmdline):
"""malformed os-release should not raise exception."""
- m_cmdline.return_value = 'root=/dev/sda'
- orcontent = '\n'.join(['IDubuntu-core', ''])
+ m_cmdline.return_value = "root=/dev/sda"
+ orcontent = "\n".join(["IDubuntu-core", ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot()
self.assertFalse(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_snap_core_in_cmdline_is_snappy(self, m_cmdline):
"""The string snap_core= in kernel cmdline indicates snappy."""
cmdline = (
"BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable "
"snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro "
"net.ifnames=0 init=/lib/systemd/systemd console=tty1 "
- "console=ttyS0 panic=-1")
+ "console=ttyS0 panic=-1"
+ )
m_cmdline.return_value = cmdline
self.assertTrue(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_nothing_found_is_not_snappy(self, m_cmdline):
"""If no positive identification, then not snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
self.reRoot()
self.assertFalse(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_channel_ini_with_snappy_is_snappy(self, m_cmdline):
"""A Channel.ini file with 'ubuntu-core' indicates snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
root_d = self.tmp_dir()
- content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""])
- helpers.populate_dir(
- root_d, {'etc/system-image/channel.ini': content})
+ content = "\n".join(["[Foo]", "source = 'ubuntu-core'", ""])
+ helpers.populate_dir(root_d, {"etc/system-image/channel.ini": content})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_system_image_config_dir_is_snappy(self, m_cmdline):
"""Existence of /etc/system-image/config.d indicates snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
root_d = self.tmp_dir()
helpers.populate_dir(
- root_d, {'etc/system-image/config.d/my.file': "_unused"})
+ root_d, {"etc/system-image/config.d/my.file": "_unused"}
+ )
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
@@ -793,41 +2190,52 @@ class TestLoadShellContent(helpers.TestCase):
def test_comments_handled_correctly(self):
"""Shell comments should be allowed in the content."""
self.assertEqual(
- {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'},
- util.load_shell_content('\n'.join([
- "#top of file comment",
- "key1=val1 #this is a comment",
- "# second comment",
- 'key2="val2" # inlin comment'
- '#badkey=wark',
- 'key3="val3 #tricky"',
- ''])))
+ {"key1": "val1", "key2": "val2", "key3": "val3 #tricky"},
+ util.load_shell_content(
+ "\n".join(
+ [
+ "#top of file comment",
+ "key1=val1 #this is a comment",
+ "# second comment",
+ 'key2="val2" # inlin comment#badkey=wark',
+ 'key3="val3 #tricky"',
+ "",
+ ]
+ )
+ ),
+ )
class TestGetProcEnv(helpers.TestCase):
"""test get_proc_env."""
- null = b'\x00'
- simple1 = b'HOME=/'
- simple2 = b'PATH=/bin:/sbin'
- bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371
- mixed = b'MIXED=' + b'ab\xccde'
- def _val_decoded(self, blob, encoding='utf-8', errors='replace'):
+ null = b"\x00"
+ simple1 = b"HOME=/"
+ simple2 = b"PATH=/bin:/sbin"
+ bootflag = b"BOOTABLE_FLAG=\x80" # from LP: #1775371
+ mixed = b"MIXED=" + b"ab\xccde"
+
+ def _val_decoded(self, blob, encoding="utf-8", errors="replace"):
# return the value portion of key=val decoded.
- return blob.split(b'=', 1)[1].decode(encoding, errors)
+ return blob.split(b"=", 1)[1].decode(encoding, errors)
@mock.patch("cloudinit.util.load_file")
def test_non_utf8_in_environment(self, m_load_file):
"""env may have non utf-8 decodable content."""
content = self.null.join(
- (self.bootflag, self.simple1, self.simple2, self.mixed))
+ (self.bootflag, self.simple1, self.simple2, self.mixed)
+ )
m_load_file.return_value = content
self.assertEqual(
- {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
- 'HOME': '/', 'PATH': '/bin:/sbin',
- 'MIXED': self._val_decoded(self.mixed)},
- util.get_proc_env(1))
+ {
+ "BOOTABLE_FLAG": self._val_decoded(self.bootflag),
+ "HOME": "/",
+ "PATH": "/bin:/sbin",
+ "MIXED": self._val_decoded(self.mixed),
+ },
+ util.get_proc_env(1),
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -838,8 +2246,9 @@ class TestGetProcEnv(helpers.TestCase):
m_load_file.return_value = content
self.assertEqual(
- dict([t.split(b'=') for t in lines]),
- util.get_proc_env(1, encoding=None))
+ dict([t.split(b"=") for t in lines]),
+ util.get_proc_env(1, encoding=None),
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -848,8 +2257,8 @@ class TestGetProcEnv(helpers.TestCase):
content = self.null.join((self.simple1, self.simple2))
m_load_file.return_value = content
self.assertEqual(
- {'HOME': '/', 'PATH': '/bin:/sbin'},
- util.get_proc_env(1))
+ {"HOME": "/", "PATH": "/bin:/sbin"}, util.get_proc_env(1)
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -867,16 +2276,17 @@ class TestGetProcEnv(helpers.TestCase):
self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
-class TestKernelVersion():
+class TestKernelVersion:
"""test kernel version function"""
params = [
- ('5.6.19-300.fc32.x86_64', (5, 6)),
- ('4.15.0-101-generic', (4, 15)),
- ('3.10.0-1062.12.1.vz7.131.10', (3, 10)),
- ('4.18.0-144.el8.x86_64', (4, 18))]
+ ("5.6.19-300.fc32.x86_64", (5, 6)),
+ ("4.15.0-101-generic", (4, 15)),
+ ("3.10.0-1062.12.1.vz7.131.10", (3, 10)),
+ ("4.18.0-144.el8.x86_64", (4, 18)),
+ ]
- @mock.patch('os.uname')
+ @mock.patch("os.uname")
@pytest.mark.parametrize("uname_release,expected", params)
def test_kernel_version(self, m_uname, uname_release, expected):
m_uname.return_value.release = uname_release
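The parametrized cases above pin down what util.kernel_version() is expected to return. As a rough sketch only (an assumption for illustration, not the actual cloudinit.util implementation), a parser consistent with those expectations could look like:

import os


def kernel_version():
    # Hypothetical sketch: "5.6.19-300.fc32.x86_64" -> (5, 6)
    return tuple(int(part) for part in os.uname().release.split(".")[:2])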
@@ -884,49 +2294,48 @@ class TestKernelVersion():
class TestFindDevs:
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with(self, m_subp):
m_subp.return_value = (
'/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"',
- ''
+ "",
)
devlist = util.find_devs_with()
assert devlist == [
- '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'
+ ]
devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL")
assert devlist == [
- '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'
+ ]
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd(self, m_subp):
- m_subp.return_value = (
- 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
- )
+ m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
devlist = util.find_devs_with_openbsd()
- assert devlist == ['/dev/cd0a', '/dev/sd1i']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd_with_criteria(self, m_subp):
- m_subp.return_value = (
- 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
- )
+ m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
- assert devlist == ['/dev/cd0a']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
# lp: #1841466
devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")
- assert devlist == ['/dev/cd0a', '/dev/sd1i']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
@pytest.mark.parametrize(
- 'criteria,expected_devlist', (
- (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']),
- ('TYPE=iso9660', ['/dev/iso9660/config-2']),
- ('TYPE=vfat', ['/dev/msdosfs/EFISYS']),
- ('LABEL_FATBOOT=A_LABEL', []), # lp: #1841466
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/msdosfs/EFISYS", "/dev/iso9660/config-2"]),
+ ("TYPE=iso9660", ["/dev/iso9660/config-2"]),
+ ("TYPE=vfat", ["/dev/msdosfs/EFISYS"]),
+ ("LABEL_FATBOOT=A_LABEL", []), # lp: #1841466
),
)
- @mock.patch('glob.glob')
+ @mock.patch("glob.glob")
def test_find_devs_with_freebsd(self, m_glob, criteria, expected_devlist):
def fake_glob(pattern):
msdos = ["/dev/msdosfs/EFISYS"]
@@ -936,58 +2345,54 @@ class TestFindDevs:
elif pattern == "/dev/iso9660/*":
return iso9660
raise Exception
+
m_glob.side_effect = fake_glob
devlist = util.find_devs_with_freebsd(criteria=criteria)
assert devlist == expected_devlist
@pytest.mark.parametrize(
- 'criteria,expected_devlist', (
- (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
- ('TYPE=iso9660', ['/dev/cd0']),
- ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]),
- ('LABEL_FATBOOT=A_LABEL', # lp: #1841466
- ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
- )
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"]),
+ ("TYPE=iso9660", ["/dev/cd0"]),
+ ("TYPE=vfat", ["/dev/ld0", "/dev/dk0", "/dev/dk1"]),
+ (
+ "LABEL_FATBOOT=A_LABEL", # lp: #1841466
+ ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"],
+ ),
+ ),
)
@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist):
side_effect_values = [
("ld0 dk0 dk1 cd0", ""),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "track (ctl=4) at sector 0\n"
- 'ISO filesystem, label "config-2", '
- "creation time: 2020/03/31 17:29\n"
- "adding as 'a'\n"
- ),
+ "track (ctl=4) at sector 0\n"
+ 'ISO filesystem, label "config-2", '
+ "creation time: 2020/03/31 17:29\n"
+ "adding as 'a'\n",
"",
),
]
@@ -995,4 +2400,25 @@ class TestFindDevs:
devlist = util.find_devs_with_netbsd(criteria=criteria)
assert devlist == expected_devlist
+ @pytest.mark.parametrize(
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/vbd0", "/dev/cd0", "/dev/acd0"]),
+ ("TYPE=iso9660", ["/dev/cd0", "/dev/acd0"]),
+ ("TYPE=vfat", ["/dev/vbd0"]),
+ (
+ "LABEL_FATBOOT=A_LABEL", # lp: #1841466
+ ["/dev/vbd0", "/dev/cd0", "/dev/acd0"],
+ ),
+ ),
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_find_devs_with_dragonflybsd(
+ self, m_subp, criteria, expected_devlist
+ ):
+ m_subp.return_value = ("md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0", "")
+ devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
+ assert devlist == expected_devlist
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_version.py b/tests/unittests/test_version.py
index 778a762c..8ac8aea6 100644
--- a/cloudinit/tests/test_version.py
+++ b/tests/unittests/test_version.py
@@ -2,21 +2,22 @@
from unittest import mock
-from cloudinit.tests.helpers import CiTestCase
from cloudinit import version
+from tests.unittests.helpers import CiTestCase
class TestExportsFeatures(CiTestCase):
def test_has_network_config_v1(self):
- self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+ self.assertIn("NETWORK_CONFIG_V1", version.FEATURES)
def test_has_network_config_v2(self):
- self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+ self.assertIn("NETWORK_CONFIG_V2", version.FEATURES)
class TestVersionString(CiTestCase):
- @mock.patch("cloudinit.version._PACKAGED_VERSION",
- "17.2-3-gb05b9972-0ubuntu1")
+ @mock.patch(
+ "cloudinit.version._PACKAGED_VERSION", "17.2-3-gb05b9972-0ubuntu1"
+ )
def test_package_version_respected(self):
"""If _PACKAGED_VERSION is filled in, then it should be returned."""
self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/test_vmware/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/unittests/test_vmware/__init__.py
+++ /dev/null
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
deleted file mode 100644
index c8b59d83..00000000
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (C) 2019 Canonical Ltd.
-# Copyright (C) 2019 VMware INC.
-#
-# Author: Xiaofeng Wang <xiaofengw@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import subp
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- get_tools_config,
- set_gc_status,
-)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestGuestCustUtil(CiTestCase):
- def test_get_tools_config_not_installed(self):
- """
- This test is designed to verify the behavior if vmware-toolbox-cmd
- is not installed.
- """
- with mock.patch.object(subp, 'which', return_value=None):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
-
- def test_get_tools_config_internal_exception(self):
- """
- This test is designed to verify the behavior if internal exception
- is raised.
- """
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
- with mock.patch.object(subp, 'subp',
- return_value=('key=value', b''),
- side_effect=subp.ProcessExecutionError(
- "subp failed", exit_code=99)):
- # verify return value is 'defaultVal', not 'value'.
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'defaultVal')
-
- def test_get_tools_config_normal(self):
- """
- This test is designed to verify the value could be parsed from
- key = value of the given [section]
- """
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
- # value is not blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = value ', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'value')
- # value is blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = ', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- '')
- # value contains =
- with mock.patch.object(subp, 'subp',
- return_value=('key=Bar=Wark', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'Bar=Wark')
-
- # value contains specific characters
- with mock.patch.object(subp, 'subp',
- return_value=('[a] b.c_d=e-f', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'e-f')
-
- def test_set_gc_status(self):
- """
- This test is designed to verify the behavior of set_gc_status
- """
- # config is None, return None
- self.assertEqual(set_gc_status(None, 'Successful'), None)
-
- # post gc status is NO, return None
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertEqual(set_gc_status(conf, 'Successful'), None)
-
- # post gc status is YES, subp is called to execute command
- cf._insertKey("MISC|POST-GC-STATUS", "YES")
- conf = Config(cf)
- with mock.patch.object(subp, 'subp',
- return_value=('ok', b'')) as mockobj:
- self.assertEqual(
- set_gc_status(conf, 'Successful'), ('ok', b''))
- mockobj.assert_called_once_with(
- ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
- rcs=[0])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
deleted file mode 100644
index 9c7d25fa..00000000
--- a/tests/unittests/test_vmware_config_file.py
+++ /dev/null
@@ -1,529 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-# Pengpeng Sun <pengpengs@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import logging
-import os
-import sys
-import tempfile
-import textwrap
-
-from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
-from cloudinit.sources.DataSourceOVF import read_vmware_imc
-from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet
-from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
-from cloudinit.tests.helpers import CiTestCase
-
-logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-logger = logging.getLogger(__name__)
-
-
-class TestVmwareConfigFile(CiTestCase):
-
- def test_utility_methods(self):
- """Tests basic utility methods of ConfigFile class"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf.clear()
-
- self.assertEqual(0, len(cf), "clear size")
-
- cf._insertKey(" PASSWORD|-PASS ", " foo ")
- cf._insertKey("BAR", " ")
-
- self.assertEqual(2, len(cf), "insert size")
- self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
- self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
- "keepPassword")
- self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"),
- "removePassword")
- self.assertFalse("FOO" in cf, "hasFoo")
- self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
- self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
- self.assertTrue("BAR" in cf, "hasBar")
- self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
- self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
-
- def test_datasource_instance_id(self):
- """Tests instance id for the DatasourceOVF"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- instance_id_prefix = 'iid-vmware-'
-
- conf = Config(cf)
-
- (md1, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md1["instance-id"])
- self.assertEqual(md1["instance-id"], 'iid-vmware-imc')
-
- (md2, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md2["instance-id"])
- self.assertEqual(md2["instance-id"], 'iid-vmware-imc')
-
- self.assertEqual(md2["instance-id"], md1["instance-id"])
-
- def test_configfile_static_2nics(self):
- """Tests Config class for a configuration with two static NICs."""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- conf = Config(cf)
-
- self.assertEqual('myhost1', conf.host_name, "hostName")
- self.assertEqual('Africa/Abidjan', conf.timezone, "tz")
- self.assertTrue(conf.utc, "utc")
-
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- conf.name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- conf.dns_suffixes,
- "suffixes")
-
- nics = conf.nics
- ipv40 = nics[0].staticIpv4
-
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
- self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0")
- self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0")
- self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
- self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0")
- self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1")
-
- self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
- self.assertEqual('fc00:10:20:87::154',
- nics[0].staticIpv6[0].ip,
- "ipv6Addr0")
-
- self.assertEqual('NIC2', nics[1].name, "nic1")
- self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
-
- def test_config_file_dhcp_2nics(self):
- """Tests Config class for a configuration with two DHCP NICs."""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- conf = Config(cf)
- nics = conf.nics
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
-
- def test_config_password(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf._insertKey("PASSWORD|-PASS", "test-password")
- cf._insertKey("PASSWORD|RESET", "no")
-
- conf = Config(cf)
- self.assertEqual('test-password', conf.admin_password, "password")
- self.assertFalse(conf.reset_password, "do not reset password")
-
- def test_config_reset_passwd(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf._insertKey("PASSWORD|-PASS", "test-password")
- cf._insertKey("PASSWORD|RESET", "random")
-
- conf = Config(cf)
- with self.assertRaises(ValueError):
- pw = conf.reset_password
- self.assertIsNone(pw)
-
- cf.clear()
- cf._insertKey("PASSWORD|RESET", "yes")
- self.assertEqual(1, len(cf), "insert size")
-
- conf = Config(cf)
- self.assertTrue(conf.reset_password, "reset password")
-
- def test_get_config_nameservers(self):
- """Tests DNS and nameserver settings in a configuration."""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- config = Config(cf)
-
- network_config = get_network_config_from_conf(config, False)
-
- self.assertEqual(1, network_config.get('version'))
-
- config_types = network_config.get('config')
- name_servers = None
- dns_suffixes = None
-
- for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
- break
-
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- dns_suffixes,
- "suffixes")
-
- def test_gen_subnet(self):
- """Tests if gen_subnet properly calculates network subnet from
- IPv4 address and netmask"""
- ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'],
- ['10.20.92.105', '255.255.252.0', '10.20.92.0'],
- ['192.168.0.10', '255.255.0.0', '192.168.0.0']]
- for entry in ip_subnet_list:
- self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]),
- "Subnet for a specified ip and netmask")
-
- def test_get_config_dns_suffixes(self):
- """Tests if get_network_config_from_conf properly
- generates nameservers and dns settings from a
- specified configuration"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- config = Config(cf)
-
- network_config = get_network_config_from_conf(config, False)
-
- self.assertEqual(1, network_config.get('version'))
-
- config_types = network_config.get('config')
- name_servers = None
- dns_suffixes = None
-
- for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
- break
-
- self.assertEqual([],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com'],
- dns_suffixes,
- "suffixes")
-
- def test_get_nics_list_dhcp(self):
- """Tests if NicConfigurator properly calculates network subnets
- for a configuration with a list of DHCP NICs"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- config = Config(cf)
-
- nicConfigurator = NicConfigurator(config.nics, False)
- nics_cfg_list = nicConfigurator.generate()
-
- self.assertEqual(2, len(nics_cfg_list), "number of config elements")
-
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
- for cfg in nics_cfg_list:
- if cfg.get('name') == nic1.get('name'):
- nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
- nic2.update(cfg)
-
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
- subnets = nic1.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC1')
- subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1')
- self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type')
-
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'),
- 'mac address of NIC2')
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC2')
- subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2')
- self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type')
-
- def test_get_nics_list_static(self):
- """Tests if NicConfigurator properly calculates network subnets
- for a configuration with 2 static NICs"""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- config = Config(cf)
-
- nicConfigurator = NicConfigurator(config.nics, False)
- nics_cfg_list = nicConfigurator.generate()
-
- self.assertEqual(2, len(nics_cfg_list), "number of elements")
-
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
- route_list = []
- for cfg in nics_cfg_list:
- cfg_type = cfg.get('type')
- if cfg_type == 'physical':
- if cfg.get('name') == nic1.get('name'):
- nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
- nic2.update(cfg)
-
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
-
- subnets = nic1.get('subnets')
- self.assertEqual(2, len(subnets), 'Number of subnets')
-
- static_subnet = []
- static6_subnet = []
-
- for subnet in subnets:
- subnet_type = subnet.get('type')
- if subnet_type == 'static':
- static_subnet.append(subnet)
- elif subnet_type == 'static6':
- static6_subnet.append(subnet)
- else:
- self.assertEqual(True, False, 'Unknown type')
- if 'route' in subnet:
- for route in subnet.get('routes'):
- route_list.append(route)
-
- self.assertEqual(1, len(static_subnet), 'Number of static subnet')
- self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet')
-
- subnet = static_subnet[0]
- self.assertEqual('10.20.87.154', subnet.get('address'),
- 'IPv4 address of static subnet')
- self.assertEqual('255.255.252.0', subnet.get('netmask'),
- 'NetMask of static subnet')
- self.assertEqual('auto', subnet.get('control'),
- 'control for static subnet')
-
- subnet = static6_subnet[0]
- self.assertEqual('fc00:10:20:87::154', subnet.get('address'),
- 'IPv6 address of static subnet')
- self.assertEqual('64', subnet.get('netmask'),
- 'NetMask of static6 subnet')
-
- route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10'])
- for route in route_list:
- self.assertEqual(10000, route.get('metric'), 'metric of route')
- gateway = route.get('gateway')
- if gateway in route_set:
- route_set.discard(gateway)
- else:
- self.assertEqual(True, False, 'invalid gateway %s' % (gateway))
-
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'),
- 'mac address of NIC2')
-
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'Number of subnets for NIC2')
-
- subnet = subnets[0]
- self.assertEqual('static', subnet.get('type'), 'Subnet type')
- self.assertEqual('192.168.6.102', subnet.get('address'),
- 'Subnet address')
- self.assertEqual('255.255.0.0', subnet.get('netmask'),
- 'Subnet netmask')
-
- def test_custom_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertIsNone(conf.custom_script_name)
- cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
- conf = Config(cf)
- self.assertEqual("test-script", conf.custom_script_name)
-
- def test_post_gc_status(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertFalse(conf.post_gc_status)
- cf._insertKey("MISC|POST-GC-STATUS", "YES")
- conf = Config(cf)
- self.assertTrue(conf.post_gc_status)
-
- def test_no_default_run_post_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertFalse(conf.default_run_post_script)
- cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
- conf = Config(cf)
- self.assertFalse(conf.default_run_post_script)
-
- def test_yes_default_run_post_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
- conf = Config(cf)
- self.assertTrue(conf.default_run_post_script)
-
-
-class TestVmwareNetConfig(CiTestCase):
- """Test conversion of vmware config to cloud-init config."""
-
- maxDiff = None
-
- def _get_NicConfigurator(self, text):
- fp = None
- try:
- with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
- delete=False) as fp:
- fp.write(text)
- fp.close()
- cfg = Config(ConfigFile(fp.name))
- return NicConfigurator(cfg.nics, use_system_devices=False)
- finally:
- if fp:
- os.unlink(fp.name)
-
- def test_non_primary_nic_without_gateway(self):
- """A non primary nic set is not required to have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
- nc.generate())
-
- def test_non_primary_nic_with_gateway(self):
- """A non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- GATEWAY = 10.20.87.253
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'routes':
- [{'type': 'route', 'destination': '10.20.84.0/22',
- 'gateway': '10.20.87.253', 'metric': 10000}]}]}],
- nc.generate())
-
- def test_cust_non_primary_nic_with_gateway_(self):
- """A customer non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = static-debug-vm
- DOMAINNAME = cluster.local
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:ac:d1:8a
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 100.115.223.75
- NETMASK = 255.255.255.0
- GATEWAY = 100.115.223.254
-
-
- [DNS]
- DNSFROMDHCP=no
-
- NAMESERVER|1 = 8.8.8.8
-
- [DATETIME]
- UTC = yes
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:ac:d1:8a',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '100.115.223.75', 'netmask': '255.255.255.0',
- 'routes':
- [{'type': 'route', 'destination': '100.115.223.0/24',
- 'gateway': '100.115.223.254', 'metric': 10000}]}]}],
- nc.generate())
-
- def test_a_primary_nic_with_gateway(self):
- """A primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- PRIMARY = true
- GATEWAY = 10.20.87.253
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'gateway': '10.20.87.253'}]}],
- nc.generate())
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
new file mode 100644
index 00000000..79a6e1d0
--- /dev/null
+++ b/tests/unittests/util.py
@@ -0,0 +1,145 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import cloud, distros, helpers
+from cloudinit.sources.DataSourceNone import DataSourceNone
+
+
+def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
+ """Obtain a "cloud" that can be used for testing.
+
+ Modules take a 'cloud' parameter to call into things that are
+ datasource/distro specific. In most cases, the specifics of this cloud
+ implementation aren't needed to test the module, so provide a fake
+ datasource/distro with stubbed calls to methods that may attempt to
+ read/write files or shell out. If a specific distro is needed, it can
+ be passed in as the distro parameter.
+ """
+ paths = paths or helpers.Paths({})
+ sys_cfg = sys_cfg or {}
+ cls = distros.fetch(distro) if distro else MockDistro
+ mydist = cls(distro, sys_cfg, paths)
+ myds = DataSourceTesting(sys_cfg, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ if paths:
+ paths.datasource = myds
+ return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
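For context, a minimal sketch of how a module test might consume this helper (a hypothetical test that is not part of this change set, assuming config modules keep the long-standing handle(name, cfg, cloud, log, args) signature):

import logging

from cloudinit.config import cc_locale
from tests.unittests.util import get_cloud

LOG = logging.getLogger(__name__)


def test_locale_handler_runs_against_stub_cloud():
    # With no distro argument, get_cloud() selects MockDistro, whose
    # apply_locale() is a no-op, so the handler runs without touching
    # the host system.
    mycloud = get_cloud()
    cc_locale.handle("cc_locale", {"locale": "en_GB.UTF-8"}, mycloud, LOG, [])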
+
+
+def abstract_to_concrete(abclass):
+ """Takes an abstract class and returns a concrete version of it."""
+
+ class concreteCls(abclass):
+ pass
+
+ concreteCls.__abstractmethods__ = frozenset()
+ return type("DummyConcrete" + abclass.__name__, (concreteCls,), {})
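A small illustration of what this enables (hypothetical usage, not part of this diff): an otherwise-abstract base class becomes instantiable once its abstract-method set is cleared:

import abc

from tests.unittests.util import abstract_to_concrete


class Reader(abc.ABC):
    @abc.abstractmethod
    def read(self):
        ...


# Reader() would raise TypeError; the generated concrete subclass does not.
ConcreteReader = abstract_to_concrete(Reader)
instance = ConcreteReader()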
+
+
+class DataSourceTesting(DataSourceNone):
+ def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
+ return "hostname"
+
+ def persist_instance_data(self):
+ return True
+
+ @property
+ def fallback_interface(self):
+ return None
+
+ @property
+ def cloud_name(self):
+ return "testing"
+
+
+class MockDistro(distros.Distro):
+ # MockDistro is here to test base Distro class implementations
+ def __init__(self, name="testingdistro", cfg=None, paths=None):
+ if not cfg:
+ cfg = {}
+ if not paths:
+ paths = {}
+ super(MockDistro, self).__init__(name, cfg, paths)
+
+ def install_packages(self, pkglist):
+ pass
+
+ def set_hostname(self, hostname, fqdn=None):
+ pass
+
+ def uses_systemd(self):
+ return True
+
+ def get_primary_arch(self):
+ return "i386"
+
+ def get_package_mirror_info(self, arch=None, data_source=None):
+ pass
+
+ def apply_network(self, settings, bring_up=True):
+ return False
+
+ def generate_fallback_config(self):
+ return {}
+
+ def apply_network_config(self, netconfig, bring_up=False) -> bool:
+ return False
+
+ def apply_network_config_names(self, netconfig):
+ pass
+
+ def apply_locale(self, locale, out_fn=None):
+ pass
+
+ def set_timezone(self, tz):
+ pass
+
+ def _read_hostname(self, filename, default=None):
+ raise NotImplementedError()
+
+ def _write_hostname(self, hostname, filename):
+ raise NotImplementedError()
+
+ def _read_system_hostname(self):
+ raise NotImplementedError()
+
+ def update_hostname(self, hostname, fqdn, prev_hostname_fn):
+ pass
+
+ def update_etc_hosts(self, hostname, fqdn):
+ pass
+
+ def add_user(self, name, **kwargs):
+ pass
+
+ def add_snap_user(self, name, **kwargs):
+ return "snap_user"
+
+ def create_user(self, name, **kwargs):
+ return True
+
+ def lock_passwd(self, name):
+ pass
+
+ def expire_passwd(self, user):
+ pass
+
+ def set_passwd(self, user, passwd, hashed=False):
+ return True
+
+ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
+ pass
+
+ def write_sudo_rules(self, user, rules, sudo_file=None):
+ pass
+
+ def create_group(self, name, members=None):
+ pass
+
+ def shutdown_command(self, *, mode, delay, message):
+ pass
+
+ def package_command(self, command, args=None, pkgs=None):
+ pass
+
+ def update_package_sources(self):
+ return (True, "yay")
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 1e0c3ea4..ac157a2f 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,33 +1,92 @@
ader1990
+ajmyyra
+akutz
AlexBaranowski
Aman306
+andgein
+andrewbogott
+andrewlukoshko
+antonyc
aswinrajamannar
+beantaxi
beezly
bipinbachhao
BirknerAlex
+bmhughes
candlerb
+cawamata
+cclauss
+chrislalos
+ciprianbadescu
+citrus-it
+cjp256
+dankenigsberg
+ddymko
dermotbradley
dhensby
eandersson
+eb3095
emmanuelthome
+eslerm
+esposem
+GabrielNagy
+giggsoff
+hamalq
+holmanb
+impl
+irishgordo
izzyleung
+j5awry
+Jille
+JohnKepplers
johnsonshi
+jordimassaguerpla
jqueuniet
jsf9k
+jshen28
+klausenbusk
+KsenijaS
landon912
lucasmoura
+lucendio
lungj
+mal
+mamercad
manuelisimo
marlluslustosa
matthewruffell
+maxnet
+megian
+mitechie
+nazunalika
+nicolasbock
nishigori
+olivierlemasle
omBratteng
onitake
+qubidt
+renanrodrigo
+rhansen
riedel
+sarahwzadara
+slingamn
slyon
smoser
sshedi
+stappersg
+steverweber
+t-8ch
TheRealFalcon
+taoyama
+timothegenzmer
+tnt-dev
tomponline
tsanghan
+tSU-RooT
+vorlonofportland
+vteratipally
+Vultaire
WebSpider
+xiachen-rh
+xnox
+zhuzaifangxuele
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 21171ac6..9b09d568 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -4,6 +4,7 @@
"ahosmanmsft": "AOhassan",
"andreipoltavchenko": "pa-yourserveradmin-com",
"askon": "ask0n",
+ "b1sandmann": "B1Sandmann",
"bitfehler": "bitfehler",
"chad.smith": "blackboxsw",
"chcheng": "chengcheng-chcheng",
@@ -29,6 +30,7 @@
"rjschwei": "rjschwei",
"tribaal": "chrisglass",
"trstringer": "trstringer",
+ "vlastimil-holer": "vholer",
"vtqanh": "anhvoms",
"xiaofengw": "xiaofengw-vmware"
}
diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
index d2a7067d..0d4eb58b 100755
--- a/tools/build-on-netbsd
+++ b/tools/build-on-netbsd
@@ -2,17 +2,26 @@
fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+PYTHON="${PYTHON:-python3}"
+if [ ! $(which ${PYTHON}) ]; then
+ echo "Please install python first."
+ exit 1
+fi
+py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))')
+
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
bash
dmidecode
- py37-configobj
- py37-jinja2
- py37-oauthlib
- py37-requests
- py37-setuptools
- py37-yaml
+ ${py_prefix}-configobj
+ ${py_prefix}-jinja2
+ ${py_prefix}-oauthlib
+ ${py_prefix}-requests
+ ${py_prefix}-setuptools
+ ${py_prefix}-netifaces
+ ${py_prefix}-yaml
+ ${py_prefix}-jsonschema
sudo
"
[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
@@ -20,8 +29,8 @@ pkgs="
touch $depschecked
# Build the code and install in /usr/pkg/:
-python3.7 setup.py build
-python3.7 setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
+${PYTHON} setup.py build
+${PYTHON} setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
mv -v /usr/local/etc/rc.d/cloud* /etc/rc.d
# Enable cloud-init in /etc/rc.conf:
diff --git a/tools/ds-identify b/tools/ds-identify
index 496dbb8a..794a96f4 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1,5 +1,5 @@
#!/bin/sh
-# shellcheck disable=2015,2039,2162,2166
+# shellcheck disable=2015,2039,2162,2166,3043
#
# ds-identify is configured via /etc/cloud/ds-identify.cfg
# or on the kernel command line. It takes the following inputs:
@@ -124,8 +124,9 @@ DI_DSNAME=""
# this has to match the builtin list in cloud-init, it is what will
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
-CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud"
+CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware \
+LXD"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -141,6 +142,7 @@ error() {
debug 0 "$@"
stderr "$@"
}
+
warn() {
set -- "WARN:" "$@"
debug 0 "$@"
@@ -344,7 +346,6 @@ geom_label_status_as() {
return $ret
}
-
read_fs_info_freebsd() {
local oifs="$IFS" line="" delim=","
local ret=0 labels="" dev="" label="" ftype="" isodevs=""
@@ -404,7 +405,6 @@ cached() {
[ -n "$1" ] && _RET="$1" && return || return 1
}
-
detect_virt() {
local virt="${UNAVAILABLE}" r="" out=""
if [ -d /run/systemd ]; then
@@ -450,7 +450,7 @@ detect_virt() {
read_virt() {
cached "$DI_VIRT" && return 0
detect_virt
- DI_VIRT=${_RET}
+ DI_VIRT="${_RET}"
}
is_container() {
@@ -616,6 +616,7 @@ read_pid1_product_name() {
dmi_chassis_asset_tag_matches() {
is_container && return 1
+ # shellcheck disable=2254
case "${DI_DMI_CHASSIS_ASSET_TAG}" in
$1) return 0;;
esac
@@ -624,6 +625,7 @@ dmi_chassis_asset_tag_matches() {
dmi_product_name_matches() {
is_container && return 1
+ # shellcheck disable=2254
case "${DI_DMI_PRODUCT_NAME}" in
$1) return 0;;
esac
@@ -632,6 +634,7 @@ dmi_product_name_matches() {
dmi_product_serial_matches() {
is_container && return 1
+ # shellcheck disable=2254
case "${DI_DMI_PRODUCT_SERIAL}" in
$1) return 0;;
esac
@@ -765,7 +768,7 @@ check_config() {
while read line; do
line=${line%%#*}
case "$line" in
- $key:\ *|$key:)
+ $key:\ *|"${key}":)
ret=${line#*:};
ret=${ret# };
found=$((found+1))
@@ -800,6 +803,12 @@ dscheck_MAAS() {
return ${DS_NOT_FOUND}
}
+# LXD datasource requires active /dev/lxd/sock
+# https://linuxcontainers.org/lxd/docs/master/dev-lxd
+dscheck_LXD() {
+ [ -S /dev/lxd/sock ] && return ${DS_FOUND} || return ${DS_NOT_FOUND}
+}
+
dscheck_NoCloud() {
local fslabel="cidata CIDATA" d=""
case " ${DI_KERNEL_CMDLINE} " in
@@ -813,6 +822,7 @@ dscheck_NoCloud() {
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
done
+ # shellcheck disable=2086
if has_fs_with_label $fslabel; then
return ${DS_FOUND}
fi
@@ -883,6 +893,11 @@ dscheck_RbxCloud() {
return ${DS_NOT_FOUND}
}
+dscheck_UpCloud() {
+ dmi_sys_vendor_is UpCloud && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
ovf_vmware_guest_customization() {
# vmware guest customization
@@ -891,11 +906,16 @@ ovf_vmware_guest_customization() {
# we have to have the plugin to do vmware customization
local found="" pkg="" pre="${PATH_ROOT}/usr/lib"
+ local x86="x86_64-linux-gnu" aarch="aarch64-linux-gnu"
local ppath="plugins/vmsvc/libdeployPkgPlugin.so"
for pkg in vmware-tools open-vm-tools; do
if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then
found="$pkg"; break;
fi
+ # search in multiarch dir
+ if [ -f "$pre/$x86/$pkg/$ppath" -o -f "$pre/$aarch/$pkg/$ppath" ]; then
+ found="$pkg"; break;
+ fi
done
[ -n "$found" ] || return 1
# vmware customization is disabled by default
@@ -1235,11 +1255,11 @@ dscheck_AltCloud() {
ctype="${DI_DMI_PRODUCT_NAME}"
fi
case "$ctype" in
- ${match_rhev})
+ "${match_rhev}")
probe_floppy || return ${DS_NOT_FOUND}
dev="/dev/floppy"
;;
- ${match_vsphere})
+ "${match_vsphere}")
block_dev_with_label CDROM || return ${DS_NOT_FOUND}
dev="$_RET"
;;
@@ -1305,6 +1325,7 @@ is_ibm_provisioning() {
msg="config '$pcfg' exists."
is_prov=true
if [ -f "$logf" ]; then
+ # shellcheck disable=3013
if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then
msg="$msg log '$logf' from current boot."
else
@@ -1320,7 +1341,7 @@ is_ibm_provisioning() {
}
is_ibm_cloud() {
- cached "${_IS_IBM_CLOUD}" && return ${_IS_IBM_CLOUD}
+ cached "${_IS_IBM_CLOUD}" && return "${_IS_IBM_CLOUD}"
local ret=1
if [ "$DI_VIRT" = "xen" ]; then
if is_ibm_provisioning; then
@@ -1345,6 +1366,98 @@ dscheck_IBMCloud() {
return ${DS_NOT_FOUND}
}
+dscheck_Vultr() {
+ dmi_sys_vendor_is Vultr && return $DS_FOUND
+
+ case " $DI_KERNEL_CMDLINE " in
+ *\ vultr\ *) return $DS_FOUND ;;
+ esac
+
+ if [ -f "${PATH_ROOT}/etc/vultr" ]; then
+ return $DS_FOUND
+ fi
+
+ return $DS_NOT_FOUND
+}
+
+vmware_has_envvar_vmx_guestinfo() {
+ [ -n "${VMX_GUESTINFO:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_metadata() {
+ [ -n "${VMX_GUESTINFO_METADATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_userdata() {
+ [ -n "${VMX_GUESTINFO_USERDATA:-}" ]
+}
+
+vmware_has_envvar_vmx_guestinfo_vendordata() {
+ [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ]
+}
+
+vmware_has_rpctool() {
+ command -v vmware-rpctool >/dev/null 2>&1
+}
+
+vmware_rpctool_guestinfo() {
+ vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]"
+}
+
+vmware_rpctool_guestinfo_metadata() {
+ vmware_rpctool_guestinfo "metadata"
+}
+
+vmware_rpctool_guestinfo_userdata() {
+ vmware_rpctool_guestinfo "userdata"
+}
+
+vmware_rpctool_guestinfo_vendordata() {
+ vmware_rpctool_guestinfo "vendordata"
+}
+
+dscheck_VMware() {
+ # Checks to see if there is valid data for the VMware datasource.
+ # The data transports are checked in the following order:
+ #
+ # * envvars
+ # * guestinfo
+ #
+ # Please note when updating this function with support for new data
+ # transports, the order should match the order in the _get_data
+ # function from the file DataSourceVMware.py.
+
+ # Check to see if running in a container and the VMware
+ # datasource is configured via environment variables.
+ if vmware_has_envvar_vmx_guestinfo; then
+ if vmware_has_envvar_vmx_guestinfo_metadata || \
+ vmware_has_envvar_vmx_guestinfo_userdata || \
+ vmware_has_envvar_vmx_guestinfo_vendordata; then
+ return "${DS_FOUND}"
+ fi
+ fi
+
+ # Do not proceed unless the detected platform is VMware.
+ if [ ! "${DI_VIRT}" = "vmware" ]; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Do not proceed if the vmware-rpctool command is not present.
+ if ! vmware_has_rpctool; then
+ return "${DS_NOT_FOUND}"
+ fi
+
+ # Activate the VMware datasource only if any of the fields used
+ # by the datasource are present in the guestinfo table.
+ if { vmware_rpctool_guestinfo_metadata || \
+ vmware_rpctool_guestinfo_userdata || \
+ vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then
+ return "${DS_FOUND}"
+ fi
+
+ return "${DS_NOT_FOUND}"
+}
+
collect_info() {
read_uname_info
read_virt
@@ -1544,10 +1657,10 @@ parse_policy() {
for tok in "$@"; do
val=${tok#*=}
case "$tok" in
- $DI_ENABLED|$DI_DISABLED|search|report) mode=$tok;;
+ "${DI_ENABLED}"|"${DI_DISABLED}"|search|report) mode=$tok;;
found=all|found=first) found=$val;;
maybe=all|maybe=none) maybe=$val;;
- notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;;
+ notfound="${DI_ENABLED}"|notfound="${DI_DISABLED}") notfound=$val;;
found=*)
parse_warn found "$val" "${_def_found}"
found=${_def_found};;
@@ -1628,11 +1741,11 @@ _main() {
fi
case "$DI_MODE" in
- $DI_DISABLED)
+ "${DI_DISABLED}")
debug 1 "mode=$DI_DISABLED. returning $ret_dis"
return $ret_dis
;;
- $DI_ENABLED)
+ "${DI_ENABLED}")
debug 1 "mode=$DI_ENABLED. returning $ret_en"
return $ret_en;;
search|report) :;;
@@ -1672,11 +1785,11 @@ _main() {
$dscheck_fn
ret="$?"
case "$ret" in
- $DS_FOUND)
+ "${DS_FOUND}")
debug 1 "check for '$ds' returned found";
exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}"
found="${found} $ds";;
- $DS_MAYBE)
+ "${DS_MAYBE}")
debug 1 "check for '$ds' returned maybe";
exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}"
maybe="${maybe} $ds";;
@@ -1715,16 +1828,16 @@ _main() {
local basemsg="No ds found [mode=$DI_MODE, notfound=$DI_ON_NOTFOUND]."
local msg="" ret=3
case "$DI_MODE:$DI_ON_NOTFOUND" in
- report:$DI_DISABLED)
+ report:"${DI_DISABLED}")
msg="$basemsg Would disable cloud-init [$ret_dis]"
ret=$ret_en;;
- report:$DI_ENABLED)
+ report:"${DI_ENABLED}")
msg="$basemsg Would enable cloud-init [$ret_en]"
ret=$ret_en;;
- search:$DI_DISABLED)
+ search:"${DI_DISABLED}")
msg="$basemsg Disabled cloud-init [$ret_dis]"
ret=$ret_dis;;
- search:$DI_ENABLED)
+ search:"${DI_ENABLED}")
msg="$basemsg Enabled cloud-init [$ret_en]"
ret=$ret_en;;
*) error "Unexpected result";;
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
new file mode 100755
index 00000000..35bd3da2
--- /dev/null
+++ b/tools/hook-hotplug
@@ -0,0 +1,22 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# This script checks if cloud-init has hotplug hooked and if
+# cloud-init has finished; if so invoke cloud-init hotplug-hook
+
+is_finished() {
+ [ -e /run/cloud-init/result.json ]
+}
+
+if is_finished; then
+ # open cloud-init's hotplug-hook fifo rw
+ exec 3<>/run/cloud-init/hook-hotplug-cmd
+ env_params=(
+ --subsystem="${SUBSYSTEM}"
+ handle
+ --devpath="${DEVPATH}"
+ --udevaction="${ACTION}"
+ )
+ # write params to cloud-init's hotplug-hook fifo
+ echo "${env_params[@]}" >&3
+fi
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 9dd067b9..4ac1ea4f 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -36,74 +36,74 @@ except ImportError:
from http import client as hclient
-log = logging.getLogger('meta-server')
+log = logging.getLogger("meta-server")
EC2_VERSIONS = [
- '1.0',
- '2007-01-19',
- '2007-03-01',
- '2007-08-29',
- '2007-10-10',
- '2007-12-15',
- '2008-02-01',
- '2008-09-01',
- '2009-04-04',
+ "1.0",
+ "2007-01-19",
+ "2007-03-01",
+ "2007-08-29",
+ "2007-10-10",
+ "2007-12-15",
+ "2008-02-01",
+ "2008-09-01",
+ "2009-04-04",
]
BLOCK_DEVS = [
- 'ami',
- 'ephemeral0',
- 'root',
+ "ami",
+ "ephemeral0",
+ "root",
]
-DEV_PREFIX = 'v' # This seems to vary alot depending on images...
+DEV_PREFIX = "v" # This seems to vary alot depending on images...
DEV_MAPPINGS = {
- 'ephemeral0': '%sda2' % (DEV_PREFIX),
- 'root': '/dev/%sda1' % (DEV_PREFIX),
- 'ami': '%sda1' % (DEV_PREFIX),
- 'swap': '%sda3' % (DEV_PREFIX),
+ "ephemeral0": "%sda2" % (DEV_PREFIX),
+ "root": "/dev/%sda1" % (DEV_PREFIX),
+ "ami": "%sda1" % (DEV_PREFIX),
+ "swap": "%sda3" % (DEV_PREFIX),
}
META_CAPABILITIES = [
- 'aki-id',
- 'ami-id',
- 'ami-launch-index',
- 'ami-manifest-path',
- 'ari-id',
- 'block-device-mapping/',
- 'hostname',
- 'instance-action',
- 'instance-id',
- 'instance-type',
- 'local-hostname',
- 'local-ipv4',
- 'placement/',
- 'product-codes',
- 'public-hostname',
- 'public-ipv4',
- 'public-keys/',
- 'reservation-id',
- 'security-groups'
+ "aki-id",
+ "ami-id",
+ "ami-launch-index",
+ "ami-manifest-path",
+ "ari-id",
+ "block-device-mapping/",
+ "hostname",
+ "instance-action",
+ "instance-id",
+ "instance-type",
+ "local-hostname",
+ "local-ipv4",
+ "placement/",
+ "product-codes",
+ "public-hostname",
+ "public-ipv4",
+ "public-keys/",
+ "reservation-id",
+ "security-groups",
]
PUB_KEYS = {
- 'brickies': [
- ('ssh-rsa '
- 'AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemN'
- 'Sj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxz'
- 'xtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJ'
- 'tO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7'
- 'u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN'
- '+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== brickies'),
- '',
+ "brickies": [
+ "ssh-rsa "
+ "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemN"
+ "Sj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxz"
+ "xtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJ"
+ "tO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7"
+ "u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN"
+ "+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== brickies",
+ "",
],
}
INSTANCE_TYPES = [
- 'm1.large',
- 'm1.medium',
- 'm1.small',
- 'm1.xlarge',
+ "m1.large",
+ "m1.medium",
+ "m1.small",
+ "m1.xlarge",
]
AVAILABILITY_ZONES = [
@@ -111,13 +111,13 @@ AVAILABILITY_ZONES = [
"us-east-1b",
"us-east-1c",
"us-east-1d",
- 'eu-west-1a',
- 'eu-west-1b',
- 'us-west-1',
+ "eu-west-1a",
+ "eu-west-1b",
+ "us-west-1",
]
PLACEMENT_CAPABILITIES = {
- 'availability-zone': AVAILABILITY_ZONES,
+ "availability-zone": AVAILABILITY_ZONES,
}
NOT_IMPL_RESPONSE = json.dumps({})
@@ -130,12 +130,14 @@ class WebException(Exception):
def yamlify(data):
- formatted = yaml.dump(data,
- line_break="\n",
- indent=4,
- explicit_start=True,
- explicit_end=True,
- default_flow_style=False)
+ formatted = yaml.dump(
+ data,
+ line_break="\n",
+ indent=4,
+ explicit_start=True,
+ explicit_end=True,
+ default_flow_style=False,
+ )
return formatted
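For reference, the reformatted yamlify() keeps the same yaml.dump options; a standalone snippet (illustrative only) shows what explicit_start/explicit_end and block style produce:

    # Illustrative only: what yamlify() produces for a small mapping.
    import yaml

    doc = {"hostname": "localhost", "keys": ["a", "b"]}
    print(
        yaml.dump(
            doc,
            line_break="\n",
            indent=4,
            explicit_start=True,        # leading '---'
            explicit_end=True,          # trailing '...'
            default_flow_style=False,   # block style, one key per line
        )
    )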
@@ -164,7 +166,7 @@ ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)]
def id_generator(size=6, lower=False):
- txt = ''.join(random.choice(ID_CHARS) for x in range(size))
+ txt = "".join(random.choice(ID_CHARS) for x in range(size))
if lower:
return txt.lower()
else:
@@ -176,14 +178,14 @@ def get_ssh_keys():
keys.update(PUB_KEYS)
# Nice helper to add in the 'running' users key (if they have one)
- key_pth = os.path.expanduser('~/.ssh/id_rsa.pub')
+ key_pth = os.path.expanduser("~/.ssh/id_rsa.pub")
if not os.path.isfile(key_pth):
- key_pth = os.path.expanduser('~/.ssh/id_dsa.pub')
+ key_pth = os.path.expanduser("~/.ssh/id_dsa.pub")
if os.path.isfile(key_pth):
- with open(key_pth, 'rb') as fh:
+ with open(key_pth, "rb") as fh:
contents = fh.read()
- keys[os.getlogin()] = [contents, '']
+ keys[os.getlogin()] = [contents, ""]
return keys
@@ -193,7 +195,6 @@ class HTTPServerV6(HTTPServer):
class MetaDataHandler(object):
-
def __init__(self, opts):
self.opts = opts
self.instances = {}
@@ -206,17 +207,17 @@ class MetaDataHandler(object):
return "\n".join(caps)
action = params[0]
action = action.lower()
- if action == 'instance-id':
- return 'i-%s' % (id_generator(lower=True))
- elif action == 'ami-launch-index':
+ if action == "instance-id":
+ return "i-%s" % (id_generator(lower=True))
+ elif action == "ami-launch-index":
return "%s" % random.choice([0, 1, 2, 3])
- elif action == 'aki-id':
- return 'aki-%s' % (id_generator(lower=True))
- elif action == 'ami-id':
- return 'ami-%s' % (id_generator(lower=True))
- elif action == 'ari-id':
- return 'ari-%s' % (id_generator(lower=True))
- elif action == 'block-device-mapping':
+ elif action == "aki-id":
+ return "aki-%s" % (id_generator(lower=True))
+ elif action == "ami-id":
+ return "ami-%s" % (id_generator(lower=True))
+ elif action == "ari-id":
+ return "ari-%s" % (id_generator(lower=True))
+ elif action == "block-device-mapping":
nparams = params[1:]
if not nparams:
return "\n".join(BLOCK_DEVS)
@@ -226,23 +227,23 @@ class MetaDataHandler(object):
return "\n".join(sorted(list(DEV_MAPPINGS.keys())))
else:
return str(subvalue)
- elif action in ['hostname', 'local-hostname', 'public-hostname']:
+ elif action in ["hostname", "local-hostname", "public-hostname"]:
# Just echo back there own hostname that they called in on..
return "%s" % (who)
- elif action == 'instance-type':
+ elif action == "instance-type":
return random.choice(INSTANCE_TYPES)
- elif action == 'ami-manifest-path':
- return 'my-amis/spamd-image.manifest.xml'
- elif action == 'security-groups':
- return 'default'
- elif action in ['local-ipv4', 'public-ipv4']:
+ elif action == "ami-manifest-path":
+ return "my-amis/spamd-image.manifest.xml"
+ elif action == "security-groups":
+ return "default"
+ elif action in ["local-ipv4", "public-ipv4"]:
# Just echo back there own ip that they called in on...
- return "%s" % (kwargs.get('client_ip', '10.0.0.1'))
- elif action == 'reservation-id':
+ return "%s" % (kwargs.get("client_ip", "10.0.0.1"))
+ elif action == "reservation-id":
return "r-%s" % (id_generator(lower=True))
- elif action == 'product-codes':
+ elif action == "product-codes":
return "%s" % (id_generator(size=8))
- elif action == 'public-keys':
+ elif action == "public-keys":
nparams = params[1:]
# This is a weird kludge, why amazon why!!!
# public-keys is messed up, list of /latest/meta-data/public-keys/
@@ -267,51 +268,55 @@ class MetaDataHandler(object):
hclient.NOT_FOUND, "Unknown key id %r" % mybe_key
) from e
# Extract the possible sub-params
- result = traverse(nparams[1:], {
- "openssh-key": "\n".join(avail_keys[key_name]),
- })
+ result = traverse(
+ nparams[1:],
+ {
+ "openssh-key": "\n".join(avail_keys[key_name]),
+ },
+ )
if isinstance(result, (dict)):
# TODO(harlowja): This might not be right??
result = "\n".join(sorted(result.keys()))
if not result:
- result = ''
+ result = ""
return result
else:
contents = []
for (i, key_id) in enumerate(key_ids):
contents.append("%s=%s" % (i, key_id))
return "\n".join(contents)
- elif action == 'placement':
+ elif action == "placement":
nparams = params[1:]
if not nparams:
pcaps = sorted(PLACEMENT_CAPABILITIES.keys())
return "\n".join(pcaps)
else:
pentry = nparams[0].strip().lower()
- if pentry == 'availability-zone':
+ if pentry == "availability-zone":
zones = PLACEMENT_CAPABILITIES[pentry]
return "%s" % random.choice(zones)
else:
- return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
+ return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ""))
else:
- log.warning(("Did not implement action %s, "
- "returning empty response: %r"),
- action, NOT_IMPL_RESPONSE)
+ log.warning(
+ "Did not implement action %s, returning empty response: %r",
+ action,
+ NOT_IMPL_RESPONSE,
+ )
return NOT_IMPL_RESPONSE
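The get_data() dispatch above keys off the first path segment after the API version. A self-contained sketch of the same idea (hypothetical helper names, not the real MetaDataHandler internals):

    # Illustrative sketch of the action dispatch implemented above.
    import random
    import string

    def fake_id(prefix, size=6):
        chars = string.ascii_lowercase + string.digits
        return "%s-%s" % (prefix, "".join(random.choice(chars) for _ in range(size)))

    def lookup(action, who="localhost"):
        if action == "instance-id":
            return fake_id("i")
        if action in ("hostname", "local-hostname", "public-hostname"):
            return who          # the mock simply echoes the caller back
        if action == "security-groups":
            return "default"
        return ""               # unimplemented actions get an empty response

    print(lookup("instance-id"))
    print(lookup("hostname", who="test-node"))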
class UserDataHandler(object):
-
def __init__(self, opts):
self.opts = opts
def _get_user_blob(self, **kwargs):
blob = None
- if self.opts['user_data_file'] is not None:
- blob = self.opts['user_data_file']
+ if self.opts["user_data_file"] is not None:
+ blob = self.opts["user_data_file"]
if not blob:
blob_mp = {
- 'hostname': kwargs.get('who', 'localhost'),
+ "hostname": kwargs.get("who", "localhost"),
}
lines = [
"#cloud-config",
@@ -334,9 +339,8 @@ user_fetcher = None
class Ec2Handler(BaseHTTPRequestHandler):
-
def _get_versions(self):
- versions = ['latest'] + EC2_VERSIONS
+ versions = ["latest"] + EC2_VERSIONS
versions = sorted(versions)
return "\n".join(versions)
@@ -347,33 +351,35 @@ class Ec2Handler(BaseHTTPRequestHandler):
def _find_method(self, path):
# Puke! (globals)
func_mapping = {
- 'user-data': user_fetcher.get_data,
- 'meta-data': meta_fetcher.get_data,
+ "user-data": user_fetcher.get_data,
+ "meta-data": meta_fetcher.get_data,
}
- segments = [piece for piece in path.split('/') if len(piece)]
+ segments = [piece for piece in path.split("/") if len(piece)]
log.info("Received segments %s", segments)
if not segments:
return self._get_versions
date = segments[0].strip().lower()
if date not in self._get_versions():
- raise WebException(hclient.BAD_REQUEST,
- "Unknown version format %r" % date)
+ raise WebException(
+ hclient.BAD_REQUEST, "Unknown version format %r" % date
+ )
if len(segments) < 2:
raise WebException(hclient.BAD_REQUEST, "No action provided")
look_name = segments[1].lower()
if look_name not in func_mapping:
- raise WebException(hclient.BAD_REQUEST,
- "Unknown requested data %r" % look_name)
+ raise WebException(
+ hclient.BAD_REQUEST, "Unknown requested data %r" % look_name
+ )
base_func = func_mapping[look_name]
who = self.address_string()
ip_from = self.client_address[0]
if who == ip_from:
# Nothing resolved, so just use 'localhost'
- who = 'localhost'
+ who = "localhost"
kwargs = {
- 'params': list(segments[2:]),
- 'who': who,
- 'client_ip': ip_from,
+ "params": list(segments[2:]),
+ "who": who,
+ "client_ip": ip_from,
}
return functools.partial(base_func, **kwargs)
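_find_method() resolves /<version>/<user-data|meta-data>/... to a handler and binds the request context with functools.partial, so the HTTP handler can call it with no arguments. A standalone sketch of that pattern (names are stand-ins):

    # Standalone sketch of the functools.partial dispatch used by _find_method().
    import functools

    def get_data(params, who, client_ip):
        return "params=%s who=%s ip=%s" % (params, who, client_ip)

    handler = functools.partial(
        get_data,
        params=["instance-id"],
        who="localhost",
        client_ip="10.0.0.1",
    )
    print(handler())  # all request context is already bound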
@@ -384,12 +390,13 @@ class Ec2Handler(BaseHTTPRequestHandler):
func = self._find_method(self.path)
data = func()
if not data:
- data = ''
+ data = ""
self.send_response(hclient.OK)
self.send_header("Content-Type", "binary/octet-stream")
self.send_header("Content-Length", len(data))
- log.info("Sending data (len=%s):\n%s", len(data),
- format_text(data))
+ log.info(
+ "Sending data (len=%s):\n%s", len(data), format_text(data)
+ )
self.end_headers()
self.wfile.write(data.encode())
except RuntimeError as e:
@@ -407,7 +414,7 @@ class Ec2Handler(BaseHTTPRequestHandler):
self._do_response()
-def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
+def setup_logging(log_level, fmt="%(levelname)s: @%(name)s : %(message)s"):
root_logger = logging.getLogger()
console_logger = logging.StreamHandler(sys.stdout)
console_logger.setFormatter(logging.Formatter(fmt))
@@ -417,27 +424,47 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
def extract_opts():
parser = argparse.ArgumentParser()
- parser.add_argument("-p", "--port", dest="port", action="store", type=int,
- default=80, metavar="PORT",
- help=("port from which to serve traffic"
- " (default: %default)"))
- parser.add_argument("-a", "--addr", dest="address", action="store",
- type=str, default='::', metavar="ADDRESS",
- help=("address from which to serve traffic"
- " (default: %default)"))
- parser.add_argument("-f", '--user-data-file', dest='user_data_file',
- action='store', metavar='FILE',
- help=("user data filename to serve back to"
- "incoming requests"))
- parser.add_argument('extra', nargs='*')
+ parser.add_argument(
+ "-p",
+ "--port",
+ dest="port",
+ action="store",
+ type=int,
+ default=80,
+ metavar="PORT",
+ help="port from which to serve traffic (default: %default)",
+ )
+ parser.add_argument(
+ "-a",
+ "--addr",
+ dest="address",
+ action="store",
+ type=str,
+ default="::",
+ metavar="ADDRESS",
+ help="address from which to serve traffic (default: %default)",
+ )
+ parser.add_argument(
+ "-f",
+ "--user-data-file",
+ dest="user_data_file",
+ action="store",
+ metavar="FILE",
+        help="user data filename to serve back to incoming requests",
+ )
+ parser.add_argument("extra", nargs="*")
args = parser.parse_args()
- out = {'port': args.port, 'address': args.address, 'extra': args.extra,
- 'user_data_file': None}
+ out = {
+ "port": args.port,
+ "address": args.address,
+ "extra": args.extra,
+ "user_data_file": None,
+ }
if args.user_data_file:
if not os.path.isfile(args.user_data_file):
parser.error("Option -f specified a non-existent file")
- with open(args.user_data_file, 'rb') as fh:
- out['user_data_file'] = fh.read()
+ with open(args.user_data_file, "rb") as fh:
+ out["user_data_file"] = fh.read()
return out
@@ -455,14 +482,14 @@ def run_server():
setup_logging(logging.DEBUG)
setup_fetchers(opts)
log.info("CLI opts: %s", opts)
- server_address = (opts['address'], opts['port'])
+ server_address = (opts["address"], opts["port"])
server = HTTPServerV6(server_address, Ec2Handler)
sa = server.socket.getsockname()
log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1])
server.serve_forever()
-if __name__ == '__main__':
+if __name__ == "__main__":
run_server()
# vi: ts=4 expandtab
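A quick usage sketch for the mock server (port and paths are examples; start it with something like python3 tools/mock-meta.py -p 8000):

    # Query a locally running mock-meta.py instance over HTTP.
    from urllib.request import urlopen

    BASE = "http://localhost:8000"

    for path in ("/latest/meta-data/instance-id", "/latest/user-data"):
        with urlopen(BASE + path) as resp:
            print(path, "->", resp.read().decode())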
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 6ad5f701..efa5879c 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -23,6 +23,9 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
# Map the appropriate package dir needed for each distro choice
DISTRO_PKG_TYPE_MAP = {
'centos': 'redhat',
+ 'eurolinux': 'redhat',
+ 'miraclelinux': 'redhat',
+ 'rocky': 'redhat',
'redhat': 'redhat',
'debian': 'debian',
'ubuntu': 'debian',
@@ -39,6 +42,7 @@ MAYBE_RELIABLE_YUM_INSTALL = [
error ":: http proxy in use => forcing the use of fixed URLs in /etc/yum.repos.d/*.repo"
sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo
sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
+ sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
}
configure_repos_for_proxy_use
n=0; max=10;
@@ -64,11 +68,17 @@ ZYPPER_INSTALL = [
'--auto-agree-with-licenses']
DRY_DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': ['yum', 'install', '--assumeyes'],
'centos': ['yum', 'install', '--assumeyes'],
+ 'eurolinux': ['yum', 'install', '--assumeyes'],
+ 'miraclelinux': ['yum', 'install', '--assumeyes'],
'redhat': ['yum', 'install', '--assumeyes'],
}
DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': MAYBE_RELIABLE_YUM_INSTALL,
+ 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
+ 'miraclelinux': MAYBE_RELIABLE_YUM_INSTALL,
'centos': MAYBE_RELIABLE_YUM_INSTALL,
'redhat': MAYBE_RELIABLE_YUM_INSTALL,
'debian': ['apt', 'install', '-y'],
@@ -81,6 +91,8 @@ DISTRO_INSTALL_PKG_CMD = {
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
'common': ['make', 'sudo', 'tar'],
+ 'eurolinux': ['python3-tox'],
+ 'miraclelinux': ['python3-tox'],
'redhat': ['python3-tox'],
'centos': ['python3-tox'],
'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
@@ -273,10 +285,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro]
install_cmd.extend(cmd)
- if distro in ['centos', 'redhat']:
+ if distro in ['centos', 'redhat', 'rocky', 'eurolinux']:
# CentOS and Redhat need epel-release to access oauthlib and jsonschema
subprocess.check_call(install_cmd + ['epel-release'])
- if distro in ['suse', 'opensuse', 'redhat', 'centos']:
+ if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']:
pkg_list.append('rpm-build')
subprocess.check_call(install_cmd + pkg_list)
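The new distro entries above all funnel into the same lookup tables. A trimmed, illustrative sketch of how read-dependencies maps a distro name to its install command (the real MAYBE_RELIABLE_YUM_INSTALL wraps yum in a retry shell snippet):

    # Simplified stand-in for the mapping in tools/read-dependencies.
    MAYBE_RELIABLE_YUM_INSTALL = ["yum", "install", "--assumeyes"]  # real list adds retries

    DISTRO_INSTALL_PKG_CMD = {
        "rocky": MAYBE_RELIABLE_YUM_INSTALL,
        "eurolinux": MAYBE_RELIABLE_YUM_INSTALL,
        "centos": MAYBE_RELIABLE_YUM_INSTALL,
        "debian": ["apt", "install", "-y"],
    }

    def install_cmd_for(distro, packages):
        # Unknown distros raise KeyError, i.e. fail loudly.
        return DISTRO_INSTALL_PKG_CMD[distro] + list(packages)

    print(install_cmd_for("rocky", ["python3-tox"]))
    print(install_cmd_for("debian", ["make", "sudo"]))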
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index ed454840..176df36b 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -1,47 +1,65 @@
#!/usr/bin/env python3
-import argparse
import os
import sys
+import argparse
-VARIANTS = ["alpine", "amazon", "arch", "centos", "debian", "fedora",
- "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
- "unknown"]
-
-
-if "avoid-pep8-E402-import-not-top-of-file":
+def main():
_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, _tdir)
- from cloudinit import templater
- from cloudinit import util
- from cloudinit.atomic_helper import write_file
-
+ from cloudinit import templater, util # pylint: disable=E0401
-def main():
+ VARIANTS = [
+ "almalinux",
+ "alpine",
+ "amazon",
+ "arch",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "freebsd",
+ "gentoo",
+ "miraclelinux",
+ "netbsd",
+ "openbsd",
+ "openEuler",
+ "photon",
+ "rhel",
+ "suse",
+ "rocky",
+ "ubuntu",
+ "unknown",
+ "virtuozzo",
+ ]
parser = argparse.ArgumentParser()
platform = util.system_info()
parser.add_argument(
- "--variant", default=platform['variant'], action="store",
- help="define the variant.", choices=VARIANTS)
+ "--variant",
+ default=platform["variant"],
+ action="store",
+ help="define the variant.",
+ choices=VARIANTS,
+ )
parser.add_argument(
- "template", nargs="?", action="store",
- default='./config/cloud.cfg.tmpl',
- help="Path to the cloud.cfg template")
+ "template",
+ nargs="?",
+ action="store",
+ default="./config/cloud.cfg.tmpl",
+ help="Path to the cloud.cfg template",
+ )
parser.add_argument(
- "output", nargs="?", action="store", default="-",
- help="Output file. Use '-' to write to stdout")
+ "output",
+ nargs="?",
+ action="store",
+ default="-",
+ help="Output file. Use '-' to write to stdout",
+ )
- args = parser.parse_args()
+ args = parser.parse_args(sys.argv[1:])
+ templater.render_cloudcfg(args.variant, args.template, args.output)
- with open(args.template, 'r') as fh:
- contents = fh.read()
- tpl_params = {'variant': args.variant}
- contents = (templater.render_string(contents, tpl_params)).rstrip() + "\n"
- util.load_yaml(contents)
- if args.output == "-":
- sys.stdout.write(contents)
- else:
- write_file(args.output, contents, omode="w")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
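The rewritten script delegates rendering to templater.render_cloudcfg; the removed body shows the older approach of rendering the template with a single "variant" parameter. A hedged sketch of that variant-keyed rendering, using plain jinja2 and a tiny inline template rather than the real config/cloud.cfg.tmpl:

    # Illustrative variant-keyed rendering, similar in spirit to the removed
    # inline body that used cloudinit.templater.render_string.
    import jinja2

    TEMPLATE = "distro: {{ 'debian-family' if variant in ['ubuntu', 'debian'] else variant }}\n"

    def render(variant):
        return jinja2.Template(TEMPLATE).render(variant=variant)

    print(render("rocky"), end="")
    print(render("ubuntu"), end="")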
diff --git a/tools/run-container b/tools/run-container
index 15948e77..e049dfdc 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -191,7 +191,7 @@ os_info() {
get_os_info() {
# run inside container, set OS_NAME, OS_VERSION
- # example OS_NAME are centos, debian, opensuse
+ # example OS_NAME are centos, debian, opensuse, rockylinux
[ -n "${OS_NAME:-}" -a -n "${OS_VERSION:-}" ] && return 0
if [ -f /etc/os-release ]; then
OS_NAME=$(sh -c '. /etc/os-release; echo $ID')
@@ -247,7 +247,7 @@ apt_install() {
install_packages() {
get_os_info || return
case "$OS_NAME" in
- centos) yum_install "$@";;
+ centos|rocky*) yum_install "$@";;
opensuse) zypper_install "$@";;
debian|ubuntu) apt_install "$@";;
*) error "Do not know how to install packages on ${OS_NAME}";
@@ -353,6 +353,7 @@ wait_for_boot() {
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
inside "$name" sh -c "sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo"
inside "$name" sh -c "sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
@@ -485,7 +486,7 @@ main() {
local build_pkg="" build_srcpkg="" pkg_ext="" distflag=""
case "$OS_NAME" in
- centos) distflag="--distro=redhat";;
+ centos|rocky) distflag="--distro=redhat";;
opensuse) distflag="--distro=suse";;
esac
@@ -494,7 +495,7 @@ main() {
build_pkg="./packages/bddeb -d"
build_srcpkg="./packages/bddeb -S -d"
pkg_ext=".deb";;
- centos|opensuse)
+ centos|opensuse|rocky)
build_pkg="./packages/brpm $distflag"
build_srcpkg="./packages/brpm $distflag --srpm"
pkg_ext=".rpm";;
diff --git a/tools/run-pyflakes b/tools/run-flake8
index 179afebe..0021cdb9 100755
--- a/tools/run-pyflakes
+++ b/tools/run-flake8
@@ -2,7 +2,7 @@
CR="
"
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
+pycheck_dirs=( "cloudinit/" "tests/" "tools/" "setup.py" )
set -f
if [ $# -eq 0 ]; then
@@ -11,7 +11,7 @@ else
files=( "$@" )
fi
-cmd=( "python3" -m "pyflakes" "${files[@]}" )
+cmd=( "python3" -m "flake8" "${files[@]}" )
echo "Running: " "${cmd[@]}" 1>&2
exec "${cmd[@]}"
diff --git a/tools/run-pep8 b/tools/run-pep8
deleted file mode 100755
index 4bd0bbfb..00000000
--- a/tools/run-pep8
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
-
-CR="
-"
-[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose=""
-
-set -f
-if [ $# -eq 0 ]; then unset IFS
- IFS="$CR"
- files=( "${bin_files[@]}" "${pycheck_dirs[@]}" )
- unset IFS
-else
- files=( "$@" )
-fi
-
-myname=${0##*/}
-cmd=( "${myname#run-}" $verbose "${files[@]}" )
-echo "Running: " "${cmd[@]}" 1>&2
-exec "${cmd[@]}"
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index d8bbcfcb..b5d77a97 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -12,8 +12,8 @@ if __name__ == "__main__":
for fn in sys.argv[1:]:
sys.stdout.write("%s" % (fn))
try:
- fh = open(fn, 'rb')
- yaml.safe_load(fh.read().decode('utf-8'))
+ fh = open(fn, "rb")
+ yaml.safe_load(fh.read().decode("utf-8"))
fh.close()
sys.stdout.write(" - ok\n")
except Exception as e:
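A short usage sketch of the validation pattern above (example.yaml is a hypothetical input): yaml.safe_load either parses the document or raises, which is what drives the " - ok" versus error output.

    # Usage sketch for the pattern in tools/validate-yaml.py.
    import yaml

    def validate(path):
        try:
            with open(path, "rb") as fh:
                yaml.safe_load(fh.read().decode("utf-8"))
        except Exception as exc:
            return "%s - invalid: %s" % (path, exc)
        return "%s - ok" % path

    print(validate("example.yaml"))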
diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints
index 2a3dca7c..9409257d 100755
--- a/tools/write-ssh-key-fingerprints
+++ b/tools/write-ssh-key-fingerprints
@@ -1,39 +1,61 @@
#!/bin/sh
# This file is part of cloud-init. See LICENSE file for license information.
-logger_opts="-p user.info -t ec2"
-# rhels' version of logger_opts does not support long
-# for of -s (--stderr), so use short form.
-logger_opts="$logger_opts -s"
+do_syslog() {
+ log_message=$1
+
+    # RHEL's version of logger does not support the long
+    # form of -s (--stderr), so use the short form.
+ logger_opts="-s"
+
+ # Need to end the options list with "--" to ensure that any minus symbols
+ # in the text passed to logger are not interpreted as logger options.
+ logger_opts="$logger_opts -p user.info -t cloud-init --"
+
+    # shellcheck disable=SC2086 # logger gives an error if $logger_opts is quoted
+ logger $logger_opts "$log_message"
+}
+
# Redirect stderr to stdout
exec 2>&1
fp_blist=",${1},"
key_blist=",${2},"
-{
-echo
-echo "#############################################################"
-echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
+
+fingerprint_header_shown=0
for f in /etc/ssh/ssh_host_*key.pub; do
[ -f "$f" ] || continue
- read ktype line < "$f"
+ # shellcheck disable=SC2034 # Unused "line" required for word splitting
+ read -r ktype line < "$f"
# skip the key if its type is in the blacklist
[ "${fp_blist#*,$ktype,}" = "${fp_blist}" ] || continue
- ssh-keygen -l -f "$f"
+ if [ $fingerprint_header_shown -eq 0 ]; then
+ do_syslog "#############################################################"
+ do_syslog "-----BEGIN SSH HOST KEY FINGERPRINTS-----"
+ fingerprint_header_shown=1
+ fi
+ do_syslog "$(ssh-keygen -l -f "$f")"
done
-echo "-----END SSH HOST KEY FINGERPRINTS-----"
-echo "#############################################################"
-
-} | logger $logger_opts
+if [ $fingerprint_header_shown -eq 1 ]; then
+ do_syslog "-----END SSH HOST KEY FINGERPRINTS-----"
+ do_syslog "#############################################################"
+fi
-echo "-----BEGIN SSH HOST KEY KEYS-----"
+key_header_shown=0
for f in /etc/ssh/ssh_host_*key.pub; do
[ -f "$f" ] || continue
- read ktype line < "$f"
+ # shellcheck disable=SC2034 # Unused "line" required for word splitting
+ read -r ktype line < "$f"
# skip the key if its type is in the blacklist
[ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue
- cat $f
+ if [ $key_header_shown -eq 0 ]; then
+ echo "-----BEGIN SSH HOST KEY KEYS-----"
+ key_header_shown=1
+ fi
+ cat "$f"
done
-echo "-----END SSH HOST KEY KEYS-----"
+if [ $key_header_shown -eq 1 ]; then
+ echo "-----END SSH HOST KEY KEYS-----"
+fi
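The reworked script above keeps one flow for both sections: skip blacklisted key types, then emit one line per host key, printing the headers only when at least one key survives the filter. An illustrative Python rendering of the fingerprint half (the blacklist default here is just an example):

    # Illustrative rendering of the write-ssh-key-fingerprints flow.
    import glob
    import subprocess

    def fingerprint_host_keys(blacklist=("ssh-dss",)):
        lines = []
        for path in sorted(glob.glob("/etc/ssh/ssh_host_*key.pub")):
            with open(path) as fh:
                parts = fh.read().split()
            if not parts or parts[0] in blacklist:
                continue                      # skip empty files and blacklisted types
            out = subprocess.run(
                ["ssh-keygen", "-l", "-f", path],
                capture_output=True, text=True, check=True,
            )
            lines.append(out.stdout.strip())
        return lines

    if __name__ == "__main__":
        for line in fingerprint_host_keys():
            print(line)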
diff --git a/tox.ini b/tox.ini
index 022b918d..c494cb94 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,127 +1,153 @@
[tox]
-envlist = py3, xenial-dev, flake8, pylint
+envlist = py3, lowest-supported-dev, black, flake8, isort, mypy, pylint
recreate = True
[testenv]
-commands = {envpython} -m pytest {posargs:tests/unittests cloudinit}
+basepython = python3
setenv =
LC_ALL = en_US.utf-8
passenv=
PYTEST_ADDOPTS
+[format_deps]
+black==21.12b0
+flake8==3.9.2
+isort==5.10.1
+mypy==0.931
+pylint==2.11.1
+pytest==7.0.0
+types-PyYAML==6.0.4
+types-requests==2.27.8
+types-setuptools==57.4.9
+
[testenv:flake8]
-basepython = python3
deps =
- flake8==3.8.2
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
-
-# https://github.com/gabrielfalcao/HTTPretty/issues/223
-setenv =
- LC_ALL = en_US.utf-8
+ flake8=={[format_deps]flake8}
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
[testenv:pylint]
-basepython = python3
deps =
- # requirements
- pylint==2.6.0
- # test-requirements because unit tests are now present in cloudinit tree
+ pylint=={[format_deps]pylint}
-r{toxinidir}/test-requirements.txt
- -r{toxinidir}/cloud-tests-requirements.txt
-r{toxinidir}/integration-requirements.txt
commands = {envpython} -m pylint {posargs:cloudinit tests tools}
+[testenv:black]
+deps =
+ black=={[format_deps]black}
+commands = {envpython} -m black . --check
+
+[testenv:isort]
+deps =
+ isort=={[format_deps]isort}
+commands = {envpython} -m isort . --check-only
+
+[testenv:mypy]
+deps =
+ mypy=={[format_deps]mypy}
+ types-pyyaml=={[format_deps]types-PyYAML}
+ types-requests=={[format_deps]types-requests}
+ types-setuptools=={[format_deps]types-setuptools}
+ pytest=={[format_deps]pytest}
+commands = {envpython} -m mypy .
+
+[testenv:check_format]
+deps =
+ black=={[format_deps]black}
+ flake8=={[format_deps]flake8}
+ isort=={[format_deps]isort}
+ mypy=={[format_deps]mypy}
+ pylint=={[format_deps]pylint}
+ pytest=={[format_deps]pytest}
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/integration-requirements.txt
+commands =
+ {[testenv:black]commands}
+ {[testenv:flake8]commands}
+ {[testenv:isort]commands}
+ {[testenv:mypy]commands}
+ {[testenv:pylint]commands}
+
+[testenv:do_format]
+deps =
+ black=={[format_deps]black}
+ isort=={[format_deps]isort}
+commands =
+ {envpython} -m isort .
+ {envpython} -m black .
+
[testenv:py3]
-basepython = python3
deps =
-r{toxinidir}/test-requirements.txt
commands = {envpython} -m pytest \
--durations 10 \
{posargs:--cov=cloudinit --cov-branch \
- tests/unittests cloudinit}
+ tests/unittests}
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/test-requirements.txt
+[lowest-supported-deps]
+# Tox is going to install requirements from pip. This is fine for
+# testing python version compatibility, but when we build cloud-init, we are
+# building against the dependencies in the OS repo, not pip. The OS
+# dependencies will generally be older than what is found in pip.
-[flake8]
-# E226: missing whitespace around arithmetic operator
-# W503: line break before binary operator
-# W504: line break after binary operator
-ignore=E226,W503,W504
-exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
-per-file-ignores =
- cloudinit/cmd/main.py:E402
+# To obtain these versions, check the versions of these libraries
+# in the oldest supported Ubuntu distro.
-[testenv:doc]
-basepython = python3
+# httpretty isn't included here because python2.7 requires a higher version
+# than what's run on bionic, so we need two different definitions.
deps =
- -r{toxinidir}/doc-requirements.txt
-commands =
- {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
- doc8 doc/rtd
-
-[xenial-shared-deps]
-# The version of pytest in xenial doesn't work with Python 3.8, so we define
-# two xenial environments: [testenv:xenial] runs the tests with exactly the
-# version of pytest present in xenial, and is used in CI. [testenv:xenial-dev]
-# runs the tests with the lowest version of pytest that works with Python 3.8,
-# 3.0.7, but keeps the other dependencies at xenial's level.
-#
-# (This section is not a testenv, it is used to maintain a single definition of
-# the dependencies shared between the two xenial testenvs.)
-deps =
- # requirements
- jinja2==2.8
- pyyaml==3.11
- oauthlib==1.0.3
- pyserial==3.0.1
+ jinja2==2.10
+ oauthlib==2.0.6
+ pyserial==3.4
configobj==5.0.6
- requests==2.9.1
+ pyyaml==3.12
+ requests==2.18.4
+ jsonpatch==1.16
+ jsonschema==2.6.0
+ netifaces==0.10.4
# test-requirements
- pytest-catchlog==1.2.1
-
-[testenv:xenial]
-# When updating this commands definition, also update the definition in
-# [testenv:xenial-dev]. See the comment there for details.
-commands =
- python ./tools/pipremove jsonschema
- python -m pytest {posargs:tests/unittests cloudinit}
-basepython = python3
+ pytest==3.3.2
+ pytest-cov==2.5.1
+ # Needed by pytest and default causes failures
+ attrs==17.4.0
+
+[testenv:lowest-supported]
+# This definition will run on bionic with the version of httpretty
+# that runs there
deps =
- # Refer to the comment in [xenial-shared-deps] for details
- {[xenial-shared-deps]deps}
- httpretty==0.8.6
- jsonpatch==1.10
- pytest==2.8.7
-
-[testenv:xenial-dev]
-# This should be:
-# commands = {[testenv:xenial]commands}
-# but the version of pytest in xenial has a bug
-# (https://github.com/tox-dev/tox/issues/208) which means that the {posargs}
-# substitution variable is misparsed and causes a traceback. Ensure that any
-# changes here are reflected in [testenv:xenial].
-commands =
- python ./tools/pipremove jsonschema
- python -m pytest {posargs:tests/unittests cloudinit}
-basepython = {[testenv:xenial]basepython}
+ {[lowest-supported-deps]deps}
+ httpretty==0.8.14
+commands = {[testenv:py3]commands}
+
+[testenv:lowest-supported-dev]
+# The oldest httpretty version to work with Python 3.7+ is 0.9.5,
+# because it is the first to include this commit:
+# https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060
deps =
- # Refer to the comment in [xenial-shared-deps] for details
- {[xenial-shared-deps]deps}
- # httpretty in xenial is 0.8.6, not 0.9.5. The oldest version to work with
- # Python 3.7+ is 0.9.5, because it is the first to include this commit:
- # https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060
+ {[lowest-supported-deps]deps}
httpretty==0.9.5
- # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version
- # to work with python3.6 is 1.16 as found in Artful. To keep default
- # invocation of 'tox' happy, accept the difference in version here.
- jsonpatch==1.16
- pytest==3.0.7
+commands = {[testenv:py3]commands}
+
+[testenv:doc]
+deps =
+ -r{toxinidir}/doc-requirements.txt
+commands =
+ {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+ doc8 doc/rtd
[testenv:tip-flake8]
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
deps = flake8
+[testenv:tip-mypy]
+commands = {envpython} -m mypy --install-types --non-interactive .
+deps =
+ mypy
+ pytest
+ types-PyYAML
+ types-requests
+ types-setuptools
+
[testenv:tip-pylint]
commands = {envpython} -m pylint {posargs:cloudinit tests tools}
deps =
@@ -129,41 +155,42 @@ deps =
pylint
# test-requirements
-r{toxinidir}/test-requirements.txt
- -r{toxinidir}/cloud-tests-requirements.txt
-r{toxinidir}/integration-requirements.txt
-[testenv:citest]
-basepython = python3
-commands = {envpython} -m tests.cloud_tests {posargs}
-passenv = HOME TRAVIS
-deps =
- -r{toxinidir}/cloud-tests-requirements.txt
-
-# Until Xenial tox support is dropped or bumps to tox:2.3.2, reflect changes to
-# deps into testenv:integration-tests-ci: commands, passenv and deps.
-# This is due to (https://github.com/tox-dev/tox/issues/208) which means that
-# the {posargs} handling and substitutions won't do what we want until tox 2.3.2
-# Once Xenial is dropped, integration-tests-ci can use proper substitution
-# commands = {[testenv:integration-tests]commands}
[testenv:integration-tests]
-basepython = python3
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
deps =
-r{toxinidir}/integration-requirements.txt
+passenv = CLOUD_INIT_* PYCLOUDLIB_* SSH_AUTH_SOCK OS_*
[testenv:integration-tests-ci]
-commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
-deps =
- -r{toxinidir}/integration-requirements.txt
+commands = {[testenv:integration-tests]commands}
+deps = {[testenv:integration-tests]deps}
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS
setenv =
- PYTEST_ADDOPTS="-m ci"
+ PYTEST_ADDOPTS="-m ci and not adhoc"
+
+[testenv:integration-tests-jenkins]
+commands = {[testenv:integration-tests]commands}
+deps = {[testenv:integration-tests]deps}
+passenv = *_proxy CLOUD_INIT_* PYCLOUDLIB_* SSH_AUTH_SOCK OS_* GOOGLE_* GCP_*
+setenv =
+ PYTEST_ADDOPTS="-m not adhoc"
+
+[flake8]
+# E203: whitespace before ':', doesn't adhere to pep8 or black formatting
+# W503: line break before binary operator
+ignore=E203,W503
+exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
+per-file-ignores =
+ cloudinit/cmd/main.py:E402
[pytest]
-# TODO: s/--strict/--strict-markers/ once xenial support is dropped
-testpaths = cloudinit tests/unittests
+# TODO: s/--strict/--strict-markers/ once pytest version is high enough
+testpaths = tests/unittests
addopts = --strict
+log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
+log_date_format = %Y-%m-%d %H:%M:%S
markers =
allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
allow_all_subp: allow all subp usage (disable_subp_usage)
@@ -173,9 +200,16 @@ markers =
gce: test will only run on GCE platform
azure: test will only run on Azure platform
oci: test will only run on OCI platform
+ openstack: test will only run on openstack platform
+ lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
+ lxd_setup: specify callable to be called between init and start
+ lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
+ not_bionic: test cannot run on the bionic release
no_container: test cannot run in a container
user_data: the user data to be passed to the test instance
instance_name: the name to be used for the test instance
- sru_2020_11: test is part of the 2020/11 SRU verification
+ ubuntu: this test should run on Ubuntu
+ unstable: skip this test because it is flakey
+ adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
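For context on how the markers above are consumed (file and test names here are hypothetical): tests opt in via @pytest.mark.<name>, and the CI environments select on them through PYTEST_ADDOPTS, e.g. "-m ci and not adhoc" deselects anything marked adhoc.

    # Illustrative test module using markers registered in [pytest] above.
    import pytest

    @pytest.mark.adhoc
    def test_expensive_manual_check():
        # Deselected in CI because PYTEST_ADDOPTS includes "... and not adhoc".
        assert True

    @pytest.mark.unstable
    def test_known_flaky_behaviour():
        assert True

    def test_always_runs():
        assert True

Running pytest -m "not adhoc" against such a module selects only the last two tests.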