author    zsdc <taras@vyos.io>    2020-09-15 17:05:20 +0300
committer zsdc <taras@vyos.io>    2020-09-15 17:05:20 +0300
commit    7cd260b313267dc7123cb99a75d4555e24909cca (patch)
tree      f57f3db085a724df237ffa64b589c6bb6dd3b28f
parent    1a790ee102fd405e5c3a20a17a69ba0c118ed874 (diff)
parent    948bd9c1fcd08346cf8ec0551d7f6c2b234e896b (diff)
T2117: Cloud-init updated to 20.3
Merged with 20.3 tag from the upstream Cloud-init repository
-rw-r--r--  .github/workflows/cla.yml  47
-rw-r--r--  .github/workflows/stale.yml  24
-rw-r--r--  .gitignore  3
-rw-r--r--  .pylintrc  6
-rw-r--r--  .travis.yml  105
-rw-r--r--  ChangeLog  286
-rw-r--r--  HACKING.rst  574
-rw-r--r--  Makefile  58
-rw-r--r--  README.md  4
-rw-r--r--  cloudinit/analyze/dump.py  18
-rw-r--r--  cloudinit/analyze/show.py  48
-rw-r--r--  cloudinit/analyze/tests/test_boot.py  16
-rw-r--r--  cloudinit/analyze/tests/test_dump.py  20
-rw-r--r--  cloudinit/apport.py  1
-rw-r--r--  cloudinit/atomic_helper.py  4
-rw-r--r--  cloudinit/cmd/clean.py  5
-rw-r--r--  cloudinit/cmd/devel/logs.py  4
-rwxr-xr-x  cloudinit/cmd/devel/make_mime.py  114
-rw-r--r--  cloudinit/cmd/devel/parser.py  5
-rwxr-xr-x  cloudinit/cmd/devel/render.py  5
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py  3
-rw-r--r--  cloudinit/cmd/query.py  45
-rw-r--r--  cloudinit/cmd/tests/test_clean.py  1
-rw-r--r--  cloudinit/cmd/tests/test_main.py  2
-rw-r--r--  cloudinit/cmd/tests/test_query.py  392
-rw-r--r--  cloudinit/cmd/tests/test_status.py  1
-rw-r--r--  cloudinit/config/cc_apk_configure.py  263
-rw-r--r--  cloudinit/config/cc_apt_configure.py  582
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py  2
-rw-r--r--  cloudinit/config/cc_bootcmd.py  3
-rwxr-xr-x  cloudinit/config/cc_byobu.py  3
-rw-r--r--  cloudinit/config/cc_ca_certs.py  45
-rw-r--r--  cloudinit/config/cc_chef.py  403
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py  7
-rw-r--r--  cloudinit/config/cc_disk_setup.py  83
-rw-r--r--  cloudinit/config/cc_emit_upstart.py  8
-rw-r--r--  cloudinit/config/cc_fan.py  7
-rw-r--r--  cloudinit/config/cc_final_message.py  2
-rw-r--r--  cloudinit/config/cc_growpart.py  33
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py  98
-rw-r--r--  cloudinit/config/cc_keys_to_console.py  3
-rw-r--r--  cloudinit/config/cc_landscape.py  3
-rw-r--r--  cloudinit/config/cc_locale.py  65
-rw-r--r--  cloudinit/config/cc_lxd.py  23
-rw-r--r--  cloudinit/config/cc_mcollective.py  3
-rw-r--r--  cloudinit/config/cc_mounts.py  67
-rw-r--r--  cloudinit/config/cc_ntp.py  110
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py  3
-rw-r--r--  cloudinit/config/cc_phone_home.py  3
-rw-r--r--  cloudinit/config/cc_power_state_change.py  72
-rw-r--r--  cloudinit/config/cc_puppet.py  13
-rw-r--r--  cloudinit/config/cc_resizefs.py  23
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py  19
-rw-r--r--  cloudinit/config/cc_rsyslog.py  11
-rw-r--r--  cloudinit/config/cc_salt_minion.py  4
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py  4
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py  4
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py  4
-rw-r--r--  cloudinit/config/cc_scripts_user.py  4
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py  3
-rw-r--r--  cloudinit/config/cc_seed_random.py  5
-rw-r--r--  cloudinit/config/cc_set_hostname.py  3
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py  7
-rw-r--r--  cloudinit/config/cc_snap.py  39
-rw-r--r--  cloudinit/config/cc_spacewalk.py  8
-rwxr-xr-x  cloudinit/config/cc_ssh.py  43
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py  6
-rwxr-xr-x  cloudinit/config/cc_ssh_import_id.py  5
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py  13
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py  9
-rw-r--r--  cloudinit/config/cc_users_groups.py  7
-rw-r--r--  cloudinit/config/cc_write_files.py  191
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py  4
-rw-r--r--  cloudinit/config/schema.py  137
-rw-r--r--  cloudinit/config/tests/test_disable_ec2_metadata.py  14
-rw-r--r--  cloudinit/config/tests/test_final_message.py  46
-rw-r--r--  cloudinit/config/tests/test_grub_dpkg.py  176
-rw-r--r--  cloudinit/config/tests/test_mounts.py  28
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py  86
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py  38
-rw-r--r--  cloudinit/config/tests/test_snap.py  60
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py  28
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py  33
-rw-r--r--  cloudinit/config/tests/test_users_groups.py  10
-rwxr-xr-x  cloudinit/distros/__init__.py  190
-rw-r--r--  cloudinit/distros/alpine.py  165
-rw-r--r--  cloudinit/distros/arch.py  21
-rw-r--r--  cloudinit/distros/bsd.py  129
-rw-r--r--  cloudinit/distros/bsd_utils.py  50
-rw-r--r--  cloudinit/distros/debian.py  9
-rw-r--r--  cloudinit/distros/freebsd.py  148
-rw-r--r--  cloudinit/distros/gentoo.py  17
-rw-r--r--  cloudinit/distros/netbsd.py  159
-rw-r--r--  cloudinit/distros/networking.py  212
-rw-r--r--  cloudinit/distros/openbsd.py  52
-rw-r--r--  cloudinit/distros/opensuse.py  12
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py  7
-rw-r--r--  cloudinit/distros/rhel.py  9
-rw-r--r--  cloudinit/distros/tests/__init__.py  0
-rw-r--r--  cloudinit/distros/tests/test_init.py  156
-rw-r--r--  cloudinit/distros/tests/test_networking.py  192
-rw-r--r--  cloudinit/distros/ubuntu.py  2
-rw-r--r--  cloudinit/features.py  44
-rw-r--r--  cloudinit/gpg.py  19
-rw-r--r--  cloudinit/handlers/boot_hook.py  5
-rw-r--r--  cloudinit/handlers/jinja_template.py  3
-rw-r--r--  cloudinit/handlers/upstart_job.py  11
-rw-r--r--  cloudinit/helpers.py  4
-rw-r--r--  cloudinit/log.py  17
-rw-r--r--  cloudinit/net/__init__.py  192
-rw-r--r--  cloudinit/net/bsd.py  167
-rwxr-xr-x  cloudinit/net/cmdline.py  52
-rw-r--r--  cloudinit/net/dhcp.py  50
-rw-r--r--  cloudinit/net/eni.py  15
-rw-r--r--  cloudinit/net/freebsd.py  176
-rw-r--r--  cloudinit/net/netbsd.py  44
-rw-r--r--  cloudinit/net/netplan.py  17
-rw-r--r--  cloudinit/net/network_state.py  32
-rw-r--r--  cloudinit/net/openbsd.py  46
-rw-r--r--  cloudinit/net/renderers.py  7
-rw-r--r--  cloudinit/net/sysconfig.py  21
-rw-r--r--  cloudinit/net/tests/test_dhcp.py  108
-rw-r--r--  cloudinit/net/tests/test_init.py  172
-rw-r--r--  cloudinit/net/tests/test_network_state.py  10
-rw-r--r--  cloudinit/netinfo.py  75
-rw-r--r--  cloudinit/reporting/events.py  23
-rwxr-xr-x  cloudinit/reporting/handlers.py  39
-rw-r--r--  cloudinit/serial.py  2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py  8
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py  167
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  3
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py  11
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py  2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  200
-rw-r--r--  cloudinit/sources/DataSourceGCE.py  2
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py  15
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py  6
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py  5
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py  24
-rw-r--r--  cloudinit/sources/DataSourceOVF.py  63
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py  54
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py  9
-rw-r--r--  cloudinit/sources/DataSourceOracle.py  396
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py  34
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  9
-rw-r--r--  cloudinit/sources/__init__.py  55
-rwxr-xr-x  cloudinit/sources/helpers/azure.py  524
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py  21
-rw-r--r--  cloudinit/sources/helpers/hetzner.py  19
-rw-r--r--  cloudinit/sources/helpers/netlink.py  3
-rw-r--r--  cloudinit/sources/helpers/openstack.py  60
-rw-r--r--  cloudinit/sources/helpers/tests/test_netlink.py  167
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py  44
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py  26
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py  3
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py  1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py  1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py  7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py  1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py  48
-rw-r--r--  cloudinit/sources/tests/test_init.py  156
-rw-r--r--  cloudinit/sources/tests/test_oracle.py  1032
-rw-r--r--  cloudinit/ssh_util.py  48
-rw-r--r--  cloudinit/stages.py  3
-rw-r--r--  cloudinit/subp.py  334
-rw-r--r--  cloudinit/templater.py  4
-rw-r--r--  cloudinit/tests/helpers.py  72
-rw-r--r--  cloudinit/tests/test_conftest.py  65
-rw-r--r--  cloudinit/tests/test_features.py  60
-rw-r--r--  cloudinit/tests/test_gpg.py  10
-rw-r--r--  cloudinit/tests/test_netinfo.py  40
-rw-r--r--  cloudinit/tests/test_subp.py  227
-rw-r--r--  cloudinit/tests/test_url_helper.py  34
-rw-r--r--  cloudinit/tests/test_util.py  210
-rw-r--r--  cloudinit/url_helper.py  21
-rw-r--r--  cloudinit/user_data.py  39
-rw-r--r--  cloudinit/util.py  643
-rw-r--r--  cloudinit/version.py  2
-rw-r--r--  config/cloud.cfg.d/05_logging.cfg  2
-rw-r--r--  config/cloud.cfg.tmpl  60
-rw-r--r--  conftest.py  183
-rw-r--r--  doc/examples/cloud-config-apt.txt  43
-rw-r--r--  doc/examples/cloud-config-boot-cmds.txt  4
-rw-r--r--  doc/examples/cloud-config-chef-oneiric.txt  115
-rw-r--r--  doc/examples/cloud-config-chef.txt  87
-rw-r--r--  doc/examples/cloud-config-datasources.txt  8
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt  331
-rw-r--r--  doc/examples/cloud-config-landscape.txt  1
-rw-r--r--  doc/examples/cloud-config-mcollective.txt  82
-rw-r--r--  doc/examples/cloud-config-mount-points.txt  6
-rw-r--r--  doc/examples/cloud-config-phone-home.txt  10
-rw-r--r--  doc/examples/cloud-config-power-state.txt  10
-rw-r--r--  doc/examples/cloud-config-puppet.txt  88
-rw-r--r--  doc/examples/cloud-config-reporting.txt  22
-rw-r--r--  doc/examples/cloud-config-rh_subscription.txt  48
-rw-r--r--  doc/examples/cloud-config-rsyslog.txt  43
-rw-r--r--  doc/examples/cloud-config-user-groups.txt  29
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt  4
-rw-r--r--  doc/examples/cloud-config-write-files.txt  42
-rw-r--r--  doc/examples/cloud-config-yum-repo.txt  24
-rw-r--r--  doc/examples/cloud-config.txt  55
-rw-r--r--  doc/examples/kernel-cmdline.txt  13
-rw-r--r--  doc/rtd/conf.py  2
-rw-r--r--  doc/rtd/index.rst  1
-rw-r--r--  doc/rtd/topics/availability.rst  19
-rw-r--r--  doc/rtd/topics/boot.rst  4
-rw-r--r--  doc/rtd/topics/cli.rst  16
-rw-r--r--  doc/rtd/topics/code_review.rst  256
-rw-r--r--  doc/rtd/topics/datasources/azure.rst  16
-rw-r--r--  doc/rtd/topics/datasources/cloudstack.rst  26
-rw-r--r--  doc/rtd/topics/datasources/ec2.rst  27
-rw-r--r--  doc/rtd/topics/datasources/maas.rst  2
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst  20
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst  17
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst  19
-rw-r--r--  doc/rtd/topics/faq.rst  21
-rw-r--r--  doc/rtd/topics/format.rst  55
-rw-r--r--  doc/rtd/topics/instancedata.rst  363
-rw-r--r--  doc/rtd/topics/modules.rst  1
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst  10
-rw-r--r--  doc/rtd/topics/network-config.rst  26
-rw-r--r--  doc/rtd/topics/tests.rst  66
-rw-r--r--  integration-requirements.txt  1
-rwxr-xr-x  packages/bddeb  37
-rwxr-xr-x  packages/brpm  25
-rw-r--r--  packages/debian/control.in  3
-rwxr-xr-x  packages/debian/rules (renamed from packages/debian/rules.in)  6
-rw-r--r--  packages/pkg-deps.json  58
-rw-r--r--  packages/redhat/cloud-init.spec.in  10
-rw-r--r--  requirements.txt  3
-rwxr-xr-x  setup.py  41
-rwxr-xr-x  systemd/cloud-init-generator.tmpl  2
-rw-r--r--  systemd/cloud-init.service.tmpl  2
-rwxr-xr-x  sysvinit/netbsd/cloudconfig  17
-rwxr-xr-x  sysvinit/netbsd/cloudfinal  16
-rwxr-xr-x  sysvinit/netbsd/cloudinit  16
-rwxr-xr-x  sysvinit/netbsd/cloudinitlocal  18
-rw-r--r--  templates/chef_client.rb.tmpl  3
-rw-r--r--  templates/chrony.conf.alpine.tmpl  38
-rw-r--r--  templates/hosts.alpine.tmpl  28
-rw-r--r--  templates/hosts.freebsd.tmpl  9
-rw-r--r--  templates/hosts.suse.tmpl  2
-rw-r--r--  templates/ntp.conf.alpine.tmpl  10
-rw-r--r--  templates/resolv.conf.tmpl  14
-rw-r--r--  test-requirements.txt  9
-rw-r--r--  tests/cloud_tests/bddeb.py  8
-rw-r--r--  tests/cloud_tests/platforms/__init__.py  4
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/instance.py  9
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/platform.py  23
-rw-r--r--  tests/cloud_tests/platforms/ec2/instance.py  4
-rw-r--r--  tests/cloud_tests/platforms/ec2/platform.py  18
-rw-r--r--  tests/cloud_tests/platforms/images.py  1
-rw-r--r--  tests/cloud_tests/platforms/instances.py  4
-rw-r--r--  tests/cloud_tests/platforms/lxd/image.py  38
-rw-r--r--  tests/cloud_tests/platforms/lxd/instance.py  6
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/image.py  12
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/instance.py  10
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/platform.py  5
-rw-r--r--  tests/cloud_tests/platforms/platforms.py  15
-rw-r--r--  tests/cloud_tests/platforms/snapshots.py  1
-rw-r--r--  tests/cloud_tests/releases.yaml  52
-rw-r--r--  tests/cloud_tests/testcases/__init__.py  14
-rw-r--r--  tests/cloud_tests/testcases/base.py  70
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml  73
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_chrony.py  4
-rw-r--r--  tests/cloud_tests/util.py  40
-rw-r--r--  tests/cloud_tests/verify.py  4
-rw-r--r--  tests/unittests/test_builtin_handlers.py  9
-rw-r--r--  tests/unittests/test_cli.py  6
-rw-r--r--  tests/unittests/test_cs_util.py  2
-rw-r--r--  tests/unittests/test_data.py  61
-rw-r--r--  tests/unittests/test_datasource/test_aliyun.py  2
-rw-r--r--  tests/unittests/test_datasource/test_altcloud.py  7
-rw-r--r--  tests/unittests/test_datasource/test_azure.py  114
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py  406
-rw-r--r--  tests/unittests/test_datasource/test_cloudsigma.py  6
-rw-r--r--  tests/unittests/test_datasource/test_cloudstack.py  2
-rw-r--r--  tests/unittests/test_datasource/test_ec2.py  395
-rw-r--r--  tests/unittests/test_datasource/test_gce.py  3
-rw-r--r--  tests/unittests/test_datasource/test_hetzner.py  23
-rw-r--r--  tests/unittests/test_datasource/test_ibmcloud.py  7
-rw-r--r--  tests/unittests/test_datasource/test_maas.py  1
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py  17
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py  157
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py  20
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py  93
-rw-r--r--  tests/unittests/test_datasource/test_rbx.py  34
-rw-r--r--  tests/unittests/test_datasource/test_scaleway.py  81
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py  20
-rw-r--r--  tests/unittests/test_distros/test_bsd_utils.py  67
-rw-r--r--  tests/unittests/test_distros/test_create_users.py  8
-rw-r--r--  tests/unittests/test_distros/test_debian.py  2
-rw-r--r--  tests/unittests/test_distros/test_freebsd.py  4
-rw-r--r--  tests/unittests/test_distros/test_generic.py  190
-rw-r--r--  tests/unittests/test_distros/test_netbsd.py  17
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py  7
-rw-r--r--  tests/unittests/test_distros/test_user_data_normalize.py  6
-rw-r--r--  tests/unittests/test_ds_identify.py  59
-rw-r--r--  tests/unittests/test_handler/test_handler_apk_configure.py  299
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py  11
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py  5
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v1.py  20
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py  58
-rw-r--r--  tests/unittests/test_handler/test_handler_bootcmd.py  6
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py  44
-rw-r--r--  tests/unittests/test_handler/test_handler_chef.py  16
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py  8
-rw-r--r--  tests/unittests/test_handler/test_handler_etc_hosts.py  8
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py  16
-rw-r--r--  tests/unittests/test_handler/test_handler_landscape.py  6
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py  4
-rw-r--r--  tests/unittests/test_handler/test_handler_lxd.py  28
-rw-r--r--  tests/unittests/test_handler/test_handler_mcollective.py  7
-rw-r--r--  tests/unittests/test_handler/test_handler_mounts.py  138
-rw-r--r--  tests/unittests/test_handler/test_handler_ntp.py  180
-rw-r--r--  tests/unittests/test_handler/test_handler_power_state.py  29
-rw-r--r--  tests/unittests/test_handler/test_handler_puppet.py  51
-rw-r--r--  tests/unittests/test_handler/test_handler_runcmd.py  4
-rw-r--r--  tests/unittests/test_handler/test_handler_seed_random.py  5
-rw-r--r--  tests/unittests/test_handler/test_handler_spacewalk.py  20
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files.py  85
-rw-r--r--  tests/unittests/test_handler/test_handler_yum_add_repo.py  17
-rw-r--r--  tests/unittests/test_handler/test_handler_zypper_add_repo.py  11
-rw-r--r--  tests/unittests/test_handler/test_schema.py  112
-rw-r--r--  tests/unittests/test_net.py  168
-rw-r--r--  tests/unittests/test_net_freebsd.py  2
-rw-r--r--  tests/unittests/test_render_cloudcfg.py  59
-rw-r--r--  tests/unittests/test_reporting.py  1
-rw-r--r--  tests/unittests/test_reporting_hyperv.py  93
-rw-r--r--  tests/unittests/test_rh_subscription.py  18
-rw-r--r--  tests/unittests/test_sshutil.py  271
-rw-r--r--  tests/unittests/test_templating.py  2
-rw-r--r--  tests/unittests/test_util.py  408
-rw-r--r--  tests/unittests/test_vmware/test_guestcust_util.py  46
-rw-r--r--  tests/unittests/test_vmware_config_file.py  22
-rw-r--r--  tools/.github-cla-signers  22
-rw-r--r--  tools/.lp-to-git-user  5
-rwxr-xr-x  tools/build-on-freebsd  3
-rwxr-xr-x  tools/build-on-netbsd  36
-rwxr-xr-x  tools/build-on-openbsd  27
-rwxr-xr-x  tools/ccfg-merge-debug  2
-rwxr-xr-x  tools/ds-identify  9
-rwxr-xr-x  tools/make-mime.py  62
-rwxr-xr-x  tools/mock-meta.py  16
-rwxr-xr-x  tools/pipremove  2
-rwxr-xr-x  tools/read-dependencies  65
-rwxr-xr-x  tools/read-version  2
-rwxr-xr-x  tools/render-cloudcfg  6
-rwxr-xr-x  tools/run-container  22
-rwxr-xr-x  tools/run-pyflakes  3
-rwxr-xr-x  tools/run-pyflakes3  2
-rwxr-xr-x  tools/tox-venv  2
-rwxr-xr-x  tools/validate-yaml.py  2
-rw-r--r--  tox.ini  144
356 files changed, 14411 insertions, 5832 deletions
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 34e11c2d..8a0b2c07 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -8,22 +8,33 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- - run: |
- echo "::set-env name=CLA_SIGNED::$(grep -q ': \"${{ github.actor }}\"' ./tools/.lp-to-git-user && echo CLA signed || echo CLA not signed)"
- - name: Add CLA label
+ - name: Check CLA signing status for ${{ github.event.pull_request.user.login }}
run: |
- # POST a new label to this issue
- curl --request POST \
- --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
- --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- --header 'content-type: application/json' \
- --data '{"labels": ["${{env.CLA_SIGNED}}"]}'
- - name: Comment about CLA signing
- if: env.CLA_SIGNED == 'CLA not signed'
- run: |
- # POST a comment directing submitter to sign the CLA
- curl --request POST \
- --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/comments \
- --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- --header 'content-type: application/json' \
- --data '{"body": "Hello ${{ github.actor }},\n\nThank you for your contribution to cloud-init.\n\nIn order for us to merge this pull request, you need\nto have signed the Contributor License Agreement (CLA).\nPlease ensure that you have signed the CLA by following our\nhacking guide at:\n\nhttps://cloudinit.readthedocs.io/en/latest/topics/hacking.html\n\nThanks,\nYour friendly cloud-init upstream\n"}'
+ cat > unsigned-cla.txt <<EOF
+ Hello ${{ github.event.pull_request.user.login }},
+
+ Thank you for your contribution to cloud-init.
+
+ In order for us to merge this pull request, you need
+ to have signed the Contributor License Agreement (CLA).
+ Please sign the CLA by following our
+ hacking guide at:
+ https://cloudinit.readthedocs.io/en/latest/topics/hacking.html
+
+ Thanks,
+ Your friendly cloud-init upstream
+ EOF
+
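+ # Return 0 if the given username appears in either CLA list:
+ # tools/.lp-to-git-user (the Launchpad-to-GitHub username mapping) or
+ # tools/.github-cla-signers (one GitHub username per line).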
+ has_signed() {
+ username="$1"
+ grep -q ": \"$username\"" ./tools/.lp-to-git-user && return 0
+ grep -q "^$username$" ./tools/.github-cla-signers && return 0
+ return 1
+ }
+
+ if has_signed "${{ github.event.pull_request.user.login }}"; then
+ echo "Thanks ${{ github.event.pull_request.user.login }} for signing cloud-init's CLA"
+ else
+ cat unsigned-cla.txt
+ exit 1
+ fi
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000..20c5735d
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,24 @@
+name: Mark and close stale pull requests
+
+on:
+ schedule:
+ - cron: "0 0 * * *" # Daily @ 00:00
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ days-before-stale: 14
+ days-before-close: 7
+ stale-pr-message: |
+ Hello! Thank you for this proposed change to cloud-init. This pull request is now marked as stale as it has not seen any activity in 14 days. If no activity occurs within the next 7 days, this pull request will automatically close.
+
+ If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging mitechie, and he will ensure that someone takes a look soon.
+
+ (If the pull request is closed, please do feel free to reopen it if you wish to continue working on it.)
+ stale-pr-label: 'stale-pr'
diff --git a/.gitignore b/.gitignore
index 9e19c618..3589b210 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,9 @@ stage
*.cover
.idea/
.venv/
+.pc/
+.cache/
+.mypy_cache/
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/.pylintrc b/.pylintrc
index c83546a6..94a81d0e 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -7,8 +7,6 @@ jobs=4
[MESSAGES CONTROL]
# Errors and warnings with some filtered:
-# W0105(pointless-string-statement)
-# W0107(unnecessary-pass)
# W0201(attribute-defined-outside-init)
# W0212(protected-access)
# W0221(arguments-differ)
@@ -20,7 +18,6 @@ jobs=4
# W0602(global-variable-not-assigned)
# W0603(global-statement)
# W0611(unused-import)
-# W0612(unused-variable)
# W0613(unused-argument)
# W0621(redefined-outer-name)
# W0622(redefined-builtin)
@@ -28,7 +25,7 @@ jobs=4
# W0703(broad-except)
# W1401(anomalous-backslash-in-string)
-disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
+disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
[REPORTS]
@@ -50,7 +47,6 @@ ignored-modules=
http.client,
httplib,
pkg_resources,
- six.moves,
# cloud_tests requirements.
boto3,
botocore,
diff --git a/.travis.yml b/.travis.yml
index d2651c0b..4c5bf4c4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,29 @@
language: python
dist: bionic
+# We use two different caching strategies. The default is to cache pip
+# packages (as most of our jobs use pip packages), which is configured here.
+# For the integration tests, we instead want to cache the lxd images and
+# package build schroot.
+#
+# We cache the lxd images because this saves a few seconds in the general
+# case, but provides substantial speed-ups when cloud-images.ubuntu.com, the
+# source of the images, is under heavy load. The directory in which the lxd
+# images are stored (/var/snap/lxd/common/lxd/images/) is not
+# readable/writeable by the default user (which is a requirement for caching),
+# so we instead cache the `lxd_images/` directory. We move lxd images out of
+# there before we run tests and back in once tests are complete. We _move_ the
+# images out and only copy the most recent lxd image back into the cache, to
+# avoid our cache growing without bound. (We only need the most recent lxd
+# image because the integration tests only use a single image.)
+#
+# We cache the package build schroot because it saves 2-3 minutes per build.
+# Without caching, we have to perform a debootstrap for every build. We update
+# the schroot before storing it back in the cache, to ensure that we aren't
+# just using an increasingly-old schroot as time passes. The cached schroot is
+# stored as a tarball, to preserve permissions/ownership.
+cache: pip
+
install:
# Required so `git describe` will definitely find a tag; see
# https://github.com/travis-ci/travis-ci/issues/7422
@@ -16,13 +39,23 @@ matrix:
- python: 3.6
env:
TOXENV=py3
- NOSE_VERBOSE=2 # List all tests run by nose
- - install:
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
+ - if: NOT branch =~ /^ubuntu\//
+ cache:
+ - directories:
+ - lxd_images
+ - chroots
+ before_cache:
+ - |
+ # Find the most recent image file
+ latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
+ # This might be <hash>.rootfs or <hash>, normalise
+ latest_file="$(basename $latest_file .rootfs)"
+ # Find all files with that prefix and copy them to our cache dir
+ sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
+ install:
- git fetch --unshallow
- - sudo apt-get build-dep -y cloud-init
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox
- # These are build deps but not pulled in by the build-dep call above
- - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2
+ - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper
- pip install .
- pip install tox
# bionic has lxd from deb installed, remove it first to ensure
@@ -32,26 +65,68 @@ matrix:
- sudo snap install lxd
- sudo lxd init --auto
- sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
+ # Move any cached lxd images into lxd's image dir
+ - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- sudo usermod -a -G lxd $USER
- sudo sbuild-adduser $USER
- cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
script:
# Ubuntu LTS: Build
- - ./packages/bddeb -S
- # Use this to get a new shell where we're in the sbuild group
- - sudo -E su $USER -c 'mk-sbuild xenial'
- - sudo -E su $USER -c 'sbuild --nolog --verbose --dist=xenial cloud-init_*.dsc'
+ - ./packages/bddeb -S -d --release xenial
+ - |
+ needs_caching=false
+ if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then
+ # If we have a cached chroot, move it into place
+ sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64
+ sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64
+ # Write its configuration
+ cat > sbuild-xenial-amd64 << EOM
+ [xenial-amd64]
+ description=xenial-amd64
+ groups=sbuild,root,admin
+ root-groups=sbuild,root,admin
+ # Uncomment these lines to allow members of these groups to access
+ # the -source chroots directly (useful for automated updates, etc).
+ #source-root-users=sbuild,root,admin
+ #source-root-groups=sbuild,root,admin
+ type=directory
+ profile=sbuild
+ union-type=overlay
+ directory=/var/lib/schroot/chroots/xenial-amd64
+ EOM
+ sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/
+ sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64
+ # And ensure it's up-to-date.
+ before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)"
+ sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
+ after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)
+ if [ "$before_pkgs" != "$after_pkgs" ]; then
+ needs_caching=true
+ fi
+ else
+ # Otherwise, create the chroot
+ sudo -E su $USER -c 'mk-sbuild xenial'
+ needs_caching=true
+ fi
+ # If there are changes to the schroot (or it's entirely new),
+ # tar up the schroot (to preserve ownership/permissions) and
+ # move it into the cached dir; no need to compress it because
+ # Travis will do that anyway
+ if [ "$needs_caching" = "true" ]; then
+ sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 .
+ fi
+ # Use sudo to get a new shell where we're in the sbuild group
+ - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
# Ubuntu LTS: Integration
- sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
- python: 3.5
env:
TOXENV=xenial
- NOSE_VERBOSE=2 # List all tests run by nose
- # Travis doesn't support Python 3.4 on bionic, so use xenial
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
dist: xenial
- python: 3.6
- env: TOXENV=pycodestyle
- - python: 3.6
- env: TOXENV=pyflakes
+ env: TOXENV=flake8
- python: 3.6
env: TOXENV=pylint
+ - python: 3.6
+ env: TOXENV=doc
diff --git a/ChangeLog b/ChangeLog
index dd4f7add..3e680736 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,289 @@
+20.3
+ - Azure: Add netplan driver filter when using hv_netvsc driver (#539)
+ [James Falcon] (LP: #1830740)
+ - query: do not handle non-decodable non-gzipped content (#543)
+ - DHCP sandboxing failing on noexec mounted /var/tmp (#521) [Eduardo Otubo]
+ - Update the list of valid ssh keys. (#487)
+ [Ole-Martin Bratteng] (LP: #1877869)
+ - cmd: cloud-init query to handle compressed userdata (#516) (LP: #1889938)
+ - Pushing cloud-init log to the KVP (#529) [Moustafa Moustafa]
+ - Add Alpine Linux support. (#535) [dermotbradley]
+ - Detect kernel version before swap file creation (#428) [Eduardo Otubo]
+ - cli: add devel make-mime subcommand (#518)
+ - user-data: only verify mime-types for TYPE_NEEDED and x-shellscript
+ (#511) (LP: #1888822)
+ - DataSourceOracle: retry twice (and document why we retry at all) (#536)
+ - Refactor Azure report ready code (#468) [Johnson Shi]
+ - tox.ini: pin correct version of httpretty in xenial{,-dev} envs (#531)
+ - Support Oracle IMDSv2 API (#528) [James Falcon]
+ - .travis.yml: run a doc build during CI (#534)
+ - doc/rtd/topics/datasources/ovf.rst: fix doc8 errors (#533)
+ - Fix 'Users and Groups' configuration documentation (#530) [sshedi]
+ - cloudinit.distros: update docstrings of add_user and create_user (#527)
+ - Fix headers for device types in network v2 docs (#532)
+ [Caleb Xavier Berger]
+ - Add AlexBaranowski as contributor (#508) [Aleksander Baranowski]
+ - DataSourceOracle: refactor to use only OPC v1 endpoint (#493)
+ - .github/workflows/stale.yml: s/Josh/Rick/ (#526)
+ - Fix a typo in apt pipelining module (#525) [Xiao Liang]
+ - test_util: parametrize devlist tests (#523) [James Falcon]
+ - Recognize LABEL_FATBOOT labels (#513) [James Falcon] (LP: #1841466)
+ - Handle additional identifier for SLES For HPC (#520) [Robert Schweikert]
+ - Revert "test-requirements.txt: pin pytest to <6 (#512)" (#515)
+ - test-requirements.txt: pin pytest to <6 (#512)
+ - Add "tsanghan" as contributor (#504) [tsanghan]
+ - fix brpm building (LP: #1886107)
+ - Adding eandersson as a contributor (#502) [Erik Olof Gunnar Andersson]
+ - azure: disable bouncing hostname when setting hostname fails (#494)
+ [Anh Vo]
+ - VMware: Support parsing DEFAULT-RUN-POST-CUST-SCRIPT (#441)
+ [xiaofengw-vmware]
+ - DataSourceAzure: Use ValueError when JSONDecodeError is not available
+ (#490) [Anh Vo]
+ - cc_ca_certs.py: fix blank line problem when removing CAs and adding
+ new one (#483) [dermotbradley]
+ - freebsd: py37-serial is now py37-pyserial (#492) [Gonéri Le Bouder]
+ - ssh exit with non-zero status on disabled user (#472)
+ [Eduardo Otubo] (LP: #1170059)
+ - cloudinit: remove global disable of pylint W0107 and fix errors (#489)
+ - networking: refactor wait_for_physdevs from cloudinit.net (#466)
+ (LP: #1884626)
+ - HACKING.rst: add pytest.param pytest gotcha (#481)
+ - cloudinit: remove global disable of pylint W0105 and fix errors (#480)
+ - Fix two minor warnings (#475)
+ - test_data: fix faulty patch (#476)
+ - cc_mounts: handle missing fstab (#484) (LP: #1886531)
+ - LXD cloud_tests: support more lxd image formats (#482) [Paride Legovini]
+ - Add update_etc_hosts as default module on *BSD (#479) [Adam Dobrawy]
+ - cloudinit: fix tip-pylint failures and bump pinned pylint version (#478)
+ - Added BirknerAlex as contributor and sorted the file (#477)
+ [Alexander Birkner]
+ - Update list of types of modules in cli.rst [saurabhvartak1982]
+ - tests: use markers to configure disable_subp_usage (#473)
+ - Add mention of vendor-data to no-cloud format documentation (#470)
+ [Landon Kirk]
+ - Fix broken link to OpenStack metadata service docs (#467)
+ [Matt Riedemann]
+ - Disable ec2 mirror for non aws instances (#390)
+ [lucasmoura] (LP: #1456277)
+ - cloud_tests: don't pass --python-version to read-dependencies (#465)
+ - networking: refactor is_physical from cloudinit.net (#457) (LP: #1884619)
+ - Enable use of the caplog fixture in pytest tests, and add a
+ cc_final_message test using it (#461)
+ - RbxCloud: Add support for FreeBSD (#464) [Adam Dobrawy]
+ - Add schema for cc_chef module (#375) [lucasmoura] (LP: #1858888)
+ - test_util: add (partial) testing for util.mount_cb (#463)
+ - .travis.yml: revert to installing ubuntu-dev-tools (#460)
+ - HACKING.rst: add details of net refactor tracking (#456)
+ - .travis.yml: rationalise installation of dependencies in host (#449)
+ - Add dermotbradley as contributor. (#458) [dermotbradley]
+ - net/networking: remove unused functions/methods (#453)
+ - distros.networking: initial implementation of layout (#391)
+ - cloud-init.service.tmpl: use "rhel" instead of "redhat" (#452)
+ - Change from redhat to rhel in systemd generator tmpl (#450)
+ [Eduardo Otubo]
+ - Hetzner: support reading user-data that is base64 encoded. (#448)
+ [Scott Moser] (LP: #1884071)
+ - HACKING.rst: add strpath gotcha to testing gotchas section (#446)
+ - cc_final_message: don't create directories when writing boot-finished
+ (#445) (LP: #1883903)
+ - .travis.yml: only store new schroot if something has changed (#440)
+ - util: add ensure_dir_exists parameter to write_file (#443)
+ - printing the error stream of the dhclient process before killing it
+ (#369) [Moustafa Moustafa]
+ - Fix link to the MAAS documentation (#442)
+ [Paride Legovini] (LP: #1883666)
+ - RPM build: disable the dynamic mirror URLs when using a proxy (#437)
+ [Paride Legovini]
+ - util: rename write_file's copy_mode parameter to preserve_mode (#439)
+ - .travis.yml: use $TRAVIS_BUILD_DIR for lxd_image caching (#438)
+ - cli.rst: alphabetise devel subcommands and add net-convert to list (#430)
+ - Default to UTF-8 in /var/log/cloud-init.log (#427) [James Falcon]
+ - travis: cache the chroot we use for package builds (#429)
+ - test: fix all flake8 E126 errors (#425) [Joshua Powers]
+ - Fixes KeyError for bridge with no "parameters:" setting (#423)
+ [Brian Candler] (LP: #1879673)
+ - When tools.conf does not exist, running "vmware-toolbox-cmd config
+ get deployPkg enable-custom-scripts" returns EX_UNAVAILABLE(69);
+ do not treat this as an error. (#413) [chengcheng-chcheng]
+ - Document CloudStack data-server well-known hostname (#399) [Gregor Riepl]
+ - test: move conftest.py to top-level, to cover tests/ also (#414)
+ - Replace cc_chef is_installed with use of subp.is_exe. (#421)
+ [Scott Moser]
+ - Move runparts to subp. (#420) [Scott Moser]
+ - Move subp into its own module. (#416) [Scott Moser]
+ - readme: point at travis-ci.com (#417) [Joshua Powers]
+ - New feature flag functionality and fix includes failing silently (#367)
+ [James Falcon] (LP: #1734939)
+ - Enhance poll imds logging (#365) [Moustafa Moustafa]
+ - test: fix all flake8 E121 and E123 errors (#404) [Joshua Powers]
+ - test: fix all flake8 E241 (#403) [Joshua Powers]
+ - test: ignore flake8 E402 errors in main.py (#402) [Joshua Powers]
+ - cc_grub_dpkg: determine idevs in more robust manner with grub-probe
+ (#358) [Matthew Ruffell] (LP: #1877491)
+ - test: fix all flake8 E741 errors (#401) [Joshua Powers]
+ - tests: add groovy integration tests for ubuntu (#400)
+ - Enable chef_license support for chef infra client (#389) [Bipin Bachhao]
+ - testing: use flake8 again (#392) [Joshua Powers]
+ - enable Puppet, Chef mcollective in default config (#385)
+ [Mina Galić (deprecated: Igor Galić)] (LP: #1880279)
+ - HACKING.rst: introduce .net -> Networking refactor section (#384)
+ - Travis: do not install python3-contextlib2 (dropped dependency) (#388)
+ [Paride Legovini]
+ - HACKING: mention that .github-cla-signers is alpha-sorted (#380)
+ - Add bipinbachhao as contributor (#379) [Bipin Bachhao]
+ - cc_snap: validate that assertions property values are strings (#370)
+ - conftest: implement partial disable_subp_usage (#371)
+ - test_resolv_conf: refresh stale comment (#374)
+ - cc_snap: apply validation to snap.commands properties (#364)
+ - make finding libc platform independent (#366)
+ [Mina Galić (deprecated: Igor Galić)]
+ - doc/rtd/topics/faq: Updates LXD docs links to current site (#368) [TomP]
+ - templater: drop Jinja Python 2 compatibility shim (#353)
+ - cloudinit: minor pylint fixes (#360)
+ - cloudinit: remove unneeded __future__ imports (#362)
+ - migrating momousta lp user to Moustafa-Moustafa GitHub user (#361)
+ [Moustafa Moustafa]
+ - cloud_tests: emit dots on Travis while fetching images (#347)
+ - Add schema to apt configure config (#357) [lucasmoura] (LP: #1858884)
+ - conftest: add docs and tests regarding CiTestCase's subp functionality
+ (#343)
+ - analyze/dump: refactor shared string into variable (#350)
+ - doc: update boot.rst with correct timing of runcmd (#351)
+ - HACKING.rst: change contact info to Rick Harding (#359) [lucasmoura]
+ - HACKING.rst: guide people to add themselves to the CLA file (#349)
+ - HACKING.rst: more unit testing documentation (#354)
+ - .travis.yml: don't run lintian during integration test package builds
+ (#352)
+ - Add test to ensure docs examples are valid cloud-init configs (#355)
+ [James Falcon] (LP: #1876414)
+ - make suse and sles support 127.0.1.1 (#336) [chengcheng-chcheng]
+ - Create tests to validate schema examples (#348)
+ [lucasmoura] (LP: #1876412)
+ - analyze/dump: add support for Amazon Linux 2 log lines (#346)
+ (LP: #1876323)
+ - bsd: upgrade support (#305) [Gonéri Le Bouder]
+ - Add lucasmoura as contributor (#345) [lucasmoura]
+ - Add "therealfalcon" as contributor (#344) [James Falcon]
+ - Adapt the package building scripts to use Python 3 (#231)
+ [Paride Legovini]
+ - DataSourceEc2: use metadata's NIC ordering to determine route-metrics
+ (#342) (LP: #1876312)
+ - .travis.yml: introduce caching (#329)
+ - cc_locale: introduce schema (#335)
+ - doc/rtd/conf.py: bump copyright year to 2020 (#341)
+ - yum_add_repo: Add Centos to the supported distro list (#340)
+
+20.2
+ - doc/format: reference make-mime.py instead of an inline script (#334)
+ - Add docs about creating parent folders (#330) [Adrian Wilkins]
+ - DataSourceNoCloud/OVF: drop claim to support FTP (#333) (LP: #1875470)
+ - schema: ignore spurious pylint error (#332)
+ - schema: add json schema for write_files module (#152)
+ - BSD: find_devs_with_ refactoring (#298) [Gonéri Le Bouder]
+ - nocloud: drop work around for Linux 2.6 (#324) [Gonéri Le Bouder]
+ - cloudinit: drop dependencies on unittest2 and contextlib2 (#322)
+ - distros: handle a potential mirror filtering error case (#328)
+ - log: remove unnecessary import fallback logic (#327)
+ - .travis.yml: don't run integration test on ubuntu/* branches (#321)
+ - More unit test documentation (#314)
+ - conftest: introduce disable_subp_usage autouse fixture (#304)
+ - YAML align indent sizes for docs readability (#323) [Tak Nishigori]
+ - network_state: add missing space to log message (#325)
+ - tests: add missing mocks for get_interfaces_by_mac (#326) (LP: #1873910)
+ - test_mounts: expand happy path test for both happy paths (#319)
+ - cc_mounts: fix incorrect format specifiers (#316) (LP: #1872836)
+ - swap file "size" being used before checked if str (#315) [Eduardo Otubo]
+ - HACKING.rst: add pytest version gotchas section (#311)
+ - docs: Add steps to re-run cloud-id and cloud-init (#313) [Joshua Powers]
+ - readme: OpenBSD is now supported (#309) [Gonéri Le Bouder]
+ - net: ignore 'renderer' key in netplan config (#306) (LP: #1870421)
+ - Add support for NFS/EFS mounts (#300) [Andrew Beresford] (LP: #1870370)
+ - openbsd: set_passwd should not unlock user (#289) [Gonéri Le Bouder]
+ - tools/.github-cla-signers: add beezly as CLA signer (#301)
+ - util: remove unnecessary lru_cache import fallback (#299)
+ - HACKING.rst: reorganise/update CLA signature info (#297)
+ - distros: drop leading/trailing hyphens from mirror URL labels (#296)
+ - HACKING.rst: add note about variable annotations (#295)
+ - CiTestCase: stop using and remove sys_exit helper (#283)
+ - distros: replace invalid characters in mirror URLs with hyphens (#291)
+ (LP: #1868232)
+ - rbxcloud: gracefully handle arping errors (#262) [Adam Dobrawy]
+ - Fix cloud-init ignoring some misdeclared mimetypes in user-data.
+ [Kurt Garloff]
+ - net: ubuntu focal prioritize netplan over eni even if both present
+ (#267) (LP: #1867029)
+ - cloudinit: refactor util.is_ipv4 to net.is_ipv4_address (#292)
+ - net/cmdline: replace type comments with annotations (#294)
+ - HACKING.rst: add Type Annotations design section (#293)
+ - net: introduce is_ip_address function (#288)
+ - CiTestCase: remove now-unneeded parse_and_read helper method (#286)
+ - .travis.yml: allow 30 minutes of inactivity in cloud tests (#287)
+ - sources/tests/test_init: drop use of deprecated inspect.getargspec (#285)
+ - setup.py: drop NIH check_output implementation (#282)
+ - Identify SAP Converged Cloud as OpenStack [Silvio Knizek]
+ - add Openbsd support (#147) [Gonéri Le Bouder]
+ - HACKING.rst: add examples of the two test class types (#278)
+ - VMWware: support to update guest info gc status if enabled (#261)
+ [xiaofengw-vmware]
+ - Add lp-to-git mapping for kgarloff (#279)
+ - set_passwords: avoid chpasswd on BSD (#268) [Gonéri Le Bouder]
+ - HACKING.rst: add Unit Testing design section (#277)
+ - util: read_cc_from_cmdline handle urlencoded yaml content (#275)
+ - distros/tests/test_init: add tests for _get_package_mirror_info (#272)
+ - HACKING.rst: add links to new Code Review Process doc (#276)
+ - freebsd: ensure package update works (#273) [Gonéri Le Bouder]
+ - doc: introduce Code Review Process documentation (#160)
+ - tools: use python3 (#274)
+ - cc_disk_setup: fix RuntimeError (#270) (LP: #1868327)
+ - cc_apt_configure/util: combine search_for_mirror implementations (#271)
+ - bsd: boottime does not depend on the libc soname (#269)
+ [Gonéri Le Bouder]
+ - test_oracle,DataSourceOracle: sort imports (#266)
+ - DataSourceOracle: update .network_config docstring (#257)
+ - cloudinit/tests: remove unneeded with_logs configuration (#263)
+ - .travis.yml: drop stale comment (#255)
+ - .gitignore: add more common directories (#258)
+ - ec2: render network on all NICs and add secondary IPs as static (#114)
+ (LP: #1866930)
+ - ec2 json validation: fix the reference to the 'merged_cfg' key (#256)
+ [Paride Legovini]
+ - releases.yaml: quote the Ubuntu version numbers (#254) [Paride Legovini]
+ - cloudinit: remove six from packaging/tooling (#253)
+ - util/netbsd: drop six usage (#252)
+ - workflows: introduce stale pull request workflow (#125)
+ - cc_resolv_conf: introduce tests and stabilise output across Python
+ versions (#251)
+ - fix minor issue with resolv_conf template (#144) [andreaf74]
+ - doc: CloudInit also support NetBSD (#250) [Gonéri Le Bouder]
+ - Add Netbsd support (#62) [Gonéri Le Bouder]
+ - tox.ini: avoid substitution syntax that causes a traceback on xenial (#245)
+ - Add pub_key_ed25519 to cc_phone_home (#237) [Daniel Hensby]
+ - Introduce and use of a list of GitHub usernames that have signed CLA
+ (#244)
+ - workflows/cla.yml: use correct username for CLA check (#243)
+ - tox.ini: use xenial version of jsonpatch in CI (#242)
+ - workflows: CLA validation altered to fail status on pull_request (#164)
+ - tox.ini: bump pyflakes version to 2.1.1 (#239)
+ - cloudinit: move to pytest for running tests (#211)
+ - instance-data: add cloud-init merged_cfg and sys_info keys to json
+ (#214) (LP: #1865969)
+ - ec2: Do not fallback to IMDSv1 on EC2 (#216)
+ - instance-data: write redacted cfg to instance-data.json (#233)
+ (LP: #1865947)
+ - net: support network-config:disabled on the kernel commandline (#232)
+ (LP: #1862702)
+ - ec2: only redact token request headers in logs, avoid altering request
+ (#230) (LP: #1865882)
+ - docs: typo fixed: dta → data [Alexey Vazhnov]
+ - Fixes typo on Amazon Web Services (#217) [Nick Wales]
+ - Fix docs for OpenStack DMI Asset Tag (#228)
+ [Mark T. Voelker] (LP: #1669875)
+ - Add physical network type: cascading to openstack helpers (#200)
+ [sab-systems]
+ - tests: add focal integration tests for ubuntu (#225)
+
20.1
- ec2: Do not log IMDSv2 token values, instead use REDACTED (#219)
(LP: #1863943)
diff --git a/HACKING.rst b/HACKING.rst
index e050fa93..60c7b5e0 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -6,38 +6,35 @@ This document describes how to contribute changes to cloud-init.
It assumes you have a `GitHub`_ account, and refers to your GitHub user
as ``GH_USER`` throughout.
-Do these things once
-====================
+Submitting your first pull request
+==================================
-* To contribute, you must sign the Canonical `contributor license agreement`_
+Follow these steps to submit your first pull request to cloud-init:
- If you have already signed it as an individual, your Launchpad user will be
- listed in the `contributor-agreement-canonical`_ group. Unfortunately there
- is no easy way to check if an organization or company you are doing work for
- has signed. When signing the CLA and prompted for 'Project contact' or
- 'Canonical Project Manager' enter 'Josh Powers'.
+* To contribute to cloud-init, you must sign the Canonical `contributor
+ license agreement`_
- For first-time signers, or for existing contributors who have already signed
- the agreement in Launchpad, we need to verify the link between your
- `Launchpad`_ account and your `GitHub`_ account. To enable us to do this, we
- ask that you create a branch with both your Launchpad and GitHub usernames
- against both the Launchpad and GitHub cloud-init repositories. We've added a
- tool (``tools/migrate-lp-user-to-github``) to the cloud-init repository to
- handle this migration as automatically as possible.
+ * If you have already signed it as an individual, your Launchpad user
+ will be listed in the `contributor-agreement-canonical`_ group.
+ (Unfortunately there is no easy way to check if an organization or
+ company you are doing work for has signed.)
- The cloud-init team will review the two merge proposals and verify
- that the CLA has been signed for the Launchpad user and record the
- associated GitHub account. We will reply to the email address
- associated with your Launchpad account that you've been clear to
- contribute to cloud-init on GitHub.
+ * When signing it:
- If your company has signed the CLA for you, please contact us to help
- in verifying which launchad/GitHub accounts are associated with the
- company. For any questions or help with the process, please email:
+ * ensure that you fill in the GitHub username field.
+ * when prompted for 'Project contact' or 'Canonical Project
+ Manager', enter 'Rick Harding'.
- `Josh Powers <mailto:josh.powers@canonical.com>`_ with the subject: Cloud-Init CLA
+ * If your company has signed the CLA for you, please contact us to
+ help in verifying which Launchpad/GitHub accounts are associated
+ with the company.
- You also may contanct user ``powersj`` in ``#cloud-init`` channel via IRC freenode.
+ * For any questions or help with the process, please email `Rick
+ Harding <mailto:rick.harding@canonical.com>`_ with the subject,
+ "Cloud-Init CLA"
+
+ * You also may contact user ``rick_h`` in the ``#cloud-init``
+ channel on the Freenode IRC network.
* Configure git with your email and name for commit messages.
@@ -60,11 +57,46 @@ Do these things once
git remote add GH_USER git@github.com:GH_USER/cloud-init.git
git push GH_USER master
+* Read through the cloud-init `Code Review Process`_, so you understand
+ how your changes will end up in cloud-init's codebase.
+
+* Submit your first cloud-init pull request, adding yourself to the
+ in-repository list that we use to track CLA signatures:
+ `tools/.github-cla-signers`_
+
+ * See `PR #344`_ and `PR #345`_ for examples of what this pull
+ request should look like.
+
+ * Note that ``.github-cla-signers`` is sorted alphabetically.
+
+ * (If you already have a change that you want to submit, you can
+ also include the change to ``tools/.github-cla-signers`` in that
+ pull request, there is no need for two separate PRs.)
+
.. _GitHub: https://github.com
.. _Launchpad: https://launchpad.net
.. _repository: https://github.com/canonical/cloud-init
.. _contributor license agreement: https://ubuntu.com/legal/contributors
.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
+.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers
+.. _PR #344: https://github.com/canonical/cloud-init/pull/344
+.. _PR #345: https://github.com/canonical/cloud-init/pull/345
+
+Transferring CLA Signatures from Launchpad to Github
+----------------------------------------------------
+
+For existing contributors who have signed the agreement in Launchpad
+before the Github username field was included, we need to verify the
+link between your `Launchpad`_ account and your `GitHub`_ account. To
+enable us to do this, we ask that you create a branch with both your
+Launchpad and GitHub usernames against both the Launchpad and GitHub
+cloud-init repositories. We've added a tool
+(``tools/migrate-lp-user-to-github``) to the cloud-init repository to
+handle this migration as automatically as possible.
+
+The cloud-init team will review the two merge proposals and verify that
+the CLA has been signed for the Launchpad user and record the
+associated GitHub account.
Do these things for each feature or bug
=======================================
@@ -119,13 +151,15 @@ Do these things for each feature or bug
- Click 'Create Pull Request'
Then, someone in the `Ubuntu Server`_ team will review your changes and
-follow up in the pull request.
+follow up in the pull request. Look at the `Code Review Process`_ doc
+to understand the following steps.
Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
.. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server
+.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html
Design
======
@@ -138,3 +172,491 @@ Cloud Config Modules
* Any new modules should use underscores in any new config options and not
hyphens (e.g. `new_option` and *not* `new-option`).
+
+Unit Testing
+------------
+
+cloud-init uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+The following guidelines should be followed:
+
+* For ease of organisation and greater accessibility for developers not
+ familiar with pytest, all cloud-init unit tests must be contained
+ within test classes
+
+ * Put another way, module-level test functions should not be used
+
+* pytest test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance
+
+* As all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and pytest test classes within the same
+ test file
+
+ * These can be easily distinguished by their definition: pytest
+ classes will not use inheritance at all (e.g.
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
+ subclass (indirectly) from ``TestCase`` (e.g.
+ `TestPrependBaseCommands`_)
+
+* pytest tests should use bare ``assert`` statements, to take advantage
+ of pytest's `assertion introspection`_
+
+ * For ``==`` and other commutative assertions, the expected value
+ should be placed before the value under test:
+ ``assert expected_value == function_under_test()``
+
+* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use
+ pytest features that are available in v2.8.7. This is an
+ inexhaustive list of ways in which this may catch you out:
+
+ * Support for using ``yield`` in ``pytest.fixture`` functions was
+ only introduced in `pytest 3.0`_. Such functions must instead use
+ the ``pytest.yield_fixture`` decorator.
+
+ * Only the following built-in fixtures are available
+ [#fixture-list]_:
+
+ * ``cache``
+ * ``capfd``
+ * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial)
+ * ``capsys``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``record_xml_property``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+ * On xenial, the objects returned by the ``tmpdir`` fixture cannot be
+ used where paths are required; they are rejected as invalid paths.
+ You must instead use their ``.strpath`` attribute.
+
+ * For example, instead of
+ ``util.write_file(tmpdir.join("some_file"), ...)``, you should
+ write ``util.write_file(tmpdir.join("some_file").strpath, ...)``.
+
+ * The `pytest.param`_ function cannot be used. It was introduced in
+ pytest 3.1, which means it is not available on xenial. The more
+ limited mechanism it replaced was removed in pytest 4.0, so is not
+ available in focal or later. The only available alternatives are
+ to write mark-requiring test instances as completely separate
+ tests, without utilising parameterisation, or to apply the mark to
+ the entire parameterized test (and therefore every test instance).
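+
+    For example, rather than marking a single parameter set via
+    ``pytest.param(..., marks=...)``, the mark can be applied to the
+    entire parameterized test (a minimal sketch)::
+
+        import pytest
+
+        class TestExample:
+            @pytest.mark.xfail(reason="every instance is expected to fail")
+            @pytest.mark.parametrize("value", [1, 2, 3])
+            def test_example(self, value):
+                assert value > 3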
+
+* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
+ should start with ``m_`` to clearly distinguish them from non-mock
+ variables
+
+ * For example, ``m_readurl`` (which would be a mock for ``readurl``)
+
+* The ``assert_*`` methods that are available on ``Mock`` and
+ ``MagicMock`` objects should be avoided, as typos in these method
+ names may not raise ``AttributeError`` (and so can cause tests to
+ silently pass). An important exception: if a ``Mock`` is
+ `autospecced`_ then misspelled assertion methods *will* raise an
+ ``AttributeError``, so these assertion methods may be used on
+ autospecced ``Mock`` objects.
+
+ For non-autospecced ``Mock`` s, these substitutions can be used
+ (``m`` is assumed to be a ``Mock``):
+
+ * ``m.assert_any_call(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) in m.call_args_list``
+ * ``m.assert_called()`` => ``assert 0 != m.call_count``
+ * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
+ * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
+ [mock.call(*args, **kwargs)] == m.call_args_list``
+ * ``m.assert_called_with(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) == m.call_args_list[-1]``
+ * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
+ call_list: assert call in m.call_args_list``
+
+ * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
+ any_order=False)`` are not easily replicated in a single
+ statement, so their use when appropriate is acceptable.
+
+ * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
+
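+  A short sketch of these substitutions, also using the ``m_`` naming
+  convention described above (``m_readurl`` is a hypothetical mock, not
+  a reference to real cloud-init code)::
+
+      from unittest import mock
+
+      m_readurl = mock.Mock(return_value="response")
+      m_readurl("http://169.254.169.254/")
+
+      # instead of m_readurl.assert_called_once_with(...):
+      assert [
+          mock.call("http://169.254.169.254/")
+      ] == m_readurl.call_args_list
+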
+* Test arguments should be ordered as follows:
+
+ * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
+ partially applies its generated ``Mock`` object as the first
+ argument, so these arguments must go first.
+ * ``pytest.mark.parametrize`` arguments, in the order specified to
+ the ``parametrize`` decorator. These arguments are also provided
+ by a decorator, so it's natural that they sit next to the
+ ``mock.patch`` arguments.
+ * Fixture arguments, alphabetically. These are not provided by a
+ decorator, so they are last, and their order has no defined
+ meaning, so we default to alphabetical.
+
+* It follows from this ordering of test arguments (so that we retain
+ the property that arguments left-to-right correspond to decorators
+ bottom-to-top) that test decorators should be ordered as follows:
+
+ * ``pytest.mark.parametrize``
+ * ``mock.patch``
+
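+  Putting the argument and decorator ordering together (a sketch; the
+  patch target and test names are illustrative)::
+
+      import pytest
+      from unittest import mock
+
+      class TestExample:
+          @pytest.mark.parametrize("value,expected", [(1, 2), (3, 4)])
+          @mock.patch("cloudinit.subp.subp")
+          def test_example(self, m_subp, value, expected, tmpdir):
+              # m_subp first (from mock.patch), then the parametrize
+              # arguments in declaration order, then fixtures
+              # alphabetically
+              ...
+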
+* When there are multiple patch calls in a test file for the module it
+ is testing, it may be desirable to capture the shared string prefix
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests,
+ ``DS_PATH``.
+
+.. _pytest: https://docs.pytest.org/
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
+.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
+.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
+.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
+.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
+
+Type Annotations
+----------------
+
+The cloud-init codebase uses Python's annotation support for storing
+type annotations in the style specified by `PEP-484`_. Their use in
+the codebase is encouraged but with one important caveat: types from
+the ``typing`` module cannot be used.
+
+cloud-init still supports Python 3.4, which doesn't have the ``typing``
+module in the stdlib. This means that the use of any types from the
+``typing`` module in the codebase would require installation of an
+additional Python module on platforms using Python 3.4. As such
+platforms are generally in maintenance mode, introducing a new
+dependency would, in practical terms, amount to a compatibility break.
+
+Similarly, only function annotations are appropriate for use, as the
+variable annotations specified in `PEP-526`_ were introduced in Python
+3.6.
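+
+A sketch of the distinction (the function names are drawn from this
+document; the bodies are illustrative)::
+
+    # acceptable: function annotations using concrete types
+    def is_up(devname: str) -> bool:
+        ...
+
+    # not acceptable: requires the typing module (absent on Python 3.4)
+    # def get_devicelist() -> List[str]:
+    #     ...
+
+    # not acceptable: PEP-526 variable annotation (Python 3.6+ only)
+    # devname: str = "eth0"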
+
+.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
+.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
+
+.. [#fixture-list] This list of fixtures (with markup) can be
+ reproduced by running::
+
+ py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/'
+
+ in a xenial lxd container with python3-pytest-catchlog installed.
+
+Feature Flags
+-------------
+
+.. automodule:: cloudinit.features
+ :members:
+
+
+Ongoing Refactors
+=================
+
+This captures ongoing refactoring projects in the codebase. This is
+intended as documentation for developers involved in the refactoring,
+but also for other developers who may interact with the code being
+refactored in the meantime.
+
+``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy
+---------------------------------------------------------------
+
+``cloudinit.net`` was imported from the curtin codebase as a chunk, and
+then modified enough that it integrated with the rest of the cloud-init
+codebase. Over the ~4 years since, the fact that it is not fully
+integrated into the ``Distro`` hierarchy has caused several issues.
+
+The common pattern of these problems is that the commands used for
+networking are different across distributions and operating systems.
+This has led to ``cloudinit.net`` developing its own "distro
+determination" logic: `get_interfaces_by_mac`_ is probably the clearest
+example of this. Currently, these differences are primarily split
+along Linux/BSD lines. However, it would be short-sighted to only
+refactor in a way that captures this difference: we can anticipate that
+differences will develop between Linux-based distros in future, or
+there may already be differences in tooling that we currently
+work around in less obvious ways.
+
+The high-level plan is to introduce a hierarchy of networking classes
+in ``cloudinit.distros.networking``, which each ``Distro`` subclass
+will reference. These will capture the differences between networking
+on our various distros, while still allowing easy reuse of code between
+distros that share functionality (e.g. most of the Linux networking
+behaviour). ``Distro`` objects will instantiate the networking classes
+at ``self.net``, so callers will call ``distro.net.<func>`` instead of
+``cloudinit.net.<func>``; this will necessitate access to an
+instantiated ``Distro`` object.
+
+An implementation note: there may be external consumers of the
+``cloudinit.net`` module. We don't consider this a public API, so we
+will be removing it as part of this refactor. However, we will ensure
+that the new API is complete from its introduction, so that any such
+consumers can move over to it wholesale. (Note, however, that this new
+API is still not considered public or stable, and may not replicate the
+existing API exactly.)
+
+In more detail:
+
+* The root of this hierarchy will be the
+ ``cloudinit.distros.networking.Networking`` class. This class will
+ have a corresponding method for every ``cloudinit.net`` function that
+ we identify as needing refactoring. Initially, these methods'
+ implementations will simply call the corresponding ``cloudinit.net``
+ function. (This gives us the complete API from day one, for existing
+ consumers.)
+* As the biggest differentiator in behaviour, the next layer of the
+ hierarchy will be two subclasses: ``LinuxNetworking`` and
+ ``BSDNetworking``. These will be introduced in the initial PR.
+* When a difference in behaviour for a particular distro is identified,
+ a new ``Networking`` subclass will be created. This new class should
+ generally subclass either ``LinuxNetworking`` or ``BSDNetworking``.
+* To be clear: ``Networking`` subclasses will only be created when
+ needed; we will not create a full hierarchy of per-``Distro``
+ subclasses up-front.
+* Each ``Distro`` class will have a class variable
+ (``cls.networking_cls``) which points at the appropriate
+ networking class (initially this will be either ``LinuxNetworking``
+ or ``BSDNetworking``).
+* When ``Distro`` classes are instantiated, they will instantiate
+ ``cls.networking_cls`` and store the instance at ``self.net``. (This
+ will be implemented in ``cloudinit.distros.Distro.__init__``.)
+* A helper function will be added which will determine the appropriate
+ ``Distro`` subclass for the current system, instantiate it and return
+ its ``net`` attribute. (This is the entry point for existing
+ consumers to migrate to.)
+* Callers of refactored functions will change from calling
+ ``cloudinit.net.<func>`` to ``distro.net.<func>``, where ``distro``
+ is an instance of the appropriate ``Distro`` class for this system.
+ (This will require making such an instance available to callers,
+ which will constitute a large part of the work in this project.)
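+
+A condensed sketch of the planned structure (not the eventual
+implementation; ``some_func`` is this document's placeholder for any
+``cloudinit.net`` function identified for refactoring)::
+
+    class Networking:
+        def some_func(self, devname):
+            # initially, each method simply delegates to the existing
+            # function, so the new API is complete from its introduction
+            from cloudinit import net
+            return net.some_func(devname)
+
+    class LinuxNetworking(Networking):
+        pass
+
+    class BSDNetworking(Networking):
+        pass
+
+    class Distro:
+        # class variable pointing at the appropriate networking class
+        networking_cls = LinuxNetworking
+
+        def __init__(self):
+            # stored at self.net, so callers use distro.net.<func>
+            self.net = self.networking_cls()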
+
+After the initial structure is in place, the work in this refactor will
+consist of replacing the ``cloudinit.net.some_func`` call in each
+``cloudinit.distros.networking.Networking`` method with the actual
+implementation. This can be done incrementally, one function at a
+time:
+
+* pick an unmigrated ``cloudinit.distros.networking.Networking`` method
+* find it in `the list of bugs tagged net-refactor`_ and assign
+ yourself to it (see :ref:`Managing Work/Tracking Progress` below for
+ more details)
+* refactor all of its callers to call the ``distro.net.<func>`` method
+ on ``Distro`` instead of the ``cloudinit.net.<func>`` function, as
+ shown in the sketch after this list. (This is likely to be the most
+ time-consuming step, as it may require plumbing ``Distro`` objects
+ through to places that previously have not consumed them.)
+* refactor its implementation from ``cloudinit.net`` into the
+ ``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
+ the time to put the implementations in their respective subclasses)
+
+ * if the method mixes distro-independent and distro-specific logic,
+ you may need to create new methods to capture the distro-specific
+ parts; we don't want to replicate common logic in different
+ ``Networking`` subclasses
+ * if after the refactor, the method on the root ``Networking`` class
+ no longer has any implementation, it should be converted to an
+ `abstractmethod`_
+
+* ensure that the new implementation has unit tests (either by moving
+ existing tests, or by writing new ones)
+* ensure that the new implementation has a docstring
+* add any appropriate type annotations
+
+ * note that we must follow the constraints described in the "Type
+ Annotations" section above, so you may not be able to write
+ complete annotations
+ * we have `type aliases`_ defined in ``cloudinit.distros.networking``
+ which should be used when applicable
+
+* finally, remove it (and any other now-unused functions) from
+ ``cloudinit.net`` (to avoid having two parallel implementations)
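+
+For example, a caller migration for ``is_up`` (one of the functions
+listed in the next section) might look like this sketch, where
+``distro`` is an instance of the appropriate ``Distro`` subclass::
+
+    devname = "eth0"
+
+    # before: module-level function, distro determination hidden inside
+    from cloudinit import net
+    net.is_up(devname)
+
+    # after: method on the distro's networking instance
+    distro.net.is_up(devname)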
+
+``cloudinit.net`` Functions/Classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions/classes that need refactoring break down into some broad
+categories:
+
+* helpers for accessing ``/sys`` (that should not be on the top-level
+ ``Networking`` class as they are Linux-specific):
+
+ * ``get_sys_class_path``
+ * ``sys_dev_path``
+ * ``read_sys_net``
+ * ``read_sys_net_safe``
+ * ``read_sys_net_int``
+
+* those that directly access ``/sys`` (via helpers) and should (IMO) be
+ included in the API of the ``Networking`` class:
+
+ * ``generate_fallback_config``
+
+ * the ``config_driver`` parameter is used and passed as a boolean,
+ so we can change the default value to ``False`` (instead of
+ ``None``)
+
+ * ``get_ib_interface_hwaddr``
+ * ``get_interface_mac``
+ * ``interface_has_own_mac``
+ * ``is_bond``
+ * ``is_bridge``
+ * ``is_physical``
+ * ``is_renamed``
+ * ``is_up``
+ * ``is_vlan``
+ * ``wait_for_physdevs``
+
+* those that directly access ``/sys`` (via helpers) but may be
+ Linux-specific concepts or names:
+
+ * ``get_master``
+ * ``device_devid``
+ * ``device_driver``
+
+* those that directly use ``ip``:
+
+ * ``_get_current_rename_info``
+
+ * this has non-distro-specific logic so should potentially be
+ refactored to use helpers on ``self`` instead of ``ip`` directly
+ (rather than being wholesale reimplemented in each of
+ ``BSDNetworking`` or ``LinuxNetworking``)
+ * we can also remove the ``check_downable`` argument; it's never
+ specified, so it is always ``True``
+
+ * ``_rename_interfaces``
+
+ * this has several internal helper functions which use ``ip``
+ directly, and it calls ``_get_current_rename_info``. That said,
+ there appears to be a lot of non-distro-specific logic that could
+ live in a function on ``Networking``, so this will require some
+ careful refactoring to avoid duplicating that logic in each of
+ ``BSDNetworking`` and ``LinuxNetworking``.
+ * only the ``renames`` and ``current_info`` parameters are ever
+ passed in (and ``current_info`` only by tests), so we can remove
+ the others from the definition
+
+ * ``EphemeralIPv4Network``
+
+ * this is another case where it mixes distro-specific and
+ non-specific functionality. Specifically, ``__init__``,
+ ``__enter__`` and ``__exit__`` are non-specific, and the
+ remaining methods are distro-specific.
+ * when refactoring this, the need to track ``cleanup_cmds`` likely
+ means that the distro-specific behaviour cannot be captured only
+ in the ``Networking`` class. See `this comment in PR #363`_ for
+ more thoughts.
+
+* those that implicitly use ``/sys`` via their call dependencies:
+
+ * ``master_is_bridge_or_bond``
+
+ * appends to the ``get_master`` return value, which is a ``/sys`` path
+
+ * ``extract_physdevs``
+
+ * calls ``device_driver`` and ``device_devid`` in both
+ ``_version_*`` impls
+
+ * ``apply_network_config_names``
+
+ * calls ``extract_physdevs``
+ * there is already a ``Distro.apply_network_config_names`` which in
+ the default implementation calls this function; this and its BSD
+ subclass implementations should be refactored at the same time
+ * the ``strict_present`` and ``strict_busy`` parameters are never
+ passed, nor are they used within the function body, so they can
+ be removed from the definition
+
+ * ``get_interfaces``
+
+ * calls ``device_driver``, ``device_devid`` amongst others
+
+ * ``get_ib_hwaddrs_by_interface``
+
+ * calls ``get_interfaces``
+
+* those that may fall into the above categories, but whose use is only
+ related to netfailover (which relies on a Linux-specific network
+ driver, so is unlikely to be relevant elsewhere without a substantial
+ refactor; these probably only need implementing in
+ ``LinuxNetworking``):
+
+ * ``get_dev_features``
+
+ * ``has_netfail_standby_feature``
+
+ * calls ``get_dev_features``
+
+ * ``is_netfailover``
+ * ``is_netfail_master``
+
+ * this is called from ``generate_fallback_config``
+
+ * ``is_netfail_primary``
+ * ``is_netfail_standby``
+
+ * N.B. all of these take an optional ``driver`` argument which is
+ used to pass around a value to avoid having to look it up by
+ calling ``device_driver`` every time. This is something of a leaky
+ abstraction, and is better served by caching on ``device_driver``
+ or storing the cached value on ``self``, so we can drop the
+ parameter from the new API.
+
+* those that use ``/sys`` (via helpers) and have non-exhaustive BSD
+ logic:
+
+ * ``get_devicelist``
+
+* those that already have separate Linux/BSD implementations:
+
+ * ``find_fallback_nic``
+ * ``get_interfaces_by_mac``
+
+* those that have no OS-specific functionality (so do not need to be
+ refactored):
+
+ * ``ParserError``
+ * ``RendererNotFoundError``
+ * ``has_url_connectivity``
+ * ``is_ip_address``
+ * ``is_ipv4_address``
+ * ``natural_sort_key``
+
+Note that the functions in ``cloudinit.net`` use inconsistent parameter
+names for "string that contains a device name"; we can standardise on
+``devname`` (the most common one) in the refactor.
+
+Managing Work/Tracking Progress
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To ensure that we won't have multiple people working on the same part
+of the refactor at the same time, there is a bug for each function.
+You can see the current status by looking at `the list of bugs tagged
+net-refactor`_.
+
+When you're working on refactoring a particular method, ensure that you
+have assigned yourself to the corresponding bug, to avoid duplicate
+work.
+
+Generally, when considering what to pick up to refactor, it is best to
+start with functions in ``cloudinit.net`` which are not called by
+anything else in ``cloudinit.net``. This allows you to focus only on
+refactoring that function and its callsites, rather than also having
+to update other ``cloudinit.net`` functions.
+
+References
+~~~~~~~~~~
+
+* `Mina Galić's email to the cloud-init ML in 2018`_ (plus its thread)
+* `Mina Galić's email to the cloud-init ML in 2019`_ (plus its thread)
+* `PR #363`_, the discussion which prompted finally starting this
+ refactor (and where a lot of the above details were hashed out)
+
+.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
+.. _Mina Galić's email to the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
+.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
+.. _PR #363: https://github.com/canonical/cloud-init/pull/363
+.. _this comment in PR #363: https://github.com/canonical/cloud-init/pull/363#issuecomment-628829489
+.. _abstractmethod: https://docs.python.org/3/library/abc.html#abc.abstractmethod
+.. _type aliases: https://docs.python.org/3/library/typing.html#type-aliases
+.. _the list of bugs tagged net-refactor: https://bugs.launchpad.net/cloud-init/+bugs?field.tag=net-refactor
diff --git a/Makefile b/Makefile
index 315e6b45..5fb0fcbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,40 +1,22 @@
CWD=$(shell pwd)
-PYVER ?= $(shell for p in python3 python2; do \
- out=$$(command -v $$p 2>&1) && echo $$p && exit; done; exit 1)
-
-noseopts ?= -v
YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
-PIP_INSTALL := pip install
-
-ifeq ($(PYVER),python3)
- pyflakes = pyflakes3
- unittests = unittest3
- yaml = yaml
-else
-ifeq ($(PYVER),python2)
- pyflakes = pyflakes
- unittests = unittest
-else
- pyflakes = pyflakes pyflakes3
- unittests = unittest unittest3
-endif
-endif
+PYTHON = python3
+PIP_INSTALL := pip3 install
ifeq ($(distro),)
distro = redhat
endif
-READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \
- echo read-version-failed)
-CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())")
+READ_VERSION=$(shell $(PYTHON) $(CWD)/tools/read-version || echo read-version-failed)
+CODE_VERSION=$(shell $(PYTHON) -c "from cloudinit import version; print(version.version_string())")
all: check
-check: check_version test $(yaml)
+check: check_version test yaml
style-check: pep8 $(pyflakes)
@@ -44,20 +26,14 @@ pep8:
pyflakes:
@$(CWD)/tools/run-pyflakes
-pyflakes3:
- @$(CWD)/tools/run-pyflakes3
-
unittest: clean_pyc
- nosetests $(noseopts) tests/unittests cloudinit
-
-unittest3: clean_pyc
- nosetests3 $(noseopts) tests/unittests cloudinit
+ python3 -m pytest -v tests/unittests cloudinit
ci-deps-ubuntu:
- @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
+ @$(PYTHON) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
ci-deps-centos:
- @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --test-distro
+ @$(PYTHON) $(CWD)/tools/read-dependencies --distro centos --test-distro
pip-requirements:
@echo "Installing cloud-init dependencies..."
@@ -67,7 +43,7 @@ pip-test-requirements:
@echo "Installing cloud-init test dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
-test: $(unittests)
+test: unittest
check_version:
@if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \
@@ -76,7 +52,7 @@ check_version:
else true; fi
config/cloud.cfg:
- $(PYVER) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
+ $(PYTHON) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
clean_pyc:
@find . -type f -name "*.pyc" -delete
@@ -86,30 +62,30 @@ clean: clean_pyc
rm -rf doc/rtd_html .tox .coverage
yaml:
- @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
+ @$(PYTHON) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
rpm:
- $(PYVER) ./packages/brpm --distro=$(distro)
+ $(PYTHON) ./packages/brpm --distro=$(distro)
srpm:
- $(PYVER) ./packages/brpm --srpm --distro=$(distro)
+ $(PYTHON) ./packages/brpm --srpm --distro=$(distro)
deb:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
echo sudo apt-get install devscripts; exit 1; }
- $(PYVER) ./packages/bddeb
+ $(PYTHON) ./packages/bddeb
deb-src:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
echo sudo apt-get install devscripts; exit 1; }
- $(PYVER) ./packages/bddeb -S -d
+ $(PYTHON) ./packages/bddeb -S -d
doc:
tox -e doc
-.PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml
+.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml
.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
-.PHONY: unittest unittest3 style-check doc
+.PHONY: unittest style-check doc
diff --git a/README.md b/README.md
index 872ea44e..435405da 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# cloud-init
-[![Build Status](https://travis-ci.org/canonical/cloud-init.svg?branch=master)](https://travis-ci.org/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
+[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
Cloud-init is the *industry standard* multi-distribution method for
cross-platform cloud instance initialization. It is supported across all
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Ubuntu<br />SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 1f3060d0..62ad51fe 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -4,6 +4,7 @@ import calendar
from datetime import datetime
import sys
+from cloudinit import subp
from cloudinit import util
stage_to_description = {
@@ -51,7 +52,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
timestamp = out.strip()
return float(timestamp)
@@ -74,8 +75,12 @@ def parse_ci_logline(line):
#
# 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
# 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.
+ #
+ # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
+ # init-local/check-cache: attempting to read from cache [check]
- separators = [' - ', ' [CLOUDINIT] ']
+ amazon_linux_2_sep = ' cloud-init['
+ separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -98,7 +103,14 @@ def parse_ci_logline(line):
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
- timestampstr = timehost.split(hostname)[0].strip()
+ if sep == amazon_linux_2_sep:
+ # This is an Amazon Linux style line, with no hostname and a PID.
+ # Use the whole of timehost as timestampstr, and strip off the PID
+ # from the start of eventstr.
+ timestampstr = timehost.strip()
+ eventstr = eventstr.split(maxsplit=1)[1]
+ else:
+ timestampstr = timehost.split(hostname)[0].strip()
if 'Cloud-init v.' in eventstr:
event_type = 'start'
if 'running' in eventstr:
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index fb152b1d..01a4d3e5 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -11,31 +11,29 @@ import os
import time
import sys
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import uses_systemd
-# An event:
-'''
-{
- "description": "executing late commands",
- "event_type": "start",
- "level": "INFO",
- "name": "cmd-install/stage-late"
- "origin": "cloudinit",
- "timestamp": 1461164249.1590767,
-},
-
- {
- "description": "executing late commands",
- "event_type": "finish",
- "level": "INFO",
- "name": "cmd-install/stage-late",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1461164249.1590767
- }
-
-'''
+# Example events:
+# {
+# "description": "executing late commands",
+# "event_type": "start",
+# "level": "INFO",
+# "name": "cmd-install/stage-late"
+# "origin": "cloudinit",
+# "timestamp": 1461164249.1590767,
+# }
+# {
+# "description": "executing late commands",
+# "event_type": "finish",
+# "level": "INFO",
+# "name": "cmd-install/stage-late",
+# "origin": "cloudinit",
+# "result": "SUCCESS",
+# "timestamp": 1461164249.1590767
+# }
+
format_key = {
'%d': 'delta',
'%D': 'description',
@@ -155,7 +153,7 @@ class SystemctlReader(object):
:return: whether the subp call failed or not
'''
try:
- value, err = util.subp(self.args, capture=True)
+ value, err = subp.subp(self.args, capture=True)
if err:
return err
self.epoch = value
@@ -215,7 +213,7 @@ def gather_timestamps_using_dmesg():
with gather_timestamps_using_systemd
'''
try:
- data, _ = util.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(['dmesg'], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
if i.decode('UTF-8').find('user') != -1:
@@ -269,7 +267,7 @@ def gather_timestamps_using_systemd():
except OSError as err:
raise RuntimeError('Could not determine container boot '
'time from /proc/1/cmdline. ({})'
- .format(err))
+ .format(err)) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
index f4001c14..f69423c3 100644
--- a/cloudinit/analyze/tests/test_boot.py
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -25,7 +25,7 @@ class TestDistroChecker(CiTestCase):
m_get_linux_distro, m_is_FreeBSD):
self.assertEqual(err_code, dist_check_timestamp())
- @mock.patch('cloudinit.util.subp', return_value=(0, 1))
+ @mock.patch('cloudinit.subp.subp', return_value=(0, 1))
def test_subp_fails(self, m_subp):
self.assertEqual(err_code, dist_check_timestamp())
@@ -42,7 +42,7 @@ class TestSystemCtlReader(CiTestCase):
with self.assertRaises(RuntimeError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_systemctl_works_correctly_threshold(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(1.0, reader.parse_epoch_as_float())
@@ -50,12 +50,12 @@ class TestSystemCtlReader(CiTestCase):
self.assertTrue(thresh < 1e-6)
self.assertTrue(thresh > (-1 * 1e-6))
- @mock.patch('cloudinit.util.subp', return_value=('U=0', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=0', None))
def test_systemctl_succeed_zero(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(0.0, reader.parse_epoch_as_float())
- @mock.patch('cloudinit.util.subp', return_value=('U=1', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1', None))
def test_systemctl_succeed_distinct(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
val1 = reader.parse_epoch_as_float()
@@ -64,13 +64,13 @@ class TestSystemCtlReader(CiTestCase):
val2 = reader2.parse_epoch_as_float()
self.assertNotEqual(val1, val2)
- @mock.patch('cloudinit.util.subp', return_value=('100', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('100', None))
def test_systemctl_epoch_not_splittable(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(IndexError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None))
def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(ValueError):
@@ -130,7 +130,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(err_string, data)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_container_no_ci_log_line(self, m_is_container, m_subp):
path = os.path.dirname(os.path.abspath(__file__))
log_path = path + '/boot-test.log'
@@ -148,7 +148,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(FAIL_CODE, finish_code)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
@mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
'name': 'init-local', 'description': 'starting search', 'timestamp':
100000}])
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index db2a667b..dac1efb6 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,7 +5,8 @@ from textwrap import dedent
from cloudinit.analyze.dump import (
dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import which, write_file
+from cloudinit.util import write_file
+from cloudinit.subp import which
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
@@ -119,6 +120,23 @@ class TestParseCILogLine(CiTestCase):
m_parse_from_date.assert_has_calls(
[mock.call("2016-08-30 21:53:25.972325+00:00")])
+ def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
+ line = (
+ "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
+ " init-local/check-cache: attempting to read from cache [check]")
+ # Generate the expected value using `datetime`, so that TZ
+ # determination is consistent with the code under test.
+ timestamp_dt = datetime.strptime(
+ "Apr 30 19:39:11", "%b %d %H:%M:%S"
+ ).replace(year=datetime.now().year)
+ expected = {
+ 'description': 'attempting to read from cache [check]',
+ 'event_type': 'start',
+ 'name': 'init-local/check-cache',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp_dt.timestamp()}
+ self.assertEqual(expected, parse_ci_logline(line))
+
SAMPLE_LOGS = dedent("""\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 1f2c2e7e..9bded16c 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -36,6 +36,7 @@ KNOWN_CLOUD_NAMES = [
'OVF',
'RbxCloud - (HyperOne, Rootbox, Rubikon)',
'OpenTelekomCloud',
+ 'SAP Converged Cloud',
'Scaleway',
'SmartOS',
'VMware',
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 1f61faa2..485ff92f 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -11,10 +11,10 @@ LOG = logging.getLogger(__name__)
def write_file(filename, content, mode=_DEF_PERMS,
- omode="wb", copy_mode=False):
+ omode="wb", preserve_mode=False):
# open filename in mode 'omode', write content, set permissions to 'mode'
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 30e49de0..928a8eea 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,9 +10,8 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.util import (
- ProcessExecutionError, del_dir, del_file, get_config_logfiles,
- is_link, subp)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link)
def error(msg):
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 4c086b51..51c61cca 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -12,8 +12,8 @@ import sys
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.temp_utils import tempdir
-from cloudinit.util import (
- ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (chdir, copy, ensure_dir, write_file)
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
new file mode 100755
index 00000000..4e6a5778
--- /dev/null
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -0,0 +1,114 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Generate multi-part mime messages for user-data """
+
+import argparse
+import sys
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+from cloudinit import log
+from cloudinit.handlers import INCLUSION_TYPES_MAP
+from . import addLogHandlerCLI
+
+NAME = 'make-mime'
+LOG = log.getLogger(NAME)
+EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data")
+
+
+def file_content_type(text):
+ """ Return file content type by reading the first line of the input. """
+ try:
+ filename, content_type = text.split(":", 1)
+ return (open(filename, 'r'), filename, content_type.strip())
+ except ValueError as e:
+ raise argparse.ArgumentError(
+ text, "Invalid value for %r" % (text)
+ ) from e
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for make-mime utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser()
+ # update the parser's doc and add an epilog to show an example
+ parser.description = __doc__
+ parser.epilog = EPILOG
+ parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
+ action='append', default=[],
+ metavar="<file>:<content-type>",
+ help=("attach the given file as the specified "
+ "content-type"))
+ parser.add_argument('-l', '--list-types', action='store_true',
+ default=False,
+ help='List supported cloud-init content types.')
+ parser.add_argument('-f', '--force', action='store_true',
+ default=False,
+ help='Ignore unknown content-type warnings')
+ return parser
+
+
+def get_content_types(strip_prefix=False):
+ """ Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
+ """
+ return sorted([ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()])
+
+
+def handle_args(name, args):
+ """Create a multi-part MIME archive for use as user-data. Optionally
+ print out the list of supported content types of cloud-init.
+
+ Also set up CLI log handlers to report to stderr, since this is a
+ development utility which should be run by a human on the CLI.
+
+ @return 0 on success, 1 on failure.
+ """
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if args.list_types:
+ print("\n".join(get_content_types(strip_prefix=True)))
+ return 0
+
+ sub_messages = []
+ errors = []
+ for i, (fh, filename, format_type) in enumerate(args.files):
+ contents = fh.read()
+ sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
+ sub_message.add_header('Content-Disposition',
+ 'attachment; filename="%s"' % (filename))
+ content_type = sub_message.get_content_type().lower()
+ if content_type not in get_content_types():
+ level = "WARNING" if args.force else "ERROR"
+ msg = (level + ": content type %r for attachment %s "
+ "may be incorrect!") % (content_type, i + 1)
+ sys.stderr.write(msg + '\n')
+ errors.append(msg)
+ sub_messages.append(sub_message)
+ if len(errors) and not args.force:
+ sys.stderr.write("Invalid content-types, override with --force\n")
+ return 1
+ combined_message = MIMEMultipart()
+ for msg in sub_messages:
+ combined_message.attach(msg)
+ print(combined_message)
+ return 0
+
+
+def main():
+ args = get_parser().parse_args()
+ return handle_args(NAME, args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 99a234ce..1a3c46a4 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -9,6 +9,7 @@ from cloudinit.config import schema
from . import net_convert
from . import render
+from . import make_mime
def get_parser(parser=None):
@@ -25,7 +26,9 @@ def get_parser(parser=None):
(net_convert.NAME, net_convert.__doc__,
net_convert.get_parser, net_convert.handle_args),
(render.NAME, render.__doc__,
- render.get_parser, render.handle_args)
+ render.get_parser, render.handle_args),
+ (make_mime.NAME, make_mime.__doc__,
+ make_mime.get_parser, make_mime.handle_args),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1bc22406..1090aa16 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -57,8 +57,9 @@ def handle_args(name, args):
paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ instance_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index d2dfa8de..ddfd58e1 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -8,7 +8,8 @@ from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, mock, wrap_and_call)
-from cloudinit.util import ensure_dir, load_file, subp, write_file
+from cloudinit.subp import subp
+from cloudinit.util import ensure_dir, load_file, write_file
@mock.patch('cloudinit.cmd.devel.logs.os.getuid')
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e3db8679..07db9552 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -1,6 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Query standardized instance metadata from the command line."""
+"""Query standardized instance metadata provided to machine, returning a JSON
+structure.
+
+Some instance-data values may be binary on some platforms, such as userdata and
+vendordata. Attempt to decompress and decode UTF-8 any binary values.
+
+Any binary values in the instance metadata will be base64-encoded and prefixed
+with "ci-b64:" in the output. userdata and, where applicable, vendordata may
+be provided to the machine gzip-compressed (and therefore as binary data).
+query will attempt to decompress these to a string before emitting the JSON
+output; if this fails, they are treated as binary.
+"""
import argparse
from errno import EACCES
@@ -30,7 +41,7 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog=NAME, description='Query cloud-init instance data')
+ prog=NAME, description=__doc__)
parser.add_argument(
'-d', '--debug', action='store_true', default=False,
help='Add verbose messages during template render')
@@ -52,8 +63,10 @@ def get_parser(parser=None):
' /var/lib/cloud/instance/vendor-data.txt'))
parser.add_argument(
'varname', type=str, nargs='?',
- help=('A dot-delimited instance data variable to query from'
- ' instance-data query. For example: v2.local_hostname'))
+ help=('A dot-delimited specific variable to query from'
+ ' instance-data. For example: v1.local_hostname. If the'
+ ' value is not JSON serializable, it will be base64-encoded and'
+ ' will contain the prefix "ci-b64:". '))
parser.add_argument(
'-a', '--all', action='store_true', default=False, dest='dump_all',
help='Dump all available instance-data')
@@ -65,6 +78,21 @@ def get_parser(parser=None):
return parser
+def load_userdata(ud_file_path):
+ """Attempt to return a string of user-data from ud_file_path
+
+ Attempt to decode the content as UTF-8 directly; if that fails,
+ attempt to decompress it as gzip and decode the result.
+
+ @returns: String of uncompressed userdata if possible, otherwise bytes.
+ """
+ bdata = util.load_file(ud_file_path, decode=False)
+ try:
+ return bdata.decode('utf-8')
+ except UnicodeDecodeError:
+ return util.decomp_gzip(bdata, quiet=False, decode=True)
+
+
def handle_args(name, args):
"""Handle calls to 'cloud-init query' as a subcommand."""
paths = None
@@ -90,8 +118,9 @@ def handle_args(name, args):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ sensitive_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
@@ -120,8 +149,8 @@ def handle_args(name, args):
instance_data['vendordata'] = (
'<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
else:
- instance_data['userdata'] = util.load_file(user_data_fn)
- instance_data['vendordata'] = util.load_file(vendor_data_fn)
+ instance_data['userdata'] = load_userdata(user_data_fn)
+ instance_data['vendordata'] = load_userdata(vendor_data_fn)
if args.format:
payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
rendered_payload = render_jinja_payload(
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index 13a69aa1..a848a810 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -167,7 +167,6 @@ class TestClean(CiTestCase):
wrap_and_call(
'cloudinit.cmd.clean',
{'Init': {'side_effect': self.init_class},
- 'sys.exit': {'side_effect': self.sys_exit},
'sys.argv': {'new': ['clean', '--logs']}},
clean.main)
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 384fddc6..585b3b0e 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -18,8 +18,6 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
class TestMain(FilesystemMockingTestCase):
- with_logs = True
-
def setUp(self):
super(TestMain, self).setUp()
self.new_root = self.tmp_dir()
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index 6d36a4ea..c258d321 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -1,195 +1,260 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
-from io import StringIO
+import gzip
+from io import BytesIO
+import json
from textwrap import dedent
-import os
+
+import pytest
from collections import namedtuple
from cloudinit.cmd import query
from cloudinit.helpers import Paths
from cloudinit.sources import (
REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.util import ensure_dir, write_file
+from cloudinit.tests.helpers import mock
+
+from cloudinit.util import b64e, write_file
+
+def _gzip_data(data):
+ with BytesIO() as iobuf:
+ with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
+ gzfp.write(data)
+ return iobuf.getvalue()
-class TestQuery(CiTestCase):
- with_logs = True
+@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
+class TestQuery:
args = namedtuple(
'queryargs',
('debug dump_all format instance_data list_keys user_data vendor_data'
' varname'))
- def setUp(self):
- super(TestQuery, self).setUp()
- self.tmp = self.tmp_dir()
- self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
+ """Write userdata and vendordata into a tmpdir.
- def test_handle_args_error_on_missing_param(self):
+ Return:
+ 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
+ """
+ if ud_val:
+ user_data = tmpdir.join('user-data')
+ write_file(user_data.strpath, ud_val)
+ else:
+ user_data = None
+ if vd_val:
+ vendor_data = tmpdir.join('vendor-data')
+ write_file(vendor_data.strpath, vd_val)
+ else:
+ vendor_data = None
+ run_dir = tmpdir.join('run_dir')
+ run_dir.ensure_dir()
+ return (
+ Paths({'run_dir': run_dir.strpath}),
+ run_dir,
+ user_data,
+ vendor_data
+ )
+
+ def test_handle_args_error_on_missing_param(self, caplog, capsys):
"""Error when missing required parameters and print usage."""
args = self.args(
debug=False, dump_all=False, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(1, query.handle_args('anyname', args))
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ) as m_cli_log:
+ assert 1 == query.handle_args('anyname', args)
expected_error = (
- 'ERROR: Expected one of the options: --all, --format, --list-keys'
+ 'Expected one of the options: --all, --format, --list-keys'
' or varname\n')
- self.assertIn(expected_error, self.logs.getvalue())
- self.assertIn('usage: query', m_stdout.getvalue())
- self.assertIn(expected_error, m_stderr.getvalue())
+ assert expected_error in caplog.text
+ out, _err = capsys.readouterr()
+ assert 'usage: query' in out
+ assert 1 == m_cli_log.call_count
- def test_handle_args_error_on_missing_instance_data(self):
+ def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
"""When instance_data file path does not exist, log an error."""
- absent_fn = self.tmp_path('absent', dir=self.tmp)
+ absent_fn = tmpdir.join('absent')
args = self.args(
- debug=False, dump_all=True, format=None, instance_data=absent_fn,
+ debug=False, dump_all=True, format=None,
+ instance_data=absent_fn.strpath,
list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % absent_fn,
- self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % absent_fn,
- m_stderr.getvalue())
+ assert 1 == query.handle_args('anyname', args)
- def test_handle_args_error_when_no_read_permission_instance_data(self):
+ msg = 'Missing instance-data file: %s' % absent_fn
+ assert msg in caplog.text
+
+ def test_handle_args_error_when_no_read_permission_instance_data(
+ self, caplog, tmpdir
+ ):
"""When instance_data file is unreadable, log an error."""
- noread_fn = self.tmp_path('unreadable', dir=self.tmp)
- write_file(noread_fn, 'thou shall not pass')
+ noread_fn = tmpdir.join('unreadable')
+ noread_fn.write('thou shall not pass')
args = self.args(
- debug=False, dump_all=True, format=None, instance_data=noread_fn,
+ debug=False, dump_all=True, format=None,
+ instance_data=noread_fn.strpath,
list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertIn(
- "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
- self.logs.getvalue())
- self.assertIn(
- "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
- m_stderr.getvalue())
+ with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
+ m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+ assert 1 == query.handle_args('anyname', args)
+ msg = "No read permission on '%s'. Try sudo" % noread_fn
+ assert msg in caplog.text
- def test_handle_args_defaults_instance_data(self):
+ def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, default to configured run_dir."""
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(1, query.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % json_file,
- self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % json_file,
- m_stderr.getvalue())
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
+ assert 1 == query.handle_args('anyname', args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ msg = 'Missing instance-data file: %s' % json_file.strpath
+ assert msg in caplog.text
- def test_handle_args_root_fallsback_to_instance_data(self):
+ def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, root falls back to redacted json."""
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
with mock.patch('os.getuid') as m_getuid:
m_getuid.return_value = 0
- self.assertEqual(1, query.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- self.assertIn(
- 'WARNING: Missing root-readable %s. Using redacted %s instead.' % (
- sensitive_file, json_file),
- m_stderr.getvalue())
+ assert 1 == query.handle_args('anyname', args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ msg = (
+ 'Missing root-readable %s. Using redacted %s instead.' %
+ (
+ sensitive_file.strpath, json_file.strpath
+ )
+ )
+ assert msg in caplog.text
- def test_handle_args_root_uses_instance_sensitive_data(self):
- """When no instance_data argument, root uses semsitive json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- vendor_data = self.tmp_path('vendor-data', dir=self.tmp)
- write_file(user_data, 'ud')
- write_file(vendor_data, 'vd')
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- write_file(sensitive_file, '{"my-var": "it worked"}')
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
+ @pytest.mark.parametrize(
+ 'ud_src,ud_expected,vd_src,vd_expected',
+ (
+ ('hi mom', 'hi mom', 'hi pops', 'hi pops'),
+ ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'),
+ (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'),
+ (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'),
+ )
+ )
+ def test_handle_args_root_processes_user_data(
+ self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
+ ):
+ """Support reading multiple user-data file content types"""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val=ud_src, vd_val=vd_src
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=vendor_data, vendor_data=vendor_data,
- varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ list_keys=False, user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath, varname=None)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
with mock.patch('os.getuid') as m_getuid:
m_getuid.return_value = 0
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(
- '{\n "my_var": "it worked",\n "userdata": "vd",\n '
- '"vendordata": "vd"\n}\n', m_stdout.getvalue())
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ cmd_output = json.loads(out)
+ assert "it worked" == cmd_output['my_var']
+ if ud_expected == "ci-b64:":
+ ud_expected = "ci-b64:{}".format(b64e(ud_src))
+ if vd_expected == "ci-b64:":
+ vd_expected = "ci-b64:{}".format(b64e(vd_src))
+ assert ud_expected == cmd_output['userdata']
+ assert vd_expected == cmd_output['vendordata']
- def test_handle_args_dumps_all_instance_data(self):
+ def test_handle_args_root_uses_instance_sensitive_data(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val='ud', vd_val='vd'
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=None,
+ list_keys=False, user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath, varname=None)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args('anyname', args)
+ expected = (
+ '{\n "my_var": "it worked",\n "userdata": "ud",\n '
+ '"vendordata": "vd"\n}\n'
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
"""When --all is specified query will dump all instance data vars."""
- write_file(self.instance_data, '{"my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, list_keys=False,
+ instance_data=instance_data.strpath, list_keys=False,
user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ expected = (
'{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
' "vendordata": "<%s> file:vd"\n}\n' % (
- REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
- m_stdout.getvalue())
+ REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE
+ )
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_returns_top_level_varname(self):
+ def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
"""When the argument varname is passed, report its value."""
- write_file(self.instance_data, '{"my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, list_keys=False,
+ instance_data=instance_data.strpath, list_keys=False,
user_data='ud', vendor_data='vd', varname='my_var')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual('it worked\n', m_stdout.getvalue())
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert 'it worked\n' == out
- def test_handle_args_returns_nested_varname(self):
+ def test_handle_args_returns_nested_varname(self, capsys, tmpdir):
"""If user_data file is a jinja template render instance-data vars."""
- write_file(self.instance_data,
- '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}'
+ )
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, user_data='ud', vendor_data='vd',
- list_keys=False, varname='v1.key_2')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual('value-2\n', m_stdout.getvalue())
+ instance_data=instance_data.strpath, user_data='ud',
+ vendor_data='vd', list_keys=False, varname='v1.key_2')
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert 'value-2\n' == out
- def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(
+ self, capsys, tmpdir
+ ):
"""Any standardized vars under v# are promoted as top-level aliases."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
' "top": "gun"}')
expected = dedent("""\
@@ -209,65 +274,68 @@ class TestQuery(CiTestCase):
""")
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, user_data='ud', vendor_data='vd',
- list_keys=False, varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ instance_data=instance_data.strpath, user_data='ud',
+ vendor_data='vd', list_keys=False, varname=None)
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
+ self, capsys, tmpdir
+ ):
"""Sort all top-level keys when only --list-keys provided."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
' "top": "gun"}')
expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True, user_data='ud',
- vendor_data='vd', varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ instance_data=instance_data.strpath, list_keys=True,
+ user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(
+ self, capsys, tmpdir
+ ):
"""Sort all nested keys of varname object when --list-keys provided."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
' {"v2_2": "val2.2"}, "top": "gun"}')
expected = 'v1_1\nv1_2\n'
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True,
+ instance_data=instance_data.strpath, list_keys=True,
user_data='ud', vendor_data='vd', varname='v1')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
+ self, caplog, tmpdir
+ ):
"""Raise an error when --list-keys and varname specify a non-list."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
'{"v2_2": "val2.2"}, "top": "gun"}')
- expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
+ expected_error = "--list-keys provided but 'top' is not a dict"
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True, user_data='ud',
- vendor_data='vd', varname='top')
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertEqual('', m_stdout.getvalue())
- self.assertIn(expected_error, m_stderr.getvalue())
+ instance_data=instance_data.strpath, list_keys=True,
+ user_data='ud', vendor_data='vd', varname='top')
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 1 == query.handle_args('anyname', args)
+ assert expected_error in caplog.text
# vi: ts=4 expandtab
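
Note: the hunks above replace unittest-style mocking of sys.stdout with pytest's built-in capsys and tmpdir fixtures. A minimal sketch of the pattern, independent of cloud-init (function and test names here are illustrative only):

    def emit(path):
        print(open(path).read())

    class TestEmit:
        def test_emit_prints_file_contents(self, capsys, tmpdir):
            # tmpdir is a per-test py.path.local; .strpath is a plain str path
            data = tmpdir.join('instance-data')
            data.write('it worked')
            emit(data.strpath)
            # capsys captures stdout/stderr without patching sys.stdout
            out, _err = capsys.readouterr()
            assert 'it worked\n' == out
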
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index 1ed10896..1c9eec37 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -382,7 +382,6 @@ class TestStatus(CiTestCase):
wrap_and_call(
'cloudinit.cmd.status',
{'sys.argv': {'new': ['status']},
- 'sys.exit': {'side_effect': self.sys_exit},
'_is_cloudinit_disabled': (False, ''),
'Init': {'side_effect': self.init_class}},
status.main)
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
new file mode 100644
index 00000000..84d7a0b6
--- /dev/null
+++ b/cloudinit/config/cc_apk_configure.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2020 Dermot Bradley
+#
+# Author: Dermot Bradley <dermot_bradley@yahoo.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Apk Configure: Configures apk repositories file."""
+
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import temp_utils
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+# If no mirror is specified then use this one
+DEFAULT_MIRROR = "https://alpine.global.ssl.fastly.net/alpine"
+
+
+REPOSITORIES_TEMPLATE = """\
+## template:jinja
+#
+# Created by cloud-init
+#
+# This file is written on first boot of an instance
+#
+
+{{ alpine_baseurl }}/{{ alpine_version }}/main
+{% if community_enabled -%}
+{{ alpine_baseurl }}/{{ alpine_version }}/community
+{% endif -%}
+{% if testing_enabled -%}
+{% if alpine_version != 'edge' %}
+#
+# Testing - using with non-Edge installation may cause problems!
+#
+{% endif %}
+{{ alpine_baseurl }}/edge/testing
+{% endif %}
+{% if local_repo != '' %}
+
+#
+# Local repo
+#
+{{ local_repo }}/{{ alpine_version }}
+{% endif %}
+
+"""
+
+
+frequency = PER_INSTANCE
+distros = ['alpine']
+schema = {
+ 'id': 'cc_apk_configure',
+ 'name': 'APK Configure',
+ 'title': 'Configure apk repositories file',
+ 'description': dedent("""\
+ This module handles configuration of the /etc/apk/repositories file.
+
+ .. note::
+ To ensure that apk configuration is valid yaml, any strings
+        containing special characters, especially ``:``, should be quoted.
+ """),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Keep the existing /etc/apk/repositories file unaltered.
+ apk_repos:
+ preserve_repositories: true
+ """),
+ dedent("""\
+ # Create repositories file for Alpine v3.12 main and community
+ # using default mirror site.
+ apk_repos:
+ alpine_repo:
+ community_enabled: true
+ version: 'v3.12'
+ """),
+ dedent("""\
+ # Create repositories file for Alpine Edge main, community, and
+ # testing using a specified mirror site and also a local repo.
+ apk_repos:
+ alpine_repo:
+ base_url: 'https://some-alpine-mirror/alpine'
+ community_enabled: true
+ testing_enabled: true
+ version: 'edge'
+ local_repo_base_url: 'https://my-local-server/local-alpine'
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apk_repos': {
+ 'type': 'object',
+ 'properties': {
+ 'preserve_repositories': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new repositories
+ file ``/etc/apk/repositories`` based on any valid
+                        configuration settings specified within an apk_repos
+ section of cloud config. To disable this behavior and
+ preserve the repositories file from the pristine image,
+ set ``preserve_repositories`` to ``true``.
+
+ The ``preserve_repositories`` option overrides
+ all other config keys that would alter
+ ``/etc/apk/repositories``.
+ """)
+ },
+ 'alpine_repo': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'base_url': {
+ 'type': 'string',
+ 'default': DEFAULT_MIRROR,
+ 'description': dedent("""\
+ The base URL of an Alpine repository, or
+ mirror, to download official packages from.
+ If not specified then it defaults to ``{}``
+ """.format(DEFAULT_MIRROR))
+ },
+ 'community_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Community repo to the
+ repositories file. By default the Community
+ repo is not included.
+ """)
+ },
+ 'testing_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Testing repo to the
+ repositories file. By default the Testing
+ repo is not included. It is only recommended
+ to use the Testing repo on a machine running
+ the ``Edge`` version of Alpine as packages
+                        installed from Testing may have dependencies
+                        that conflict with those in non-Edge Main or
+                        Community repos.
+ """)
+ },
+ 'version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The Alpine version to use (e.g. ``v3.12`` or
+ ``edge``)
+ """)
+ },
+ },
+ 'required': ['version'],
+ 'minProperties': 1,
+ 'additionalProperties': False,
+ },
+ 'local_repo_base_url': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The base URL of an Alpine repository containing
+ unofficial packages
+ """)
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either preserve_repositories or alpine_repo
+ 'additionalProperties': False,
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
+
+
+def handle(name, cfg, cloud, log, _args):
+ """
+ Call to handle apk_repos sections in cloud-config file.
+
+ @param name: The module name "apk-configure" from cloud.cfg
+ @param cfg: A nested dict containing the entire cloud config contents.
+ @param cloud: The CloudInit object in use.
+ @param log: Pre-initialized Python logger object to use for logging.
+ @param _args: Any module arguments from cloud.cfg
+ """
+
+ # If there is no "apk_repos" section in the configuration
+ # then do nothing.
+ apk_section = cfg.get('apk_repos')
+ if not apk_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'apk_repos' section found"), name)
+ return
+
+ validate_cloudconfig_schema(cfg, schema)
+
+ # If "preserve_repositories" is explicitly set to True in
+ # the configuration do nothing.
+ if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
+ LOG.debug(("Skipping module named %s,"
+ " 'preserve_repositories' is set"), name)
+ return
+
+ # If there is no "alpine_repo" subsection of "apk_repos" present in the
+ # configuration then do nothing, as at least "version" is required to
+ # create valid repositories entries.
+ alpine_repo = apk_section.get('alpine_repo')
+ if not alpine_repo:
+ LOG.debug(("Skipping module named %s,"
+ " no 'alpine_repo' configuration found"), name)
+ return
+
+ # If there is no "version" value present in configuration then do nothing.
+ alpine_version = alpine_repo.get('version')
+ if not alpine_version:
+ LOG.debug(("Skipping module named %s,"
+ " 'version' not specified in alpine_repo"), name)
+ return
+
+ local_repo = apk_section.get('local_repo_base_url', '')
+
+ _write_repositories_file(alpine_repo, alpine_version, local_repo)
+
+
+def _write_repositories_file(alpine_repo, alpine_version, local_repo):
+ """
+ Write the /etc/apk/repositories file with the specified entries.
+
+ @param alpine_repo: A nested dict of the alpine_repo configuration.
+ @param alpine_version: A string of the Alpine version to use.
+ @param local_repo: A string containing the base URL of a local repo.
+ """
+
+ repo_file = '/etc/apk/repositories'
+
+ alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+
+ params = {'alpine_baseurl': alpine_baseurl,
+ 'alpine_version': alpine_version,
+ 'community_enabled': alpine_repo.get('community_enabled'),
+ 'testing_enabled': alpine_repo.get('testing_enabled'),
+ 'local_repo': local_repo}
+
+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ template_fn = tfile[1] # Filepath is second item in tuple
+ util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
+
+ LOG.debug('Generating Alpine repository configuration file: %s',
+ repo_file)
+ templater.render_to_file(template_fn, repo_file, params)
+ # Clean up temporary template
+ util.del_file(template_fn)
+
+
+# vi: ts=4 expandtab
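
Note: for the third schema example above (Alpine ``edge`` with community, testing, and a local repo), REPOSITORIES_TEMPLATE renders roughly the following /etc/apk/repositories. The non-Edge warning block is skipped because alpine_version is 'edge'; exact blank-line placement depends on Jinja's whitespace trimming:

    #
    # Created by cloud-init
    #
    # This file is written on first boot of an instance
    #

    https://some-alpine-mirror/alpine/edge/main
    https://some-alpine-mirror/alpine/edge/community
    https://some-alpine-mirror/alpine/edge/testing

    #
    # Local repo
    #
    https://my-local-server/local-alpine/edge
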
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index c44dec45..73d8719f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -6,228 +6,372 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Configure
--------------
-**Summary:** configure apt
-
-This module handles both configuration of apt options and adding source lists.
-There are configuration options such as ``apt_get_wrapper`` and
-``apt_get_command`` that control how cloud-init invokes apt-get.
-These configuration options are handled on a per-distro basis, so consult
-documentation for cloud-init's distro support for instructions on using
-these config options.
-
-.. note::
- To ensure that apt configuration is valid yaml, any strings containing
- special characters, especially ``:`` should be quoted.
-
-.. note::
- For more information about apt configuration, see the
- ``Additional apt configuration`` example.
-
-**Preserve sources.list:**
-
-By default, cloud-init will generate a new sources list in
-``/etc/apt/sources.list.d`` based on any changes specified in cloud config.
-To disable this behavior and preserve the sources list from the pristine image,
-set ``preserve_sources_list`` to ``true``.
-
-.. note::
- The ``preserve_sources_list`` option overrides all other config keys that
- would alter ``sources.list`` or ``sources.list.d``, **except** for
- additional sources to be added to ``sources.list.d``.
-
-**Disable source suites:**
-
-Entries in the sources list can be disabled using ``disable_suites``, which
-takes a list of suites to be disabled. If the string ``$RELEASE`` is present in
-a suite in the ``disable_suites`` list, it will be replaced with the release
-name. If a suite specified in ``disable_suites`` is not present in
-``sources.list`` it will be ignored. For convenience, several aliases are
-provided for ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``
-
-.. note::
- When a suite is disabled using ``disable_suites``, its entry in
- ``sources.list`` is not deleted; it is just commented out.
-
-**Configure primary and security mirrors:**
-
-The primary and security archive mirrors can be specified using the ``primary``
-and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys
-take a list of configs, allowing mirrors to be specified on a per-architecture
-basis. Each config is a dictionary which must have an entry for ``arches``,
-specifying which architectures that config entry is for. The keyword
-``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``uri`` key, or a list of mirrors to check can be
-provided in order, with the first mirror that can be resolved being selected.
-This allows the same configuration to be used in different environment, with
-different hosts used for a local apt mirror. If no mirror is provided by
-``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
-the format ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata
- - localdomain
- - domains listed in ``/etc/resolv.conf``
-
-If there is a dns entry for ``<distro>-mirror``, then it is assumed that there
-is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the
-``primary`` key is defined, but not the ``security`` key, then then
-configuration for ``primary`` is also used for ``security``. If ``search_dns``
-is used for the ``security`` key, the search pattern will be.
-``<distro>-security-mirror``.
-
-If no mirrors are specified, or all lookups fail, then default mirrors defined
-in the datasource are used. If none are present in the datasource either the
-following defaults are used:
-
- - primary: ``http://archive.ubuntu.com/ubuntu``
- - security: ``http://security.ubuntu.com/ubuntu``
-
-**Specify sources.list template:**
-
-A custom template for rendering ``sources.list`` can be specefied with
-``sources_list``. If no ``sources_list`` template is given, cloud-init will
-use sane default. Within this template, the following strings will be replaced
-with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``
-
-**Pass configuration to apt:**
-
-Apt configuration can be specified using ``conf``. Configuration is specified
-as a string. For multiline apt configuration, make sure to follow yaml syntax.
-
-**Configure apt proxy:**
-
-Proxy configuration for apt can be specified using ``conf``, but proxy config
-keys also exist for convenience. The proxy config keys, ``http_proxy``,
-``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp
-and https protocols respectively. The ``proxy`` key also exists as an alias for
-``http_proxy``. Proxy url is specified in the format
-``<protocol>://[[user][:pass]@]host[:port]/``.
-
-**Add apt repos by regex:**
+"""Apt Configure: Configure apt for the user."""
-All source entries in ``apt-sources`` that match regex in
-``add_apt_repo_match`` will be added to the system using
-``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\\w-]+:\\w``
-
-**Add source list entries:**
-
-Source list entries can be specified as a dictionary under the ``sources``
-config key, with key in the dict representing a different source file. The key
-of each source entry will be used as an id that can be referenced in
-other config entries, as well as the filename for the source's configuration
-under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
-it will be appended. If there is no configuration for a key in ``sources``, no
-file will be written, but the key may still be referred to as an id in other
-``sources`` entries.
-
-Each entry under ``sources`` is a dictionary which may contain any of the
-following optional keys:
-
- - ``source``: a sources.list entry (some variable replacements apply)
- - ``keyid``: a key to import via shortid or fingerprint
- - ``key``: a raw PGP key
- - ``keyserver``: alternate keyserver to pull ``keyid`` key from
-
-The ``source`` key supports variable replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``
-
-**Internal name:** ``cc_apt_configure``
+import glob
+import os
+import re
+from textwrap import dedent
-**Module frequency:** per instance
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import gpg
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** ubuntu, debian
+LOG = logging.getLogger(__name__)
-**Config keys**::
+# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
- apt:
- preserve_sources_list: <true/false>
- disable_suites:
+frequency = PER_INSTANCE
+distros = ["ubuntu", "debian"]
+mirror_property = {
+ 'type': 'array',
+    'items': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['arches'],
+ 'properties': {
+ 'arches': {
+ 'type': 'array',
+            'items': {
+ 'type': 'string'
+ },
+ 'minItems': 1
+ },
+ 'uri': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'search': {
+ 'type': 'array',
+            'items': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'minItems': 1
+ },
+ 'search_dns': {
+ 'type': 'boolean',
+ }
+ }
+ }
+}
+schema = {
+ 'id': 'cc_apt_configure',
+ 'name': 'Apt Configure',
+ 'title': 'Configure apt for the user',
+ 'description': dedent("""\
+ This module handles both configuration of apt options and adding
+ source lists. There are configuration options such as
+ ``apt_get_wrapper`` and ``apt_get_command`` that control how
+ cloud-init invokes apt-get. These configuration options are
+ handled on a per-distro basis, so consult documentation for
+ cloud-init's distro support for instructions on using
+ these config options.
+
+ .. note::
+ To ensure that apt configuration is valid yaml, any strings
+        containing special characters, especially ``:``, should be quoted.
+
+ .. note::
+ For more information about apt configuration, see the
+ ``Additional apt configuration`` example."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ apt:
+ preserve_sources_list: false
+ disable_suites:
- $RELEASE-updates
- backports
- $RELEASE
- mysuite
- primary:
+ primary:
- arches:
- amd64
- i386
- default
- uri: "http://us.archive.ubuntu.com/ubuntu"
+ uri: 'http://us.archive.ubuntu.com/ubuntu'
search:
- - "http://cool.but-sometimes-unreachable.com/ubuntu"
- - "http://us.archive.ubuntu.com/ubuntu"
+ - 'http://cool.but-sometimes-unreachable.com/ubuntu'
+ - 'http://us.archive.ubuntu.com/ubuntu'
search_dns: <true/false>
- arches:
- s390x
- arm64
- uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
- security:
+ uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+ security:
- arches:
- default
search_dns: true
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- debconf_selections:
- set1: the-package the-package/some-flag boolean true
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ debconf_selections:
+ set1: the-package the-package/some-flag boolean true
+ conf: |
+ APT {
+ Get {
+ Assume-Yes 'true';
+ Fix-Broken 'true';
+ }
+ }
+ proxy: 'http://[[user][:pass]@]host[:port]/'
+ http_proxy: 'http://[[user][:pass]@]host[:port]/'
+ ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/'
+ https_proxy: 'https://[[user][:pass]@]host[:port]/'
+ sources:
+ source1:
+ keyid: 'keyid'
+ keyserver: 'keyserverurl'
+ source: 'deb http://<url>/ xenial main'
+ source2:
+ source: 'ppa:<ppa-name>'
+ source3:
+ source: 'deb $MIRROR $RELEASE multiverse'
+ key: |
+ ------BEGIN PGP PUBLIC KEY BLOCK-------
+ <key data>
+ ------END PGP PUBLIC KEY BLOCK-------""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apt': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'preserve_sources_list': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new sources
+ list in ``/etc/apt/sources.list.d`` based on any
+ changes specified in cloud config. To disable this
+ behavior and preserve the sources list from the
+ pristine image, set ``preserve_sources_list``
+ to ``true``.
+
+ The ``preserve_sources_list`` option overrides
+ all other config keys that would alter
+ ``sources.list`` or ``sources.list.d``,
+ **except** for additional sources to be added
+ to ``sources.list.d``.""")
+ },
+ 'disable_suites': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Entries in the sources list can be disabled using
+ ``disable_suites``, which takes a list of suites
+ to be disabled. If the string ``$RELEASE`` is
+ present in a suite in the ``disable_suites`` list,
+ it will be replaced with the release name. If a
+ suite specified in ``disable_suites`` is not
+ present in ``sources.list`` it will be ignored.
+ For convenience, several aliases are provided for
+ ``disable_suites``:
+
+ - ``updates`` => ``$RELEASE-updates``
+ - ``backports`` => ``$RELEASE-backports``
+ - ``security`` => ``$RELEASE-security``
+ - ``proposed`` => ``$RELEASE-proposed``
+ - ``release`` => ``$RELEASE``.
+
+ When a suite is disabled using ``disable_suites``,
+ its entry in ``sources.list`` is not deleted; it
+ is just commented out.""")
+ },
+ 'primary': {
+ **mirror_property,
+ 'description': dedent("""\
+ The primary and security archive mirrors can
+ be specified using the ``primary`` and
+ ``security`` keys, respectively. Both the
+ ``primary`` and ``security`` keys take a list
+ of configs, allowing mirrors to be specified
+ on a per-architecture basis. Each config is a
+ dictionary which must have an entry for
+ ``arches``, specifying which architectures
+ that config entry is for. The keyword
+ ``default`` applies to any architecture not
+ explicitly listed. The mirror url can be specified
+ with the ``uri`` key, or a list of mirrors to
+ check can be provided in order, with the first
+ mirror that can be resolved being selected. This
+ allows the same configuration to be used in
+                        different environments, with different hosts used
+ for a local apt mirror. If no mirror is provided
+ by ``uri`` or ``search``, ``search_dns`` may be
+ used to search for dns names in the format
+ ``<distro>-mirror`` in each of the following:
+
+ - fqdn of this host per cloud metadata,
+ - localdomain,
+ - domains listed in ``/etc/resolv.conf``.
+
+ If there is a dns entry for ``<distro>-mirror``,
+ then it is assumed that there is a distro mirror
+ at ``http://<distro>-mirror.<domain>/<distro>``.
+ If the ``primary`` key is defined, but not the
+                        ``security`` key, then the configuration for
+ ``primary`` is also used for ``security``.
+ If ``search_dns`` is used for the ``security``
+ key, the search pattern will be
+ ``<distro>-security-mirror``.
+
+ If no mirrors are specified, or all lookups fail,
+ then default mirrors defined in the datasource
+ are used. If none are present in the datasource
+                        either, the following defaults are used:
+
+ - ``primary`` => \
+ ``http://archive.ubuntu.com/ubuntu``.
+ - ``security`` => \
+ ``http://security.ubuntu.com/ubuntu``
+ """)},
+ 'security': {
+ **mirror_property,
+ 'description': dedent("""\
+                        Please refer to the primary config documentation.""")
+ },
+ 'add_apt_repo_match': {
+ 'type': 'string',
+ 'default': ADD_APT_REPO_MATCH,
+ 'description': dedent("""\
+ All source entries in ``apt-sources`` that match
+ regex in ``add_apt_repo_match`` will be added to
+ the system using ``add-apt-repository``. If
+ ``add_apt_repo_match`` is not specified, it
+ defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+ },
+ 'debconf_selections': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+                        Additional debconf configurations can be specified as a
+ dictionary under the ``debconf_selections`` config
+ key, with each key in the dict representing a
+ different set of configurations. The value of each key
+ must be a string containing all the debconf
+ configurations that must be applied. We will bundle
+ all of the values and pass them to
+ ``debconf-set-selections``. Therefore, each value line
+ must be a valid entry for ``debconf-set-selections``,
+                        meaning that they must possess four distinct fields:
+
+ ``pkgname question type answer``
+
+ Where:
+
+ - ``pkgname`` is the name of the package.
+                        - ``question`` is the name of the question.
+ - ``type`` is the type of question.
+                        - ``answer`` is the value used to answer the \
+ question.
+
+ For example: \
+                        ``ippackage ippackage/ip string 127.0.0.1``
+ """)
+ },
+ 'sources_list': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specifies a custom template for rendering
+                        ``sources.list``. If no ``sources_list`` template
+                        is given, cloud-init will use a sane default. Within
+ this template, the following strings will be
+ replaced with the appropriate values:
+
+ - ``$MIRROR``
+ - ``$RELEASE``
+ - ``$PRIMARY``
+ - ``$SECURITY``""")
+ },
+ 'conf': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specify configuration for apt, such as proxy
+ configuration. This configuration is specified as a
+ string. For multiline apt configuration, make sure
+ to follow yaml syntax.""")
+ },
+ 'https_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify https apt proxy.
+ https proxy url is specified in the format
+ ``https://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'http_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify http apt proxy.
+ http proxy url is specified in the format
+ ``http://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'proxy': {
+ 'type': 'string',
+                    'description': 'Alias for defining an http apt proxy.'
+ },
+ 'ftp_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify ftp apt proxy.
+ ftp proxy url is specified in the format
+ ``ftp://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'sources': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Source list entries can be specified as a
+ dictionary under the ``sources`` config key, with
+ each key in the dict representing a different source
+ file. The key of each source entry will be used
+ as an id that can be referenced in other config
+ entries, as well as the filename for the source's
+ configuration under ``/etc/apt/sources.list.d``.
+ If the name does not end with ``.list``, it will
+ be appended. If there is no configuration for a
+ key in ``sources``, no file will be written, but
+ the key may still be referred to as an id in other
+ ``sources`` entries.
+
+ Each entry under ``sources`` is a dictionary which
+ may contain any of the following optional keys:
+
+ - ``source``: a sources.list entry \
+ (some variable replacements apply).
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
+ The ``source`` key supports variable
+ replacements for the following strings:
+
+ - ``$MIRROR``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+ - ``$RELEASE``""")
}
}
- proxy: "http://[[user][:pass]@]host[:port]/"
- http_proxy: "http://[[user][:pass]@]host[:port]/"
- ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
- https_proxy: "https://[[user][:pass]@]host[:port]/"
- sources:
- source1:
- keyid: "keyid"
- keyserver: "keyserverurl"
- source: "deb http://<url>/ xenial main"
- source2:
- source: "ppa:<ppa-name>"
- source3:
- source: "deb $MIRROR $RELEASE multiverse"
- key: |
- ------BEGIN PGP PUBLIC KEY BLOCK-------
- <key data>
- ------END PGP PUBLIC KEY BLOCK-------
-"""
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import log as logging
-from cloudinit import templater
-from cloudinit import util
+ }
+ }
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_schema_doc(schema)
-# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
# place where apt stores cached repository data
APT_LISTS = "/var/lib/apt/lists"
@@ -279,6 +423,7 @@ def handle(name, ocfg, cloud, log, _):
"Expected dictionary for 'apt' config, found {config_type}".format(
config_type=type(cfg)))
+ validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -287,7 +432,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (util.which('apt-get') or util.which('apt')):
+ if not (subp.which('apt-get') or subp.which('apt')):
return False, "no apt commands."
return True, "Apt is available."
@@ -334,7 +479,7 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
if not selections.endswith(b'\n'):
selections += b'\n'
- util.subp(['debconf-set-selections'], data=selections, target=target,
+ subp.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -359,7 +504,7 @@ def dpkg_reconfigure(packages, target=None):
"but cannot be unconfigured: %s", unhandled)
if len(to_config):
- util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
list(to_config), data=None, target=target, capture=True)
@@ -402,7 +547,7 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -431,7 +576,7 @@ def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
default_mirrors = get_default_mirrors(arch)
- pre = util.target_path(target, APT_LISTS)
+ pre = subp.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
@@ -550,8 +695,8 @@ def add_apt_key_raw(key, target=None):
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
- except util.ProcessExecutionError:
+ subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
@@ -614,13 +759,13 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source], target=target)
- except util.ProcessExecutionError:
+ subp.subp(["add-apt-repository", source], target=target)
+ except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
continue
- sourcefn = util.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -763,25 +908,6 @@ def convert_to_v3_apt_format(cfg):
return cfg
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- if candidates is None:
- return None
-
- LOG.debug("search for mirror in candidates: '%s'", candidates)
- for cand in candidates:
- try:
- if util.is_resolvable_url(cand):
- LOG.debug("found working mirror: '%s'", cand)
- return cand
- except Exception:
- pass
- return None
-
-
def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
"""
Try to resolve a list of predefines DNS names to pick mirrors
@@ -813,7 +939,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = search_for_mirror(mirror_list)
+ mirror = util.search_for_mirror(mirror_list)
return mirror
@@ -876,7 +1002,7 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror(mcfg.get("search", None))
+ mirror = util.search_for_mirror(mcfg.get("search", None))
# fallback to search_dns if specified
if mirror is None:
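
Note: search_for_mirror is deleted here because it has moved into cloudinit.util (the call sites above switch to util.search_for_mirror). Judging from the removed implementation, the relocated function presumably keeps the same body, along these lines:

    def search_for_mirror(candidates):
        """Search through a list of mirror urls for one that works.

        This needs to return quickly.
        """
        if candidates is None:
            return None

        LOG.debug("search for mirror in candidates: '%s'", candidates)
        for cand in candidates:
            try:
                # is_resolvable_url() checks that the url's host resolves
                if is_resolvable_url(cand):
                    LOG.debug("found working mirror: '%s'", cand)
                    return cand
            except Exception:
                pass
        return None
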
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 225d0905..aa186ce2 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -9,7 +9,7 @@ Apt Pipelining
--------------
**Summary:** configure apt pipelining
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, whcih
+This module configures apt's ``Acquire::http::Pipeline-Depth`` option, which
controls how apt handles HTTP pipelining. It may be useful for pipelining to be
disabled, because some web servers, such as S3 do not pipeline properly (LP:
#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 6813f534..246e4497 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -16,6 +16,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -99,7 +100,7 @@ def handle(name, cfg, cloud, log, _args):
if iid:
env['INSTANCE_ID'] = str(iid)
cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
+ subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 0b4352c8..9fdaeba1 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -39,6 +39,7 @@ Valid configuration options for this module are:
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
distros = ['ubuntu', 'debian']
@@ -93,6 +94,6 @@ def handle(name, cfg, cloud, log, args):
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 64bc900e..3c453d91 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -16,11 +16,16 @@ can be removed from the system with the configuration option
certificates must be specified using valid yaml. in order to specify a
multiline certificate, the yaml multiline list syntax must be used
+.. note::
+ For Alpine Linux the "remove-defaults" functionality works if the
+ ca-certificates package is installed but not if the
+ ca-certificates-bundle package is installed.
+
**Internal name:** ``cc_ca_certs``
**Module frequency:** per instance
-**Supported distros:** ubuntu, debian
+**Supported distros:** alpine, debian, ubuntu
**Config keys**::
@@ -36,6 +41,7 @@ can be removed from the system with the configuration option
import os
+from cloudinit import subp
from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
@@ -44,14 +50,14 @@ CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-distros = ['ubuntu', 'debian']
+distros = ['alpine', 'debian', 'ubuntu']
def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- util.subp(["update-ca-certificates"], capture=False)
+ subp.subp(["update-ca-certificates"], capture=False)
def add_ca_certs(certs):
@@ -66,17 +72,23 @@ def add_ca_certs(certs):
cert_file_contents = "\n".join([str(c) for c in certs])
util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
+ if os.stat(CA_CERT_CONFIG).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % CA_CERT_FILENAME
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # causes subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(CA_CERT_CONFIG)
+ cur_cont = '\n'.join([line for line in orig.splitlines()
+ if line != CA_CERT_FILENAME])
+ out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
util.write_file(CA_CERT_CONFIG, out, omode="wb")
-def remove_default_ca_certs():
+def remove_default_ca_certs(distro_name):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
@@ -84,11 +96,14 @@ def remove_default_ca_certs():
util.delete_dir_contents(CA_CERT_PATH)
util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
+
+ if distro_name != 'alpine':
+ debconf_sel = (
+ "ca-certificates ca-certificates/trust_new_crts " + "select no")
+ subp.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -110,7 +125,7 @@ def handle(name, cfg, _cloud, log, _args):
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs()
+ remove_default_ca_certs(cloud.distro.name)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 01d61fa1..aaf71366 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -6,78 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Chef
-----
-**Summary:** module that configures, starts and installs chef.
-
-This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so-on). Then once installing proceeds correctly if configured chef will
-be started (in daemon mode or in non-daemon mode) and then once that has
-finished (if ran in non-daemon mode this will be when chef finishes
-converging, if ran in daemon mode then no further actions are possible since
-chef will have forked into its own process) then a post run function can
-run that can do finishing activities (such as removing the validation pem
-file).
-
-**Internal name:** ``cc_chef``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to file validation_key)
- special value 'system' means set use existing file
- validation_key: (optional the path for validation_cert. default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
- exec: boolean to run or not run chef (defaults to false, unless
- a gem installed is requested
- where this will then default
- to true)
-
- chef.rb template keys (if falsey, then will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- client_key:
- encrypted_data_bag_secret:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- omnibus_url:
- omnibus_url_retries:
- omnibus_version:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
+"""Chef: module that configures, starts and installs chef."""
import itertools
import json
import os
+from textwrap import dedent
+from cloudinit import subp
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import templater
+from cloudinit import temp_utils
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
RUBY_VERSION_DEFAULT = "1.8"
@@ -98,6 +42,8 @@ OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
+CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
+CHEF_ENVIRONMENT = '_default'
CHEF_FB_PATH = '/etc/chef/firstboot.json'
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
@@ -107,11 +53,11 @@ CHEF_RB_TPL_DEFAULTS = {
'log_location': '/var/log/chef/client.log',
'validation_key': CHEF_VALIDATION_PEM_PATH,
'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
+ 'client_key': '/etc/chef/client.pem',
'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
+ 'file_cache_path': '/var/cache/chef',
+ 'file_backup_path': '/var/backups/chef',
+ 'pid_file': '/var/run/chef/client.pid',
'show_time': True,
'encrypted_data_bag_secret': None,
}
@@ -122,9 +68,9 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'client_key',
'file_cache_path',
'json_attribs',
- 'file_cache_path',
'pid_file',
'encrypted_data_bag_secret',
+ 'chef_license',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -141,12 +87,277 @@ CHEF_EXEC_PATH = '/usr/bin/chef-client'
CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
+frequency = PER_ALWAYS
+distros = ["all"]
+schema = {
+ 'id': 'cc_chef',
+ 'name': 'Chef',
+ 'title': 'module that configures, starts and installs chef',
+ 'description': dedent("""\
+ This module enables chef to be installed (from packages,
+ gems, or from omnibus). Before this occurs, chef configuration is
+ written to disk (validation.pem, client.pem, firstboot.json,
+ client.rb), and required directories are created (/etc/chef and
+ /var/log/chef and so-on). If configured, chef will be
+ installed and started in either daemon or non-daemon mode.
+ If run in non-daemon mode, post run actions are executed to do
+ finishing activities such as removing validation.pem."""),
+ 'distros': distros,
+ 'examples': [dedent("""
+ chef:
+ directories:
+ - /etc/chef
+ - /var/log/chef
+ validation_cert: system
+ install_type: omnibus
+ initial_attributes:
+ apache:
+ prefork:
+ maxclients: 100
+ keepalive: off
+ run_list:
+ - recipe[apache2]
+ - role[db]
+ encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+ environment: _default
+ log_level: :auto
+ omnibus_url_retries: 2
+ server_url: https://chef.yourorg.com:4000
+ ssl_verify_mode: :verify_peer
+ validation_name: yourorg-validator""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'chef': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'directories': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Create the necessary directories for chef to run. By
+ default, it creates the following directories:
+
+ {chef_dirs}""").format(
+ chef_dirs="\n".join(
+ [" - ``{}``".format(d) for d in CHEF_DIRS]
+ )
+ )
+ },
+ 'validation_cert': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional string to be written to file validation_key.
+                        Special value ``system`` means use the existing file.
+ """)
+ },
+ 'validation_key': {
+ 'type': 'string',
+ 'default': CHEF_VALIDATION_PEM_PATH,
+ 'description': dedent("""\
+                        Optional path for validation_cert. Defaults to
+ ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+ },
+ 'firstboot_path': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Path to write run_list and initial_attributes keys that
+ should also be present in this configuration, defaults
+ to ``{}``.""".format(CHEF_FB_PATH))
+ },
+ 'exec': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+                        Define whether chef should be run (defaults to
+                        false, unless a gem install is requested, in which
+                        case this defaults to true).""")
+ },
+ 'client_key': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
+ 'description': dedent("""\
+                        Optional path for client_cert. Defaults to
+ ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+ },
+ 'encrypted_data_bag_secret': {
+ 'type': 'string',
+ 'default': None,
+ 'description': dedent("""\
+ Specifies the location of the secret key used by chef
+ to encrypt data items. By default, this path is set
+ to None, meaning that chef will have to look at the
+ path ``{}`` for it.
+ """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+ },
+ 'environment': {
+ 'type': 'string',
+ 'default': CHEF_ENVIRONMENT,
+ 'description': dedent("""\
+ Specifies which environment chef will use. By default,
+ it will use the ``{}`` configuration.
+ """.format(CHEF_ENVIRONMENT))
+ },
+ 'file_backup_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
+ 'description': dedent("""\
+ Specifies the location in which backup files are
+ stored. By default, it uses the
+ ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+ },
+ 'file_cache_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
+ 'description': dedent("""\
+ Specifies the location in which chef cache files will
+ be saved. By default, it uses the ``{}``
+ location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+ },
+ 'json_attribs': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Specifies the location in which some chef json data is
+ stored. By default, it uses the
+ ``{}`` location.""".format(CHEF_FB_PATH))
+ },
+ 'log_level': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
+ 'description': dedent("""\
+ Defines the level of logging to be stored in the log
+ file. By default this value is set to ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+ },
+ 'log_location': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
+ 'description': dedent("""\
+                        Specifies the location of the chef log file. By
+ default, the location is specified at
+ ``{}``.""".format(
+ CHEF_RB_TPL_DEFAULTS['log_location']))
+ },
+ 'node_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the node to run. By default, we will
+                        use the instance id as the node name.""")
+ },
+ 'omnibus_url': {
+ 'type': 'string',
+ 'default': OMNIBUS_URL,
+ 'description': dedent("""\
+ Omnibus URL if chef should be installed through
+ Omnibus. By default, it uses the
+ ``{}``.""".format(OMNIBUS_URL))
+ },
+ 'omnibus_url_retries': {
+ 'type': 'integer',
+ 'default': OMNIBUS_URL_RETRIES,
+ 'description': dedent("""\
+ The number of retries that will be attempted to reach
+                        the Omnibus URL.""")
+ },
+ 'omnibus_version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional version string to require for omnibus
+ install.""")
+ },
+ 'pid_file': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
+ 'description': dedent("""\
+ The location in which a process identification
+ number (pid) is saved. By default, it saves
+ in the ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['pid_file']))
+ },
+ 'server_url': {
+ 'type': 'string',
+ 'description': 'The URL for the chef server'
+ },
+ 'show_time': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': 'Show time in chef logs'
+ },
+ 'ssl_verify_mode': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
+ 'description': dedent("""\
+ Set the verify mode for HTTPS requests. We can have
+ two possible values for this parameter:
+
+ - ``:verify_none``: No validation of SSL \
+ certificates.
+ - ``:verify_peer``: Validate all SSL certificates.
+
+ By default, the parameter is set as ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+ },
+ 'validation_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the chef-validator key that Chef Infra
+ Client uses to access the Chef Infra Server during
+ the initial Chef Infra Client run.""")
+ },
+ 'force_install': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ If set to ``True``, forces chef installation, even
+ if it is already installed.""")
+ },
+ 'initial_attributes': {
+ 'type': 'object',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': dedent("""\
+                        Specify a dictionary of initial attributes used by the
+ cookbooks.""")
+ },
+ 'install_type': {
+ 'type': 'string',
+ 'default': 'packages',
+ 'description': dedent("""\
+ The type of installation for chef. It can be one of
+ the following values:
+
+ - ``packages``
+ - ``gems``
+ - ``omnibus``""")
+ },
+ 'run_list': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': 'A run list for a first boot json.'
+ },
+ "chef_license": {
+ 'type': 'string',
+ 'description': dedent("""\
+                        A string that indicates whether the user accepts the
+                        license associated with certain Chef products.""")
+ }
+ }
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
def post_run_chef(chef_cfg, log):
@@ -196,6 +407,8 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s,"
" no 'chef' key in configuration"), name)
return
+
+ validate_cloudconfig_schema(cfg, schema)
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
@@ -223,7 +436,7 @@ def handle(name, cfg, cloud, log, _args):
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
# Do a best effort attempt to ensure that the template values that
- # are associated with paths have there parent directory created
+ # are associated with paths have their parent directory created
# before they are used by the chef-client itself.
param_paths = set()
for (k, v) in params.items():
@@ -253,9 +466,10 @@ def handle(name, cfg, cloud, log, _args):
# Try to install chef, if its not already installed...
force_install = util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)
- if not is_installed() or force_install:
+ installed = subp.is_exe(CHEF_EXEC_PATH)
+ if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
+ elif installed:
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
else:
run = False
@@ -280,7 +494,32 @@ def run_chef(chef_cfg, log):
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
+
+
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+ """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+ 'basename' as a kwarg allows providing the basename for the file.
+ The 'args' argument to subp will be updated with the full path to the
+ filename as the first argument.
+ """
+ basename = kwargs.pop('basename', "subp_blob")
+
+ if len(args) == 0 and 'args' not in kwargs:
+ args = [tuple()]
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, basename)
+ if 'args' in kwargs:
+ kwargs['args'] = [tmpf] + list(kwargs['args'])
+ else:
+ args = list(args)
+            args[0] = [tmpf] + list(args[0])
+
+ util.write_file(tmpf, blob, mode=0o700)
+ return subp.subp(*args, **kwargs)
def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
@@ -303,7 +542,7 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
else:
args = ['-v', omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
- return util.subp_blob_in_tempfile(
+ return subp_blob_in_tempfile(
blob=content, args=args,
basename='chef-omnibus-install', capture=False)
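
Note: subp_blob_in_tempfile above is a local copy of the helper formerly provided as util.subp_blob_in_tempfile. A usage sketch matching the omnibus call site (the script content and version string are stand-ins):

    content = b'#!/bin/sh\necho "omnibus installer stub"\n'
    # Writes content to <tmpdir>/chef-omnibus-install with mode 0o700,
    # then runs: <tmpdir>/chef-omnibus-install -v 15.8.23
    subp_blob_in_tempfile(
        blob=content,
        args=['-v', '15.8.23'],  # illustrative omnibus_version
        basename='chef-omnibus-install',
        capture=False)
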
@@ -352,11 +591,11 @@ def install_chef_from_gems(ruby_version, chef_version, distro):
if not os.path.exists('/usr/bin/ruby'):
util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 885b3138..dff93245 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -26,6 +26,7 @@ by default.
disable_ec2_metadata: <true/false>
"""
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
@@ -40,15 +41,15 @@ def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if util.which('ip'):
+ if subp.which('ip'):
reject_cmd = REJECT_CMD_IP
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
reject_cmd = REJECT_CMD_IF
else:
log.error(('Neither "route" nor "ip" command found, unable to '
'manipulate routing table'))
return
- util.subp(reject_cmd, capture=False)
+ subp.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 0796cb7b..a7bdc703 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -99,6 +99,7 @@ specified using ``filesystem``.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit import subp
import logging
import os
import shlex
@@ -106,13 +107,13 @@ import shlex
frequency = PER_INSTANCE
# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
+UDEVADM_CMD = subp.which('udevadm')
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+WIPEFS_CMD = subp.which("wipefs")
LANG_C_ENV = {'LANG': 'C'}
@@ -163,7 +164,7 @@ def handle(_name, cfg, cloud, log, _args):
def update_disk_setup_devices(disk_setup, tformer):
    # update 'disk_setup' dictionary anywhere where a device may occur
# update it with the response from 'tformer'
- for origname in disk_setup.keys():
+ for origname in list(disk_setup):
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
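
The keys() -> list() change above avoids mutating a dict while iterating its live key view; a generic, standalone sketch of the Python 3 hazard with invented data:

    d = {'ephemeral0': 'x', 'swap': 'y'}
    try:
        for key in d.keys():
            del d[key]          # mutates the dict behind its live key view
    except RuntimeError:
        pass                    # "dictionary changed size during iteration"

    d = {'ephemeral0': 'x', 'swap': 'y'}
    for key in list(d):         # snapshot of the keys: safe to mutate
        del d[key]
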
@@ -248,9 +249,11 @@ def enumerate_disk(device, nodeps=False):
info = None
try:
- info, _err = util.subp(lsblk_cmd)
+ info, _err = subp.subp(lsblk_cmd)
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
@@ -310,9 +313,11 @@ def check_fs(device):
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
if out:
if len(out.splitlines()) == 1:
@@ -427,16 +432,16 @@ def get_dyn_func(*args):
else:
return globals()[func_name]
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
+ except KeyError as e:
+ raise Exception("No such function %s to call!" % func_name) from e
def get_hdd_size(device):
try:
- size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
+ raise Exception("Failed to get %s size\n%s" % (device, e)) from e
return int(size_in_bytes) / int(sector_size)
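
The recurring `raise ... from e` edits in this file use PEP 3134 exception chaining so the original failure is kept on the new exception; a standalone sketch, with an invented stand-in for the failing call:

    def get_hdd_size_demo(device):
        try:
            raise FileNotFoundError(device)  # stand-in for a failed blockdev run
        except Exception as e:
            raise Exception("Failed to get %s size" % device) from e

    try:
        get_hdd_size_demo('/dev/sda')
    except Exception as wrapped:
        assert isinstance(wrapped.__cause__, FileNotFoundError)
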
@@ -452,10 +457,11 @@ def check_partition_mbr_layout(device, layout):
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
found_layout = []
for line in out.splitlines():
@@ -482,10 +488,11 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
- out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV)
+ out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
out_lines = iter(out.splitlines())
# Skip header. Output looks like:
@@ -655,9 +662,11 @@ def purge_disk(device):
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s", d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
+ subp.subp(wipefs_cmd)
+ except Exception as e:
+ raise Exception(
+ "Failed FS purge of /dev/%s" % d['name']
+ ) from e
purge_disk_ptable(device)
@@ -682,7 +691,7 @@ def read_parttbl(device):
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
util.udevadm_settle()
try:
- util.subp(blkdev_cmd)
+ subp.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -697,25 +706,27 @@ def exec_mkpart_mbr(device, layout):
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device]
try:
- util.subp(prt_cmd, data="%s\n" % layout)
+ subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
+ raise Exception(
+ "Failed to partition device %s\n%s" % (device, e)
+ ) from e
read_parttbl(device)
def exec_mkpart_gpt(device, layout):
try:
- util.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, '-Z', device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- util.subp([SGDISK_CMD,
+ subp.subp([SGDISK_CMD,
'-n', '{}:{}:{}'.format(index, start, end), device])
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
- util.subp(
+ subp.subp(
[SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
except Exception:
LOG.warning("Failed to partition device %s", device)
@@ -967,9 +978,9 @@ def mkfs(fs_cfg):
fs_cmd)
else:
# Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ mkfs_cmd = subp.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
+ mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
@@ -994,8 +1005,8 @@ def mkfs(fs_cfg):
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", str(fs_cmd))
try:
- util.subp(fs_cmd, shell=shell)
+ subp.subp(fs_cmd, shell=shell)
except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
+ raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b342e04d..b1d99f97 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -25,7 +25,7 @@ import os
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
+from cloudinit import subp
frequency = PER_ALWAYS
@@ -43,9 +43,9 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, _err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = subp.subp(check_cmd, env=myenv)
return 'upstart' in out
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
' '.join(check_cmd), e.exit_code)
return False
@@ -66,7 +66,7 @@ def handle(name, _cfg, cloud, log, args):
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 0a135bbe..77984bca 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -39,6 +39,7 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -62,8 +63,8 @@ def stop_update_start(service, config_file, content, systemd=False):
def run(cmd, msg):
try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ return subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning("failed: %s (%s): %s", service, cmd, e)
return False
@@ -94,7 +95,7 @@ def handle(name, cfg, cloud, log, args):
util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
distro = cloud.distro
- if not util.which('fanctl'):
+ if not subp.which('fanctl'):
distro.install_packages(['ubuntu-fan'])
stop_update_start(
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index fd141541..3441f7a9 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -78,7 +78,7 @@ def handle(_name, cfg, cloud, log, args):
boot_fin_fn = cloud.paths.boot_finished
try:
contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
+ util.write_file(boot_fin_fn, contents, ensure_dir_exists=False)
except Exception:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 1b512a06..237c3d02 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -70,6 +70,7 @@ import stat
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -131,30 +132,30 @@ class ResizeGrowPart(object):
myenv['LANG'] = 'C'
try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+ (out, _err) = subp.subp(["growpart", "--help"], env=myenv)
if re.search(r"--update\s+", out):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", '--dry-run', diskdev, partnum])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, before)
try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", diskdev, partnum])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
@@ -165,11 +166,11 @@ class ResizeGpart(object):
myenv['LANG'] = 'C'
try:
- (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
if re.search(r"gpart recover ", err):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
@@ -182,18 +183,18 @@ class ResizeGpart(object):
be recovered.
"""
try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "recover", diskdev])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 0:
util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
before = get_size(partdev)
try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
# Since growing the FS requires a reboot, make sure we reboot
# first when this module has finished.
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index a323edfa..eb03c664 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -1,8 +1,9 @@
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2009-2010, 2020 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Matthew Ruffell <matthew.ruffell@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -15,15 +16,15 @@ Configure which device is used as the target for grub installation. This module
should work correctly by default without any user configuration. It can be
enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. If no
-installation device is specified this module will look for the first existing
-device in:
+installation device is specified, this module will execute grub-probe to
+determine which disk the /boot directory is associated with.
- - ``/dev/sda``
- - ``/dev/vda``
- - ``/dev/xvda``
- - ``/dev/sda1``
- - ``/dev/vda1``
- - ``/dev/xvda1``
+The value which is placed into the debconf database is in the format which the
+grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value,
+but we fall back to the plain disk name if a by-id name is not present.
+
+If this module is executed inside a container, then the debconf database is
+seeded with empty values, and install_devices_empty is set to true.
**Internal name:** ``cc_grub_dpkg``
@@ -42,11 +43,68 @@ device in:
import os
+from cloudinit import subp
from cloudinit import util
+from cloudinit.subp import ProcessExecutionError
distros = ['ubuntu', 'debian']
+def fetch_idevs(log):
+ """
+ Fetches the /dev/disk/by-id device grub is installed to.
+ Falls back to plain disk name if no by-id entry is present.
+ """
+ disk = ""
+ devices = []
+
+ try:
+ # get the root disk where the /boot directory resides.
+ disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
+ capture=True)[0].strip()
+ except ProcessExecutionError as e:
+ # grub-common may not be installed, especially on containers
+ # FileNotFoundError is a nested exception of ProcessExecutionError
+ if isinstance(e.reason, FileNotFoundError):
+ log.debug("'grub-probe' not found in $PATH")
+ # disks from the container host are present in /proc and /sys
+ # which is where grub-probe determines where /boot is.
+ # it then checks for existence in /dev, which fails as host disks
+ # are not exposed to the container.
+ elif "failed to get canonical path" in e.stderr:
+ log.debug("grub-probe 'failed to get canonical path'")
+ else:
+ # something bad has happened, continue to log the error
+ raise
+ except Exception:
+ util.logexc(log, "grub-probe failed to execute for grub-dpkg")
+
+ if not disk or not os.path.exists(disk):
+ # If we failed to detect a disk, we can return early
+ return ''
+
+ try:
+ # check if disk exists and use udevadm to fetch symlinks
+ devices = subp.subp(
+ ['udevadm', 'info', '--root', '--query=symlink', disk],
+ capture=True
+ )[0].strip().split()
+ except Exception:
+ util.logexc(
+ log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
+ )
+
+ log.debug('considering these device symlinks: %s', ','.join(devices))
+ # filter symlinks for /dev/disk/by-id entries
+ devices = [dev for dev in devices if 'disk/by-id' in dev]
+ log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ # select first device if there is one, else fall back to plain name
+ idevs = sorted(devices)[0] if devices else disk
+ log.debug('selected %s', idevs)
+
+ return idevs
+
+
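
A walk-through of the by-id filtering above, using invented udevadm symlink output:

    devices = [
        '/dev/disk/by-path/pci-0000:00:04.0',
        '/dev/disk/by-id/virtio-cloudimg-rootfs',
        '/dev/disk/by-id/scsi-SQEMU_QEMU_HARDDISK',
    ]
    disk = '/dev/vda'                   # plain-name fallback from grub-probe
    by_id = [dev for dev in devices if 'disk/by-id' in dev]
    idevs = sorted(by_id)[0] if by_id else disk
    assert idevs == '/dev/disk/by-id/scsi-SQEMU_QEMU_HARDDISK'
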
def handle(name, cfg, _cloud, log, _args):
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
@@ -62,22 +120,10 @@ def handle(name, cfg, _cloud, log, _args):
idevs_empty = util.get_cfg_option_str(
mycfg, "grub-pc/install_devices_empty", None)
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
+ if idevs is None:
+ idevs = fetch_idevs(log)
+ if idevs_empty is None:
+ idevs_empty = "false" if idevs else "true"
# now idevs and idevs_empty are set to determined values
# or, those set by user
@@ -90,7 +136,7 @@ def handle(name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
try:
- util.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(['debconf-set-selections'], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
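
For reference, a sketch of the selections text piped to debconf-set-selections above; the by-id value is invented and the exact template strings are an assumption (they are not shown in this hunk):

    idevs = '/dev/disk/by-id/virtio-cloudimg-rootfs'
    idevs_empty = 'false'
    dconf_sel = (
        "grub-pc grub-pc/install_devices string %s\n"
        "grub-pc grub-pc/install_devices_empty boolean %s\n"
    ) % (idevs, idevs_empty)
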
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 3d2ded3d..0f2be52b 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -33,6 +33,7 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
import os
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -64,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
try:
cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index a9c04d86..299c4d01 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -61,6 +61,7 @@ from io import BytesIO
from configobj import ConfigObj
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -116,7 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
+ subp.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index f68c3cc7..4f8b7bf6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -6,27 +6,58 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Locale
-------
-**Summary:** set system locale
+"""Locale: set system locale"""
-Configure the system locale and apply it system wide. By default use the locale
-specified by the datasource.
+from textwrap import dedent
-**Internal name:** ``cc_locale``
-
-**Module frequency:** per instance
+from cloudinit import util
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** all
-**Config keys**::
+frequency = PER_INSTANCE
+distros = ['all']
+schema = {
+ 'id': 'cc_locale',
+ 'name': 'Locale',
+ 'title': 'Set system locale',
+ 'description': dedent(
+ """\
+ Configure the system locale and apply it system wide. By default use
+ the locale specified by the datasource."""
+ ),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Set the locale to ar_AE
+ locale: ar_AE
+ """),
+ dedent("""\
+ # Set the locale to fr_CA in /etc/alternate_path/locale
+ locale: fr_CA
+ locale_configfile: /etc/alternate_path/locale
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'locale': {
+ 'type': 'string',
+ 'description': (
+ "The locale to set as the system's locale (e.g. ar_PS)"
+ ),
+ },
+ 'locale_configfile': {
+ 'type': 'string',
+ 'description': (
+ "The file in which to write the locale configuration (defaults"
+ " to the distro's default location)"
+ ),
+ },
+ },
+}
- locale: <locale str>
- locale_configfile: <path to locale config file>
-"""
-
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -40,6 +71,8 @@ def handle(name, cfg, cloud, log, args):
name, locale)
return
+ validate_cloudconfig_schema(cfg, schema)
+
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
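
A sketch of exercising the schema above directly, assuming the imports shown in this hunk; with the default strict=False, violations are logged as warnings rather than raised:

    from cloudinit.config.schema import validate_cloudconfig_schema

    cfg = {'locale': 'fr_CA', 'locale_configfile': '/etc/alternate_path/locale'}
    validate_cloudconfig_schema(cfg, schema)  # schema as defined above
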
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 151a9844..7129c9c6 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -48,6 +48,7 @@ lxd-bridge will be configured accordingly.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import os
@@ -85,16 +86,16 @@ def handle(name, cfg, cloud, log, args):
# Install the needed packages
packages = []
- if not util.which("lxd"):
+ if not subp.which("lxd"):
packages.append('lxd')
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
+ except subp.ProcessExecutionError as exc:
log.warning("failed to install packages %s: %s", packages, exc)
return
@@ -104,20 +105,20 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
- util.subp(['lxd', 'waitready', '--timeout=300'])
+ subp.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
cmd.extend(["--%s=%s" %
(k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
+ subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
- and util.which(dconf_comm):
+ and subp.which(dconf_comm):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -127,7 +128,7 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting lxd debconf via " + dconf_comm)
data = "\n".join(["set %s %s" % (k, v)
for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
+ subp.subp(['debconf-communicate'], data)
except Exception:
util.logexc(log, "Failed to run '%s' for lxd with" %
dconf_comm)
@@ -137,7 +138,7 @@ def handle(name, cfg, cloud, log, args):
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
+ subp.subp(['dpkg-reconfigure', 'lxd',
'--frontend=noninteractive'])
else:
# Built-in LXD bridge support
@@ -264,7 +265,7 @@ def _lxc(cmd):
env = {'LC_ALL': 'C',
'HOME': os.environ.get('HOME', '/root'),
'USER': os.environ.get('USER', 'root')}
- util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
def maybe_cleanup_default(net_name, did_init, create, attach,
@@ -286,7 +287,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["network", "delete", net_name])
LOG.debug(msg, net_name, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, net_name, fail_assume_enoent)
@@ -296,7 +297,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["profile", "device", "remove", profile, nic_name])
LOG.debug(msg, nic_name, profile, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 351183f1..41ea4fc9 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,6 +56,7 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
@@ -140,6 +141,6 @@ def handle(name, cfg, cloud, log, _args):
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 4ae3f1fc..54f2f878 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -65,15 +65,19 @@ swap file is created.
from string import whitespace
import logging
-import os.path
+import os
import re
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
+# Name matches 'server:/path'
+NETWORK_NAME_FILTER = r"^.+:.*"
+NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
MNT_COMMENT = "comment=cloudconfig"
@@ -93,6 +97,13 @@ def is_meta_device_name(name):
return False
+def is_network_device(name):
+ # return true if this is a network device
+ if NETWORK_NAME_RE.match(name):
+ return True
+ return False
+
+
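
A quick check of the new matcher with illustrative names; only the 'server:/path' form counts as a network device, which sanitize_devname() then returns untransformed:

    import re

    NETWORK_NAME_RE = re.compile(r"^.+:.*")
    for name in ('nfs.example.com:/srv/share', '/dev/sda1', 'ephemeral0'):
        print(name, bool(NETWORK_NAME_RE.match(name)))
    # -> only 'nfs.example.com:/srv/share' matches
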
def _get_nth_partition_for_device(device_path, partition_number):
potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
'-part%s' % (partition_number,)]
@@ -122,6 +133,9 @@ def sanitize_devname(startname, transformer, log):
devname = "ephemeral0"
log.debug("Adjusted mount option from ephemeral to ephemeral0")
+ if is_network_device(startname):
+ return startname
+
device_path, partition_number = util.expand_dotted_devname(devname)
if is_meta_device_name(device_path):
@@ -223,24 +237,24 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
-def create_swapfile(fname, size):
+def create_swapfile(fname: str, size: str) -> None:
"""Size is in MiB."""
- errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+ errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
fname, fstype, method)
if method == "fallocate":
- cmd = ['fallocate', '-l', '%dM' % size, fname]
+ cmd = ['fallocate', '-l', '%sM' % size, fname]
elif method == "dd":
cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%d' % size]
+ 'count=%s' % size]
try:
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, method, e)
util.del_file(fname)
@@ -249,20 +263,22 @@ def create_swapfile(fname, size):
fstype = util.get_mount_info(swap_dir)[1]
- if fstype in ("xfs", "btrfs"):
+ if (fstype == "xfs" and
+ util.kernel_version() < (4, 18)) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
create_swap(fname, size, "fallocate")
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, "dd", e)
LOG.warning("Will attempt with dd.")
create_swap(fname, size, "dd")
- util.chmod(fname, 0o600)
+ if os.path.exists(fname):
+ util.chmod(fname, 0o600)
try:
- util.subp(['mkswap', fname])
- except util.ProcessExecutionError:
+ subp.subp(['mkswap', fname])
+ except subp.ProcessExecutionError:
util.del_file(fname)
raise
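
The dd-versus-fallocate rule above, restated as a standalone sketch; the xfs cutoff reflects that swapon rejects fallocate'd swap files on pre-4.18 kernels, and on btrfs generally:

    def pick_swap_method(fstype, kver):
        if fstype == "btrfs" or (fstype == "xfs" and kver < (4, 18)):
            return "dd"
        return "fallocate"  # faster; create_swap falls back to dd on failure

    assert pick_swap_method("xfs", (4, 15)) == "dd"
    assert pick_swap_method("xfs", (5, 4)) == "fallocate"
    assert pick_swap_method("ext4", (4, 4)) == "fallocate"
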
@@ -274,7 +290,6 @@ def setup_swapfile(fname, size=None, maxsize=None):
maxsize: the maximum size
"""
swap_dir = os.path.dirname(fname)
- mibsize = str(int(size / (2 ** 20)))
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -286,6 +301,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
+ mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
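
Why the mibsize computation moved below the "auto" handling, as a sketch with illustrative values: the old order divided before "auto" could be resolved to a number.

    size = "auto"
    if str(size).lower() == "auto":
        size = 2 * 2 ** 30                # pretend suggested_swapsize() said 2 GiB
    mibsize = str(int(size / (2 ** 20)))  # safe now that size is numeric
    assert mibsize == '2048'
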
@@ -365,17 +381,18 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs = {}
fstab_removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- if MNT_COMMENT in line:
- fstab_removed.append(line)
- continue
+ if os.path.exists(FSTAB_PATH):
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
- try:
- toks = WS.split(line)
- except Exception:
- pass
- fstab_devs[toks[0]] = line
- fstab_lines.append(line)
+ try:
+ toks = WS.split(line)
+ except Exception:
+ pass
+ fstab_devs[toks[0]] = line
+ fstab_lines.append(line)
for i in range(len(cfgmnt)):
# skip something that wasn't a list
@@ -525,9 +542,9 @@ def handle(_name, cfg, cloud, log, _args):
for cmd in activate_cmds:
fmt = "Activate mounts: %s:" + ' '.join(cmd)
try:
- util.subp(cmd)
+ subp.subp(cmd)
log.debug(fmt, "PASS")
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 5498bbaa..3d7279d6 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -14,6 +14,7 @@ from cloudinit import log as logging
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
@@ -23,7 +24,8 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
+ 'sles', 'ubuntu']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -62,6 +64,17 @@ NTP_CLIENT_CONFIG = {
# This is Distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
+ 'alpine': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'packages': [],
+ 'service_name': 'ntpd',
+ },
+ },
'debian': {
'chrony': {
'confpath': '/etc/chrony/chrony.conf',
@@ -113,11 +126,11 @@ schema = {
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
- distro's ntp package, it will be copied to ``/etc/ntp.conf.dist``
- before any changes are made. A list of ntp pools and ntp servers can
- be provided under the ``ntp`` config key. If no ntp ``servers`` or
- ``pools`` are provided, 4 pools will be used in the format
- ``{0-3}.{distro}.pool.ntp.org``."""),
+ distro's ntp package, it will be copied to a file with ``.dist``
+ appended to the filename before any changes are made. A list of ntp
+ pools and ntp servers can be provided under the ``ntp`` config key.
+ If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""),
'distros': distros,
'examples': [
dedent("""\
@@ -169,8 +182,11 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
+ for Alpine Linux when using the Busybox NTP client
+ this setting will be ignored due to the limited
+ functionality of Busybox's ntpd.""")
},
'servers': {
'type': 'array',
@@ -181,46 +197,46 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
},
'ntp_client': {
'type': 'string',
'default': 'auto',
'description': dedent("""\
Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
},
'enabled': {
'type': 'boolean',
'default': True,
'description': dedent("""\
Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""),
+ to False, ntp client will not be configured or
+ installed"""),
},
'config': {
'description': dedent("""\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
+ ``ntp_client`` specified."""),
'type': ['object'],
'properties': {
'confpath': {
'type': 'string',
'description': dedent("""\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""),
},
'check_exe': {
'type': 'string',
'description': dedent("""\
The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
},
'packages': {
'type': 'array',
@@ -230,22 +246,22 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""),
},
'service_name': {
'type': 'string',
'description': dedent("""\
The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""),
+ start and stop the ``ntp_client``
+ service."""),
},
'template': {
'type': 'string',
'description': dedent("""\
Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
"""),
},
},
@@ -307,7 +323,7 @@ def select_ntp_client(ntp_client, distro):
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if util.which(cfg.get('check_exe')):
+ if subp.which(cfg.get('check_exe')):
LOG.debug('Selected NTP client "%s", already installed',
client)
clientcfg = cfg
@@ -336,7 +352,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
    @param check_exe: string. The name of a binary whose presence indicates
        that the specified package is already installed.
"""
- if util.which(check_exe):
+ if subp.which(check_exe):
return
if packages is None:
packages = ['ntp']
@@ -363,21 +379,30 @@ def generate_server_names(distro):
"""
names = []
pool_distro = distro
- # For legal reasons x.pool.sles.ntp.org does not exist,
- # use the opensuse pool
+
if distro == 'sles':
+ # For legal reasons x.pool.sles.ntp.org does not exist,
+ # use the opensuse pool
pool_distro = 'opensuse'
+ elif distro == 'alpine':
+ # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
+ # so use general x.pool.ntp.org instead.
+ pool_distro = ''
+
for x in range(0, NR_POOL_SERVERS):
- name = "%d.%s.pool.ntp.org" % (x, pool_distro)
- names.append(name)
+ names.append(".".join(
+ [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+
return names
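
Worked outputs of the name generation above, restated standalone; the empty Alpine segment is dropped by the join:

    def names_for(pool_distro, count=4):
        return [".".join(n for n in [str(x), pool_distro, "pool.ntp.org"] if n)
                for x in range(count)]

    assert names_for("debian")[0] == "0.debian.pool.ntp.org"
    assert names_for("opensuse")[3] == "3.opensuse.pool.ntp.org"  # sles alias
    assert names_for("")[0] == "0.pool.ntp.org"                   # alpine case
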
-def write_ntp_config_template(distro_name, servers=None, pools=None,
- path=None, template_fn=None, template=None):
+def write_ntp_config_template(distro_name, service_name=None, servers=None,
+ pools=None, path=None, template_fn=None,
+ template=None):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
+ @param service_name: string. The name of the NTP client service.
@param servers: A list of strings specifying ntp servers. Defaults to empty
list.
@param pools: A list of strings specifying ntp pools. Defaults to empty
@@ -396,7 +421,14 @@ def write_ntp_config_template(distro_name, servers=None, pools=None,
if not pools:
pools = []
- if len(servers) == 0 and len(pools) == 0:
+ if (len(servers) == 0 and distro_name == 'alpine' and
+ service_name == 'ntpd'):
+ # Alpine's Busybox ntpd only understands "servers" configuration
+ # and not "pool" configuration.
+ servers = generate_server_names(distro_name)
+ LOG.debug(
+ 'Adding distro default ntp servers: %s', ','.join(servers))
+ elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
'Adding distro default ntp pool servers: %s', ','.join(pools))
@@ -431,7 +463,7 @@ def reload_ntp(service, systemd=False):
cmd = ['systemctl', 'reload-or-restart', service]
else:
cmd = ['service', service, 'restart']
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def supplemental_schema_validation(ntp_config):
@@ -531,6 +563,8 @@ def handle(name, cfg, cloud, log, _args):
raise RuntimeError(msg)
write_ntp_config_template(cloud.distro.name,
+ service_name=ntp_client_config.get(
+ 'service_name'),
servers=ntp_cfg.get('servers', []),
pools=ntp_cfg.get('pools', []),
path=ntp_client_config.get('confpath'),
@@ -543,7 +577,7 @@ def handle(name, cfg, cloud, log, _args):
try:
reload_ntp(ntp_client_config['service_name'],
systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 86afffef..036baf85 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,6 +43,7 @@ import os
import time
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
REBOOT_FILE = "/var/run/reboot-required"
@@ -57,7 +58,7 @@ def _multi_cfg_bool_get(cfg, *keys):
def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
+ subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
for _i in range(0, wait_attempts):
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index b8e27090..733c3910 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -19,6 +19,7 @@ keys to post. Available keys are:
- ``pub_key_dsa``
- ``pub_key_rsa``
- ``pub_key_ecdsa``
+ - ``pub_key_ed25519``
- ``instance_id``
- ``hostname``
 - ``fqdn``
@@ -52,6 +53,7 @@ POST_LIST_ALL = [
'pub_key_dsa',
'pub_key_rsa',
'pub_key_ecdsa',
+ 'pub_key_ed25519',
'instance_id',
'hostname',
'fqdn'
@@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args):
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
+ 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
}
for (n, path) in pubkeys.items():
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 3e81a3c7..6fcb8a7d 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,9 +22,8 @@ The ``delay`` key specifies a duration to be added onto any shutdown command
used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
the maximum amount of time between cloud-init starting and the system shutting
down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in a form that the ``shutdown`` utility recognizes.
-The most common format is the form ``+5`` for 5 minutes. See ``man shutdown``
-for more options.
+key must have an argument in either the form ``+5`` for 5 minutes or ``now``
+for immediate shutdown.
Optionally, a command can be run to determine whether or not
the system should shut down. The command to be run should be specified in the
@@ -33,6 +32,10 @@ the system should shut down. The command to be run should be specified in the
``condition`` key is omitted or the command specified by the ``condition``
key returns 0.
+.. note::
+ With Alpine Linux any message value specified is ignored as Alpine's halt,
+ poweroff, and reboot commands do not support broadcasting a message.
+
**Internal name:** ``cc_power_state_change``
**Module frequency:** per instance
@@ -56,6 +59,7 @@ import subprocess
import time
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -71,7 +75,7 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
@@ -111,9 +115,9 @@ def check_condition(cond, log=None):
return False
-def handle(_name, cfg, _cloud, log, _args):
+def handle(_name, cfg, cloud, log, _args):
try:
- (args, timeout, condition) = load_power_state(cfg)
+ (args, timeout, condition) = load_power_state(cfg, cloud.distro.name)
if args is None:
log.debug("no power_state provided. doing nothing")
return
@@ -140,7 +144,19 @@ def handle(_name, cfg, _cloud, log, _args):
condition, execmd, [args, devnull_fp])
-def load_power_state(cfg):
+def convert_delay(delay, fmt=None, scale=None):
+ if not fmt:
+ fmt = "+%s"
+ if not scale:
+ scale = 1
+
+ if delay != "now":
+ delay = fmt % int(int(delay) * int(scale))
+
+ return delay
+
+
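
A behaviour sketch of convert_delay() under the two parameterisations used below: stock shutdown(8) wants '+<minutes>', while Alpine's busybox commands take plain seconds.

    def convert_delay_demo(delay, fmt="+%s", scale=1):
        if delay != "now":
            delay = fmt % int(int(delay) * int(scale))
        return delay

    assert convert_delay_demo("5") == "+5"                       # default distros
    assert convert_delay_demo("5", fmt="%s", scale=60) == "300"  # alpine seconds
    assert convert_delay_demo("now") == "now"                    # passed through
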
+def load_power_state(cfg, distro_name):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
pstate = cfg.get('power_state')
@@ -160,26 +176,42 @@ def load_power_state(cfg):
(','.join(opt_map.keys()), mode))
delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
+ message = pstate.get("message")
+ scale = 1
+ fmt = "+%s"
+ command = ["shutdown", opt_map[mode]]
+
+ if distro_name == 'alpine':
+ # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's
+ # halt/poweroff/reboot commands take seconds rather than minutes.
+ scale = 60
+ # No "+" in front of delay value as not supported by Alpine's commands.
+ fmt = "%s"
+ if delay == "now":
+ # Alpine's commands do not understand "now".
+ delay = "0"
+ command = [mode, "-d"]
+ # Alpine's commands don't support a message.
+ message = None
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
+ try:
+ delay = convert_delay(delay, fmt=fmt, scale=scale)
+ except ValueError as e:
raise TypeError(
"power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
+ " found '%s'." % delay
+ ) from e
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
+ args = command + [delay]
+ if message:
+ args.append(message)
try:
timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
+ except ValueError as e:
+ raise ValueError(
+ "failed to convert timeout '%s' to float." % pstate['timeout']
+ ) from e
condition = pstate.get("condition", True)
if not isinstance(condition, (str, list, bool)):
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index c01f5b8f..bc981cf4 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -83,6 +83,7 @@ import yaml
from io import StringIO
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
@@ -105,14 +106,14 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
+ subp.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warning(("Sorry we do not know how to enable"
" puppet services on this system"))
@@ -159,9 +160,9 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
- puppet_config.readfp( # pylint: disable=W1505
+ puppet_config.read_file(
StringIO(cleaned_contents),
- filename=p_constants.conf_path)
+ source=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
@@ -203,6 +204,6 @@ def handle(name, cfg, cloud, log, _args):
_autostart_puppet(log)
# Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
+ subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 01dfc125..978d2ee0 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -19,6 +19,7 @@ from textwrap import dedent
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
NOBLOCK = "noblock"
@@ -88,11 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- return util.subp(['dumpfs', '-m', mount_point])[0]
+ return subp.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- return util.subp(['gpart', 'show', part])[0]
+ return subp.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -117,14 +118,12 @@ def _can_skip_resize_ufs(mount_point, devpth):
if o == "-f":
frag_sz = int(a)
# check the current partition size
- """
- # gpart show /dev/da0
-=> 40 62914480 da0 GPT (30G)
- 40 1024 1 freebsd-boot (512K)
- 1064 58719232 2 freebsd-ufs (28G)
- 58720296 3145728 3 freebsd-swap (1.5G)
- 61866024 1048496 - free - (512M)
- """
+ # Example output from `gpart show /dev/da0`:
+ # => 40 62914480 da0 GPT (30G)
+ # 40 1024 1 freebsd-boot (512K)
+ # 1064 58719232 2 freebsd-ufs (28G)
+ # 58720296 3145728 3 freebsd-swap (1.5G)
+ # 61866024 1048496 - free - (512M)
expect_sz = None
m = re.search('^(/dev/.+)p([0-9])$', devpth)
gpart_res = _get_gpart_output(m.group(1))
@@ -306,8 +305,8 @@ def handle(name, cfg, _cloud, log, args):
def do_resize(resize_cmd, log):
try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
+ subp.subp(resize_cmd)
+ except subp.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
# TODO(harlowja): Should we add a fsck check after this to make
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 69f4768a..519e66eb 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** fedora, rhel, sles
+**Supported distros:** alpine, fedora, rhel, sles
**Config keys**::
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['fedora', 'opensuse', 'rhel', 'sles']
+distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28c79b83..28d62e9d 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -39,6 +39,7 @@ Subscription`` example config.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -173,7 +174,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
return False
return True
@@ -200,7 +201,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -223,7 +224,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -246,7 +247,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
if line != '':
@@ -264,7 +265,7 @@ class SubscriptionManager(object):
cmd = ['attach', '--auto']
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
@@ -341,7 +342,7 @@ class SubscriptionManager(object):
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to attach pool {0} "
"due to {1}".format(pool, e))
return False
@@ -414,7 +415,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -432,11 +433,11 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
'''
- Uses the prefered cloud-init subprocess def of util.subp
+    Uses the preferred cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
'''
- return util.subp(['subscription-manager'] + cmd,
+ return subp.subp(['subscription-manager'] + cmd,
logstring=logstring_val)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5df0137d..2a2bc931 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -182,6 +182,7 @@ import os
import re
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
@@ -215,7 +216,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False):
cmd = ['service', service, 'restart']
else:
cmd = command
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def load_config(cfg):
@@ -346,8 +347,10 @@ class SyslogRemotesLine(object):
if self.port:
try:
int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
+ except ValueError as e:
+ raise ValueError(
+ "port '%s' is not an integer" % self.port
+ ) from e
if not self.addr:
raise ValueError("address is required")
@@ -429,7 +432,7 @@ def handle(name, cfg, cloud, log, _args):
restarted = reload_syslog(
command=mycfg[KEYNAME_RELOAD],
            systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
restarted = False
        log.warning("Failed to reload syslog: %s", e)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 5dd8de37..b61876aa 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import safeyaml, util
+from cloudinit import safeyaml, subp, util
from cloudinit.distros import rhel_util
@@ -130,6 +130,6 @@ def handle(name, cfg, cloud, log, _args):
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- util.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(['service', const.srv_name, 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 588e1b03..1e3f419e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -24,7 +24,7 @@ module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ALWAYS
@@ -38,7 +38,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 75549b52..5966fb9a 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -27,7 +27,7 @@ the system. As a result per-instance scripts will run again.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -41,7 +41,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 259bdfab..bcca859e 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -25,7 +25,7 @@ be run in alphabetical order. This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ONCE
@@ -39,7 +39,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index d940dbd6..215703ef 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -27,7 +27,7 @@ This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -42,7 +42,7 @@ def handle(name, _cfg, cloud, log, _args):
# go here...
runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index faac9242..e0a4bfff 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,6 +28,7 @@ entry under the ``vendor_data`` config key.
import os
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -46,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
- util.runparts(runparts_path, exe_prefix=prefix)
+ subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index b65f3ed9..4fb9b44e 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -65,6 +65,7 @@ from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -92,14 +93,14 @@ def handle_random_seed_command(command, required, env=None):
return
cmd = command[0]
- if not util.which(cmd):
+ if not subp.which(cmd):
if required:
raise ValueError(
"command '{cmd}' not found but required=true".format(cmd=cmd))
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
- util.subp(command, env=env, capture=False)
+ subp.subp(command, env=env, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 10d6d197..1d23d80d 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -55,7 +55,6 @@ class SetHostnameError(Exception):
This may happen if we attempt to set the hostname early in cloud-init's
init-local timeframe as certain services may not be running yet.
"""
- pass
def handle(name, cfg, cloud, log, _args):
@@ -86,7 +85,7 @@ def handle(name, cfg, cloud, log, _args):
except Exception as e:
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
- raise SetHostnameError("%s: %s" % (msg, e))
+ raise SetHostnameError("%s: %s" % (msg, e)) from e
write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4943d545..d6b5682d 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -83,6 +83,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import log as logging
from cloudinit.ssh_util import update_ssh_config
+from cloudinit import subp
from cloudinit import util
from string import ascii_letters, digits
@@ -128,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
cmd = list(service_cmd) + ["restart", service_name]
else:
cmd = list(service_cmd) + [service_name, "restart"]
- util.subp(cmd)
+ subp.subp(cmd)
LOG.debug("Restarted the SSH daemon.")
@@ -241,12 +242,12 @@ def rand_user_password(pwlen=20):
def chpasswd(distro, plist_in, hashed=False):
- if util.is_FreeBSD():
+ if util.is_BSD():
for pentry in plist_in.splitlines():
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
cmd = ['chpasswd'] + (['-e'] if hashed else [])
- util.subp(cmd, plist_in)
+ subp.subp(cmd, plist_in)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 90724b81..20ed7d2f 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
+from cloudinit import subp
from cloudinit import util
@@ -61,9 +62,9 @@ schema = {
snap:
assertions:
00: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
02: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
commands:
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
@@ -85,6 +86,21 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of commands
+ snap:
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of assertions
+ snap:
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
""")],
'frequency': PER_INSTANCE,
'type': 'object',
@@ -98,7 +114,8 @@ schema = {
'additionalItems': False, # Reject items non-string
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
+ 'uniqueItems': True,
+ 'additionalProperties': {'type': 'string'},
},
'commands': {
'type': ['object', 'array'], # Array of strings or dict
@@ -110,6 +127,12 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
+ 'additionalProperties': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'array', 'items': {'type': 'string'}},
+ ],
+ },
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -122,10 +145,6 @@ schema = {
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
SNAP_CMD = "snap"
@@ -157,7 +176,7 @@ def add_assertions(assertions):
LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+ subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
@@ -186,8 +205,8 @@ def run_commands(commands):
for command in fixed_snap_commands:
shell = isinstance(command, str)
try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
+ subp.subp(command, shell=shell, status_cb=sys.stderr.write)
+ except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
msg = 'Failures running snap commands:\n{cmd_failures}'.format(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 1020e944..95083607 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -27,7 +27,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
activation_key: <key>
"""
-from cloudinit import util
+from cloudinit import subp
distros = ['redhat', 'fedora']
@@ -41,9 +41,9 @@ def is_registered():
    # assume we aren't registered, which is a crude assumption...
already_registered = False
try:
- util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
already_registered = True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise
return already_registered
@@ -65,7 +65,7 @@ def do_register(server, profile_name,
cmd.extend(['--sslCACert', str(ca_cert_path)])
if activation_key:
cmd.extend(['--activationkey', str(activation_key)])
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 163cce99..9b2a333a 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -35,6 +35,42 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+Supported public key types for ``ssh_authorized_keys`` are:
+
+ - dsa
+ - rsa
+ - ecdsa
+ - ed25519
+ - ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - ecdsa-sha2-nistp256
+ - ecdsa-sha2-nistp384-cert-v01@openssh.com
+ - ecdsa-sha2-nistp384
+ - ecdsa-sha2-nistp521-cert-v01@openssh.com
+ - ecdsa-sha2-nistp521
+ - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - sk-ecdsa-sha2-nistp256@openssh.com
+ - sk-ssh-ed25519-cert-v01@openssh.com
+ - sk-ssh-ed25519@openssh.com
+ - ssh-dss-cert-v01@openssh.com
+ - ssh-dss
+ - ssh-ed25519-cert-v01@openssh.com
+ - ssh-ed25519
+ - ssh-rsa-cert-v01@openssh.com
+ - ssh-rsa
+ - ssh-xmss-cert-v01@openssh.com
+ - ssh-xmss@openssh.com
+
+.. note::
+    This list was filtered out of the supported keytypes in the
+    `OpenSSH`_ source, with the signature-only keys removed. Please see
+    ``ssh_util`` for more information.
+
+    ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy
+    compatibility, as they are valid public key names on some older
+    distros. They may be removed once support for those distros is dropped.
+
+.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c
+
Host Keys
^^^^^^^^^
@@ -116,6 +152,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
@@ -164,7 +201,7 @@ def handle(_name, cfg, cloud, log, _args):
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except Exception:
            util.logexc(log, "Failed to generate a key for %s from %s",
@@ -186,9 +223,9 @@ def handle(_name, cfg, cloud, log, _args):
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
+ out, err = subp.subp(cmd, capture=True, env=lang_c)
sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
if (e.exit_code == 1 and
err.lower().startswith("unknown key")):
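
A minimal sketch (illustration only; it relies on the
``ssh_util.VALID_KEY_TYPES`` constant that the cc_ssh_authkey_fingerprints
hunk below starts using) of the keytype filtering the note above describes:

    from cloudinit import ssh_util

    def is_supported_keytype(keytype):
        # VALID_KEY_TYPES carries the filtered OpenSSH list plus the
        # legacy short names (dsa, rsa, ecdsa, ed25519).
        return keytype.lower().strip() in ssh_util.VALID_KEY_TYPES

    print(is_supported_keytype('ssh-ed25519'))  # True
    print(is_supported_keytype('ssh-bogus'))    # False
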
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 7ac1c8cf..05d30ad1 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -13,7 +13,7 @@ Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
the keys can be specified, but defaults to ``sha256``.
-**Internal name:** `` cc_ssh_authkey_fingerprints``
+**Internal name:** ``cc_ssh_authkey_fingerprints``
**Module frequency:** per instance
@@ -59,8 +59,8 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ if (entry.keytype and entry.keytype.lower().strip()
+ in ssh_util.VALID_KEY_TYPES):
return True
return False
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 63f87298..856e5a9e 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -31,6 +31,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
import pwd
@@ -101,8 +102,8 @@ def import_ssh_ids(ids, user, log):
log.debug("Importing SSH ids for user %s.", user)
try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
+ subp.subp(cmd, capture=False)
+ except subp.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 8b6d2a1a..d61dc655 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -8,6 +8,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
@@ -109,18 +110,18 @@ def configure_ua(token=None, enable=None):
attach_cmd = ['ua', 'attach', token]
LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
try:
- util.subp(attach_cmd)
- except util.ProcessExecutionError as e:
+ subp.subp(attach_cmd)
+ except subp.ProcessExecutionError as e:
msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
error=str(e))
util.logexc(LOG, msg)
- raise RuntimeError(msg)
+ raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
cmd = ['ua', 'enable', service]
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
@@ -135,7 +136,7 @@ def configure_ua(token=None, enable=None):
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ua'):
+ if subp.which('ua'):
return
try:
cloud.distro.update_package_sources()
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 297451d6..2d1d2b32 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -9,6 +9,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import util
@@ -108,7 +109,7 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not util.which('ubuntu-drivers'):
+ if not subp.which('ubuntu-drivers'):
LOG.debug("'ubuntu-drivers' command not available. "
"Installing ubuntu-drivers-common")
pkg_install_func(['ubuntu-drivers-common'])
@@ -131,7 +132,7 @@ def install_drivers(cfg, pkg_install_func):
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
mode=0o755)
- util.subp([debconf_script, debconf_file])
+ subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e))
@@ -141,8 +142,8 @@ def install_drivers(cfg, pkg_install_func):
util.del_dir(tdir)
try:
- util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
- except util.ProcessExecutionError as exc:
+ subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
LOG.warning('the available version of ubuntu-drivers is'
' too old to perform requested driver installation')
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 13764e60..426498a3 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -78,6 +78,13 @@ config keys for an entry in ``users`` are as follows:
If specifying a sudo rule for a user, ensure that the syntax for the rule
is valid, as it is not checked by cloud-init.
+.. note::
+ Most of these configuration options will not be honored if the user
+   already exists. The following options are the exceptions; they are
+   applied to already-existing users:
+       'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
+       'ssh_authorized_keys', 'ssh_redirect_user'.
+
**Internal name:** ``cc_users_groups``
**Module frequency:** per instance
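
As a quick illustration of the note above (a hypothetical user entry, written
as the Python dict cloud-init parses the YAML into): for a user that already
exists on the image, only the exception keys still take effect.

    cfg = {
        'users': [{
            'name': 'ubuntu',  # assume this user already exists
            'ssh_authorized_keys': ['ssh-ed25519 AAAA...'],  # honored
            'lock_passwd': False,  # honored
            'shell': '/bin/sh',    # not an exception key, so ignored
        }]
    }
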
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index bd87e9e5..8601e707 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -4,60 +4,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Write Files
------------
-**Summary:** write arbitrary files
-
-Write out arbitrary content to files, optionally setting permissions. Content
-can be specified in plain text or binary. Data encoded with either base64 or
-binary gzip data can be specified and will be decoded before being written.
-
-.. note::
- if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standards. to specify binary data, use the yaml
- option ``!!binary``
-
-.. note::
- Do not write files under /tmp during boot because of a race with
- systemd-tmpfiles-clean that can cause temp files to get cleaned during
- the early boot process. Use /run/somedir instead to avoid race LP:1707222.
-
-**Internal name:** ``cc_write_files``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
- - content: |
- # My new /etc/sysconfig/samba file
-
- SMDBOPTIONS="-D"
- path: /etc/sysconfig/samba
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB
- ...
- path: /bin/arch
- permissions: '0555'
- - content: |
- 15 * * * * root ship_logs
- path: /etc/crontab
- append: true
-"""
+"""Write Files: write arbitrary files"""
import base64
import os
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
@@ -71,6 +25,142 @@ UNKNOWN_ENC = 'text/plain'
LOG = logging.getLogger(__name__)
+distros = ['all']
+
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
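# A hedged sketch of that contract in action (illustrative schema and
# config; assumes python-jsonschema is installed): strict validation
# raises SchemaValidationError, while the default mode only logs warnings.
from cloudinit.config.schema import (
    SchemaValidationError, validate_cloudconfig_schema)

mini_schema = {
    'type': 'object',
    'properties': {'write_files': {'type': 'array'}}}
try:
    # 'write_files' must be an array, so strict mode raises here.
    validate_cloudconfig_schema(
        {'write_files': 'not-a-list'}, mini_schema, strict=True)
except SchemaValidationError as e:
    print(e)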
+supported_encoding_types = [
+ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
+ 'base64']
+
+schema = {
+ 'id': 'cc_write_files',
+ 'name': 'Write Files',
+ 'title': 'write arbitrary files',
+ 'description': dedent("""\
+ Write out arbitrary content to files, optionally setting permissions.
+ Parent folders in the path are created if absent.
+        Content can be specified in plain text or binary. Data encoded as
+        base64 or gzip (optionally base64-wrapped) will be decoded before
+        being written. To create an empty file, omit the content key.
+
+ .. note::
+        If multiline data is provided, care should be taken to ensure that
+        it follows YAML formatting standards. To specify binary data, use
+        the YAML option ``!!binary``.
+
+ .. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+ the early boot process. Use /run/somedir instead to avoid race
+ LP:1707222."""),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Write out base64 encoded content to /etc/sysconfig/selinux
+ write_files:
+ - encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+ """),
+ dedent("""\
+ # Appending content to an existing file
+ write_files:
+ - content: |
+ 15 * * * * root ship_logs
+ path: /etc/crontab
+ append: true
+ """),
+ dedent("""\
+        # Provide gzipped binary content
+ write_files:
+ - encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
+ """),
+ dedent("""\
+ # Create an empty file on the system
+ write_files:
+ - path: /root/CLOUD_INIT_WAS_HERE
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'write_files': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'path': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Path of the file to which ``content`` is decoded
+ and written
+ """),
+ },
+ 'content': {
+ 'type': 'string',
+ 'default': '',
+ 'description': dedent("""\
+ Optional content to write to the provided ``path``.
+ When content is present and encoding is not '%s',
+ decode the content prior to writing. Default:
+ **''**
+ """ % UNKNOWN_ENC),
+ },
+ 'owner': {
+ 'type': 'string',
+ 'default': DEFAULT_OWNER,
+ 'description': dedent("""\
+ Optional owner:group to chown on the file. Default:
+ **{owner}**
+ """.format(owner=DEFAULT_OWNER)),
+ },
+ 'permissions': {
+ 'type': 'string',
+ 'default': oct(DEFAULT_PERMS).replace('o', ''),
+ 'description': dedent("""\
+ Optional file permissions to set on ``path``
+ represented as an octal string '0###'. Default:
+ **'{perms}'**
+ """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ },
+ 'encoding': {
+ 'type': 'string',
+ 'default': UNKNOWN_ENC,
+ 'enum': supported_encoding_types,
+ 'description': dedent("""\
+ Optional encoding type of the content. Default is
+ **text/plain** and no content decoding is
+ performed. Supported encoding types are:
+ %s.""" % ", ".join(supported_encoding_types)),
+ },
+ 'append': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to append ``content`` to existing file if
+ ``path`` exists. Default: **false**.
+ """),
+ },
+ },
+ 'required': ['path'],
+ 'additionalProperties': False
+ },
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -78,6 +168,7 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
write_files(name, files)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3673166a..01fe683c 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** fedora, rhel
+**Supported distros:** centos, fedora, rhel
**Config keys**::
@@ -36,7 +36,7 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['fedora', 'rhel']
+distros = ['centos', 'fedora', 'rhel']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 807c3eee..8a966aee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,8 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from __future__ import print_function
-
from cloudinit import importer
from cloudinit.util import find_modules, load_file
@@ -36,6 +34,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_LIST_ITEM_TMPL = (
+ '{prefix}Each item in **{prop_name}** list supports the following keys:')
SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
@@ -58,6 +58,19 @@ class SchemaValidationError(ValueError):
super(SchemaValidationError, self).__init__(message)
+def is_schema_byte_string(checker, instance):
+ """TYPE_CHECKER override allowing bytes for string type
+
+ For jsonschema v. 3.0.0+
+ """
+ try:
+ from jsonschema import Draft4Validator
+ except ImportError:
+ return False
+ return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
+ isinstance(instance, (bytes,)))
+
+
def validate_cloudconfig_schema(config, schema, strict=False):
"""Validate provided config meets the schema definition.
@@ -73,11 +86,31 @@ def validate_cloudconfig_schema(config, schema, strict=False):
"""
try:
from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create, extend
except ImportError:
logging.debug(
'Ignoring schema validation. python-jsonschema is not present')
return
- validator = Draft4Validator(schema, format_checker=FormatChecker())
+
+ # Allow for bytes to be presented as an acceptable valid value for string
+ # type jsonschema attributes in cloud-init's schema.
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+ if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ 'string', is_schema_byte_string)
+ cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES
+ # Allow bytes as well as string (and disable a spurious
+ # unsupported-assignment-operation pylint warning which appears because
+ # this code path isn't written against the latest jsonschema).
+ types['string'] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create(
+ meta_schema=Draft4Validator.META_SCHEMA,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types)
+ validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = '.'.join([str(p) for p in error.path])
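
A standalone sketch of the TYPE_CHECKER override above (assumes
python-jsonschema >= 3.0): bytes become acceptable wherever the schema says
"string", which is what lets YAML ``!!binary`` content validate.

    from jsonschema import Draft4Validator
    from jsonschema.validators import extend

    def is_str_or_bytes(checker, instance):
        # Accept real strings plus raw bytes for the "string" type.
        return (Draft4Validator.TYPE_CHECKER.is_type(instance, 'string')
                or isinstance(instance, bytes))

    BytesOkValidator = extend(
        Draft4Validator,
        type_checker=Draft4Validator.TYPE_CHECKER.redefine(
            'string', is_str_or_bytes))

    schema = {'properties': {'content': {'type': 'string'}}}
    BytesOkValidator(schema).validate({'content': b'\x1f\x8b'})  # no error
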
@@ -106,7 +139,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
schemapaths = _schemapath_for_cloudconfig(
cloudconfig, original_content)
errors_by_line = defaultdict(list)
- error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
@@ -120,18 +152,17 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if col is not None:
msg = 'Line {line} column {col}: {msg}'.format(
line=line, col=col, msg=msg)
- error_footer.append('# E{0}: {1}'.format(error_count, msg))
- error_count += 1
lines = original_content.decode().split('\n')
- error_count = 1
- for line_number, line in enumerate(lines):
- errors = errors_by_line[line_number + 1]
+ error_index = 1
+ for line_number, line in enumerate(lines, 1):
+ errors = errors_by_line[line_number]
if errors:
- error_label = ','.join(
- ['E{0}'.format(count + error_count)
- for count in range(0, len(errors))])
- error_count += len(errors)
- annotated_content.append(line + '\t\t# ' + error_label)
+ error_label = []
+ for error in errors:
+ error_label.append('E{0}'.format(error_index))
+ error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_index += 1
+ annotated_content.append(line + '\t\t# ' + ','.join(error_label))
else:
annotated_content.append(line)
annotated_content.append(
@@ -179,7 +210,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
- raise error
+ raise error from e
try:
validate_cloudconfig_schema(
cloudconfig, schema, strict=True)
@@ -213,20 +244,34 @@ def _schemapath_for_cloudconfig(config, original_content):
previous_depth = -1
path_prefix = ''
if line.startswith('- '):
+ # Process list items adding a list_index to the path prefix
+ previous_list_idx = '.%d' % (list_index - 1)
+ if path_prefix and path_prefix.endswith(previous_list_idx):
+ path_prefix = path_prefix[:-len(previous_list_idx)]
key = str(list_index)
- value = line[1:]
+ schema_line_numbers[key] = line_number
+ item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
+ item_indent += 1 # For the leading '-' character
+ previous_depth = indent_depth
+ indent_depth += item_indent
+ line = line[item_indent:] # Strip leading list item + whitespace
list_index += 1
else:
+ # Process non-list lines setting value if present
list_index = 0
key, value = line.split(':', 1)
+ if path_prefix:
+ # Append any existing path_prefix for a fully-pathed key
+ key = path_prefix + '.' + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
+ if list_index > 0 and indent_depth == previous_depth:
+ path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ break
else:
previous_depth = -1
path_prefix = ''
- if path_prefix:
- key = path_prefix + '.' + key
scopes.append((indent_depth, key))
if value:
value = value.strip()
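
An illustrative mapping (hypothetical cloud-config) for the list handling
above: each list item contributes a numeric path component, so a schema error
at a path such as ``write_files.0.permissions`` can be pinned to the matching
source line.

    # write_files:          -> path 'write_files'
    # - path: /tmp/a        -> paths 'write_files.0', 'write_files.0.path'
    #   permissions: 123    -> path 'write_files.0.permissions'
    # - path: /tmp/b        -> paths 'write_files.1', 'write_files.1.path'
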
@@ -259,6 +304,28 @@ def _get_property_type(property_dict):
return property_type
+def _parse_description(description, prefix):
+ """Parse description from the schema in a format that we can better
+ display in our docs. This parser does three things:
+
+ - Guarantee that a paragraph will be in a single line
+ - Guarantee that each new paragraph will be aligned with
+ the first paragraph
+    - Properly align lists of items
+
+ @param description: The original description in the schema.
+ @param prefix: The number of spaces used to align the current description
+ """
+ list_paragraph = prefix * 3
+ description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(
+ r"\n\n", r"\n\n{}".format(prefix), description)
+ description = re.sub(
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+
+ return description
+
+
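# A short sketch of _parse_description on an illustrative input: wrapped
# lines are joined into one, each new paragraph is re-indented to `prefix`,
# and list items are pushed out to `prefix * 3` spaces.
raw = "Write out arbitrary\ncontent.\n\nNext paragraph.\n  - an item"
assert _parse_description(raw, '    ') == (
    "Write out arbitrary content.\n"
    "\n    Next paragraph.\n"
    "            - an item")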
def _get_property_doc(schema, prefix=' '):
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + ' '
@@ -266,11 +333,23 @@ def _get_property_doc(schema, prefix=' '):
for prop_key, prop_config in schema.get('properties', {}).items():
        # Define prop_name and description for SCHEMA_PROPERTY_TMPL
description = prop_config.get('description', '')
+
properties.append(SCHEMA_PROPERTY_TMPL.format(
prefix=prefix,
prop_name=prop_key,
type=_get_property_type(prop_config),
- description=description.replace('\n', '')))
+ description=_parse_description(description, prefix)))
+ items = prop_config.get('items')
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(item, prefix=new_prefix))
+ elif isinstance(items, dict) and items.get('properties'):
+ properties.append(SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=prop_key))
+ new_prefix += ' '
+ properties.append(_get_property_doc(items, prefix=new_prefix))
if 'properties' in prop_config:
properties.append(
_get_property_doc(prop_config, prefix=new_prefix))
@@ -346,8 +425,9 @@ def get_parser(parser=None):
description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
- parser.add_argument('-d', '--doc', action="store_true", default=False,
- help='Print schema documentation')
+ parser.add_argument('-d', '--docs', nargs='+',
+ help=('Print schema module docs. Choices: all or'
+ ' space-delimited cc_names.'))
parser.add_argument('--annotate', action="store_true", default=False,
help='Annotate existing cloud-config file with errors')
return parser
@@ -355,9 +435,9 @@ def get_parser(parser=None):
def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
- exclusive_args = [args.config_file, args.doc]
+ exclusive_args = [args.config_file, args.docs]
if not any(exclusive_args) or all(exclusive_args):
- error('Expected either --config-file argument or --doc')
+ error('Expected either --config-file argument or --docs')
full_schema = get_schema()
if args.config_file:
try:
@@ -370,9 +450,16 @@ def handle_schema_args(name, args):
error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
- if args.doc:
+ elif args.docs:
+ schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
+ schema_ids += ['all']
+ invalid_docs = set(args.docs).difference(set(schema_ids))
+ if invalid_docs:
+ error('Invalid --docs value {0}. Must be one of: {1}'.format(
+ list(invalid_docs), ', '.join(schema_ids)))
for subschema in full_schema['allOf']:
- print(get_schema_doc(subschema))
+ if 'all' in args.docs or subschema['id'] in args.docs:
+ print(get_schema_doc(subschema))
def main():
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
index 67646b03..b00f2083 100644
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -15,10 +15,8 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'}
class TestEC2MetadataRoute(CiTestCase):
- with_logs = True
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ifconfig(self, m_subp, m_which):
"""Set the route if ifconfig command is available"""
m_which.side_effect = lambda x: x if x == 'ifconfig' else None
@@ -27,8 +25,8 @@ class TestEC2MetadataRoute(CiTestCase):
['route', 'add', '-host', '169.254.169.254', 'reject'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ip(self, m_subp, m_which):
"""Set the route if ip command is available"""
m_which.side_effect = lambda x: x if x == 'ip' else None
@@ -37,8 +35,8 @@ class TestEC2MetadataRoute(CiTestCase):
['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_no_tool(self, m_subp, m_which):
"""Log error when neither route nor ip commands are available"""
m_which.return_value = None # Find neither ifconfig nor ip
diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py
new file mode 100644
index 00000000..46ba99b2
--- /dev/null
+++ b/cloudinit/config/tests/test_final_message.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_final_message import handle
+
+
+class TestHandle:
+ # TODO: Expand these tests to cover full functionality; currently they only
+ # cover the logic around how the boot-finished file is written (and not its
+ # contents).
+
+ @pytest.mark.parametrize(
+ "instance_dir_exists,file_is_written,expected_log_substring",
+ [
+ (True, True, None),
+ (False, False, "Failed to write boot finished file "),
+ ],
+ )
+ def test_boot_finished_written(
+ self,
+ instance_dir_exists,
+ file_is_written,
+ expected_log_substring,
+ caplog,
+ tmpdir,
+ ):
+ instance_dir = tmpdir.join("var/lib/cloud/instance")
+ if instance_dir_exists:
+ instance_dir.ensure_dir()
+ boot_finished = instance_dir.join("boot-finished")
+
+ m_cloud = mock.Mock(
+ paths=mock.Mock(boot_finished=boot_finished.strpath)
+ )
+
+ handle(None, {}, m_cloud, logging.getLogger(), [])
+
+ # We should not change the status of the instance directory
+ assert instance_dir_exists == instance_dir.exists()
+ assert file_is_written == boot_finished.exists()
+
+ if expected_log_substring:
+ assert expected_log_substring in caplog.text
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
new file mode 100644
index 00000000..99c05bb5
--- /dev/null
+++ b/cloudinit/config/tests/test_grub_dpkg.py
@@ -0,0 +1,176 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import pytest
+
+from unittest import mock
+from logging import Logger
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+
+
+class TestFetchIdevs:
+ """Tests cc_grub_dpkg.fetch_idevs()"""
+
+ # Note: udevadm info returns devices in a large single line string
+ @pytest.mark.parametrize(
+ "grub_output,path_exists,expected_log_call,udevadm_output"
+ ",expected_idevs",
+ [
+ # Inside a container, grub not installed
+ (
+ ProcessExecutionError(reason=FileNotFoundError()),
+ False,
+ mock.call("'grub-probe' not found in $PATH"),
+ '',
+ '',
+ ),
+ # Inside a container, grub installed
+ (
+ ProcessExecutionError(stderr="failed to get canonical path"),
+ False,
+ mock.call("grub-probe 'failed to get canonical path'"),
+ '',
+ '',
+ ),
+ # KVM Instance
+ (
+ ['/dev/vda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-path/pci-0000:00:00.0 ',
+ '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
+ ),
+ '/dev/vda',
+ ),
+ # Xen Instance
+ (
+ ['/dev/xvda'],
+ True,
+ None,
+ '',
+ '/dev/xvda',
+ ),
+ # NVMe Hardware Instance
+ (
+ ['/dev/nvme1n1'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/nvme-Company_hash000 ',
+ '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
+ ),
+ '/dev/disk/by-id/nvme-Company_hash000',
+ ),
+ # SCSI Hardware Instance
+ (
+ ['/dev/sda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/company-user-1 ',
+ '/dev/disk/by-id/scsi-0Company_user-1 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
+ ),
+ '/dev/disk/by-id/company-user-1',
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
+ path_exists, expected_log_call, udevadm_output,
+ expected_idevs):
+ """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
+ m_subp.side_effect = [
+ grub_output,
+ ["".join(udevadm_output)]
+ ]
+ m_exists.return_value = path_exists
+ log = mock.Mock(spec=Logger)
+ idevs = fetch_idevs(log)
+ assert expected_idevs == idevs
+ if expected_log_call is not None:
+ assert expected_log_call in log.debug.call_args_list
+
+
+class TestHandle:
+ """Tests cc_grub_dpkg.handle()"""
+
+ @pytest.mark.parametrize(
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ [
+ (
+ # No configuration
+ None,
+ None,
+ '/dev/disk/by-id/nvme-Company_hash000',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/disk/by-id/nvme-Company_hash000','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty unset
+ '/dev/sda',
+ None,
+ '/dev/sda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/sda','false'"
+ ),
+ ),
+ (
+ # idevs unset, idevs_empty set
+ None,
+ 'true',
+ '/dev/xvda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/xvda','true'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ '/dev/vda',
+ 'false',
+ '/dev/disk/by-id/company-user-1',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/vda','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+            # Respect what the user defines, even if it's logically wrong
+ '/dev/nvme0n1',
+ 'true',
+ '',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/nvme0n1','true'"
+ ),
+ )
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
+ cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
+ expected_log_output):
+ """Test setting of correct debconf database entries"""
+ m_get_cfg_str.side_effect = [
+ cfg_idevs,
+ cfg_idevs_empty
+ ]
+ m_fetch_idevs.return_value = fetch_idevs_output
+ log = mock.Mock(spec=Logger)
+ handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
+ log.debug.assert_called_with("".join(expected_log_output))
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
new file mode 100644
index 00000000..764a33e3
--- /dev/null
+++ b/cloudinit/config/tests/test_mounts.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_mounts import create_swapfile
+
+
+M_PATH = 'cloudinit.config.cc_mounts.'
+
+
+class TestCreateSwapfile:
+
+ @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
+ @mock.patch(M_PATH + 'util.get_mount_info')
+ @mock.patch(M_PATH + 'subp.subp')
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to subp.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, '')
+ assert mock.call(['mkswap', fname]) in m_subp.call_args_list
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
new file mode 100644
index 00000000..6546a0b5
--- /dev/null
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -0,0 +1,86 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+
+
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestGenerateResolvConf:
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
+ generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ generate_resolv_conf(
+ "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
+ )
+
+ assert [
+ mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ generate_resolv_conf("templates/resolv.conf.tmpl", params)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 8247c388..daa1ef51 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -14,7 +14,7 @@ class TestHandleSshPwauth(CiTestCase):
with_logs = True
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
setpass.handle_ssh_pwauth("floo")
self.assertIn("Unrecognized value: ssh_pwauth=floo",
@@ -22,7 +22,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -31,7 +31,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -40,7 +40,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
@@ -48,7 +48,7 @@ class TestHandleSshPwauth(CiTestCase):
self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
setpass.handle_ssh_pwauth(
@@ -56,7 +56,7 @@ class TestHandleSshPwauth(CiTestCase):
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid changen value, then update should be called."""
upname = MODPATH + "update_ssh_config"
@@ -88,7 +88,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ssh_pwauth=None\n',
self.logs.getvalue())
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
"""handle parses command password hashes."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -98,7 +98,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
@@ -112,12 +112,12 @@ class TestSetPasswordsHandle(CiTestCase):
'\n'.join(valid_hashed_pwds) + '\n')],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
- def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_freebsd):
- """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
- m_is_freebsd.return_value = True
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd):
+        """BSD distros don't use chpasswd"""
+ m_is_bsd.return_value = True
cloud = self.tmp_cloud(distro='freebsd')
valid_pwds = ['ubuntu:passw0rd']
cfg = {'chpasswd': {'list': valid_pwds}}
@@ -129,18 +129,18 @@ class TestSetPasswordsHandle(CiTestCase):
mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_freebsd):
+ m_is_bsd):
"""handle parses command set random passwords."""
- m_is_freebsd.return_value = False
+ m_is_bsd.return_value = False
cloud = self.tmp_cloud(distro='ubuntu')
valid_random_pwds = [
'root:R',
'ubuntu:RANDOM']
cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index cbbb173d..6d4c014a 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -92,7 +92,7 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
@@ -107,7 +107,7 @@ class TestAddAssertions(CiTestCase):
"assertion parameter was not a list or dict: I'm Not Valid",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
@@ -130,7 +130,7 @@ class TestAddAssertions(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
@@ -168,7 +168,7 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
@@ -310,6 +310,52 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
{'snap': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_values_are_invalid_type(self, _):
+        """Warnings when snap:commands values are an invalid type (e.g. int)."""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: 123 is not valid under any of the given"
+ " schemas\n"
+ "WARNING: Invalid config:\n"
+ "snap.commands.01: 123 is not valid under any of the given"
+ " schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_list_values_are_invalid_type(self, _):
+        """Warnings when snap:commands list values are the wrong type (e.g. int)."""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+            " of the given schemas\n"
+            "WARNING: Invalid config:\n"
+            "snap.commands.01: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_assertions_values_are_invalid_type(self, _):
+        """Warnings when snap:assertions values are an invalid type (e.g. int)."""
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.assertions.0: 123 is not of type 'string'\n"
+ "WARNING: Invalid config:\n"
+ "snap.assertions.01: 123 is not of type 'string'\n",
+ self.logs.getvalue())
+
@mock.patch('cloudinit.config.cc_snap.add_assertions')
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
@@ -345,7 +391,7 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
+ {'commands': [["echo", "bye"], ["echo", "bye"]]},
"command entries can be duplicate.")
def test_duplicates_are_fine_array_string(self):
@@ -431,7 +477,7 @@ class TestHandle(CiTestCase):
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
@@ -447,7 +493,7 @@ class TestHandle(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
"""Any provided configuration is runs validate_cloudconfig_schema."""
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index 8c4161ef..db7fb726 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -3,7 +3,7 @@
from cloudinit.config.cc_ubuntu_advantage import (
configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -26,10 +26,10 @@ class TestConfigureUA(CiTestCase):
super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_error(self, m_subp):
"""Errors from ua attach command are raised."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
'Invalid token SomeToken')
with self.assertRaises(RuntimeError) as context_manager:
configure_ua(token='SomeToken')
@@ -39,7 +39,7 @@ class TestConfigureUA(CiTestCase):
'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_token(self, m_subp):
"""When token is provided, attach the machine to ua using the token."""
configure_ua(token='SomeToken')
@@ -48,7 +48,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_on_service_error(self, m_subp):
"""all services should be enabled and then any failures raised"""
@@ -56,7 +56,7 @@ class TestConfigureUA(CiTestCase):
fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
if cmd in fail_cmds and capture:
svc = cmd[-1]
- raise util.ProcessExecutionError(
+ raise subp.ProcessExecutionError(
'Invalid {} credentials'.format(svc.upper()))
m_subp.side_effect = fake_subp
@@ -83,7 +83,7 @@ class TestConfigureUA(CiTestCase):
'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_empty_services(self, m_subp):
"""When services is an empty list, do not auto-enable attach."""
configure_ua(token='SomeToken', enable=[])
@@ -92,7 +92,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_specific_services(self, m_subp):
"""When services a list, only enable specific services."""
configure_ua(token='SomeToken', enable=['fips'])
@@ -105,7 +105,7 @@ class TestConfigureUA(CiTestCase):
self.logs.getvalue())
@mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_string_services(self, m_subp):
"""When services a string, treat as singleton list and warn"""
configure_ua(token='SomeToken', enable='fips')
@@ -119,7 +119,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_weird_services(self, m_subp):
"""When services not string or list, warn but still attach"""
configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
@@ -285,7 +285,7 @@ class TestMaybeInstallUATools(CiTestCase):
super(TestMaybeInstallUATools, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
m_which.return_value = '/usr/bin/ua' # already installed
@@ -294,7 +294,7 @@ class TestMaybeInstallUATools(CiTestCase):
'Some apt error')
maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
"""maybe_install_ua_tools logs and raises apt update errors."""
m_which.return_value = None
@@ -306,7 +306,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertEqual('Some apt error', str(context_manager.exception))
self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_raises_install_errors(self, m_which):
"""maybe_install_ua_tools logs and raises package install errors."""
m_which.return_value = None
@@ -320,7 +320,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertIn(
'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_happy_path(self, m_which):
"""maybe_install_ua_tools installs ubuntu-advantage-tools."""
m_which.return_value = None
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
index 46952692..504ba356 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -7,7 +7,7 @@ from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
from cloudinit.config import cc_ubuntu_drivers as drivers
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
@@ -16,6 +16,13 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
"(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+# The tests in this module call helper methods which are decorated with
+# mock.patch. pylint doesn't understand that mock.patch passes parameters to
+# the decorated function, so it incorrectly reports that we aren't passing
+# values for all parameters. Instead of annotating every single call, we
+# disable it for the entire module:
+# pylint: disable=no-value-for-parameter
+
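As an illustrative sketch (an assumed pattern, not code from this patch), the
false positive looks like this: mock.patch injects the mocked argument when
the decorated helper is called, so call sites pass fewer arguments than the
signature declares.

    from unittest import mock

    class ExampleTest:
        @mock.patch('os.path.exists', return_value=True)
        def _assert_something(self, config, m_exists):
            # m_exists is supplied by mock.patch, not by the caller
            assert m_exists.return_value is True

    # Call sites write self._assert_something(config); pylint cannot see
    # that mock.patch fills in m_exists, hence no-value-for-parameter.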
class AnyTempScriptAndDebconfFile(object):
def __init__(self, tmp_dir, debconf_file):
@@ -46,8 +53,8 @@ class TestUbuntuDrivers(CiTestCase):
schema=drivers.schema, strict=True)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_happy_path_taken(
self, config, m_which, m_subp, m_tmp):
"""Positive path test through handle. Package should be installed."""
@@ -73,8 +80,8 @@ class TestUbuntuDrivers(CiTestCase):
self._assert_happy_path_taken(new_config)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
self, m_which, m_subp, m_tmp):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
@@ -102,8 +109,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertIn('ubuntu-drivers found no drivers for installation',
self.logs.getvalue())
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_inert_with_config(self, config, m_which, m_subp):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
@@ -147,8 +154,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, m_install_drivers.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=True)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
self, m_which, m_subp, m_tmp):
"""If 'ubuntu-drivers' is present, no package install should occur."""
@@ -174,8 +181,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, pkg_install.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
self, m_which, m_subp, m_tmp):
"""Older ubuntu-drivers versions should emit message and raise error"""
@@ -212,8 +219,8 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
tdir = self.tmp_dir()
debconf_file = os.path.join(tdir, 'nvidia.template')
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index f620b597..df89ddb3 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -39,7 +39,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -65,7 +65,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_fbsd_user.call_args_list,
[mock.call('freebsd', groups='wheel', lock_passwd=True,
shell='/bin/tcsh'),
@@ -86,7 +86,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -146,7 +146,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 92598a2d..2537608f 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -13,6 +13,8 @@ import abc
import os
import re
import stat
+import string
+import urllib.parse
from io import StringIO
from cloudinit import importer
@@ -23,9 +25,14 @@ from cloudinit.net import network_state
from cloudinit.net import renderers
from cloudinit import ssh_util
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
+from cloudinit.features import \
+ ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
+
from cloudinit.distros.parsers import hosts
+from .networking import LinuxNetworking
# Used when a cloud-config module can be run on all cloud-init distributions.
@@ -33,12 +40,13 @@ from cloudinit.distros.parsers import hosts
ALL_DISTROS = 'all'
OSFAMILIES = {
+ 'alpine': ['alpine'],
+ 'arch': ['arch'],
'debian': ['debian', 'ubuntu'],
- 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
- 'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
+ 'gentoo': ['gentoo'],
+ 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
'suse': ['opensuse', 'sles'],
- 'arch': ['arch'],
}
LOG = logging.getLogger(__name__)
@@ -50,6 +58,9 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
# Default NTP Client Configurations
PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+# Letters/Digits/Hyphen characters, for use in domain name validation
+LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
+
class Distro(metaclass=abc.ABCMeta):
@@ -61,11 +72,13 @@ class Distro(metaclass=abc.ABCMeta):
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
_preferred_ntp_clients = None
+ networking_cls = LinuxNetworking
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ self.networking = self.networking_cls()
@abc.abstractmethod
def install_packages(self, pkglist):
@@ -220,8 +233,8 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Non-persistently setting the system hostname to %s",
hostname)
try:
- util.subp(['hostname', hostname])
- except util.ProcessExecutionError:
+ subp.subp(['hostname', hostname])
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Failed to non-persistently adjust the system "
"hostname to %s", hostname)
@@ -356,12 +369,12 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -380,6 +393,9 @@ class Distro(metaclass=abc.ABCMeta):
def add_user(self, name, **kwargs):
"""
Add a user to the system using standard GNU tools
+
+ This should be overridden on distros where useradd is not desirable or
+ not available.
"""
# XXX need to make add_user idempotent somehow as we
# still want to add groups or modify SSH keys on pre-existing
@@ -475,7 +491,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding user %s", name)
try:
- util.subp(useradd_cmd, logstring=log_useradd_cmd)
+ subp.subp(useradd_cmd, logstring=log_useradd_cmd)
except Exception as e:
util.logexc(LOG, "Failed to create user %s", name)
raise e
@@ -495,7 +511,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd,
+ (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd,
capture=True)
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
@@ -508,9 +524,22 @@ class Distro(metaclass=abc.ABCMeta):
def create_user(self, name, **kwargs):
"""
- Creates users for the system using the GNU passwd tools. This
- will work on an GNU system. This should be overriden on
- distros where useradd is not desirable or not available.
+ Creates or partially updates the ``name`` user in the system.
+
+ This defers the actual user creation to ``self.add_user`` or
+ ``self.add_snap_user``, and most of the keys in ``kwargs`` will be
+ processed there if and only if the user does not already exist.
+
+ Once the existence of the ``name`` user has been ensured, this method
+ then processes these keys (for both just-created and pre-existing
+ users):
+
+ * ``plain_text_passwd``
+ * ``hashed_passwd``
+ * ``lock_passwd``
+ * ``sudo``
+ * ``ssh_authorized_keys``
+ * ``ssh_redirect_user``
"""
# Add a snap user, if requested
@@ -577,20 +606,21 @@ class Distro(metaclass=abc.ABCMeta):
# passwd must use short '-l' due to SLES11 lacking long form '--lock'
lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
try:
- cmd = next(l for l in lock_tools if util.which(l[0]))
- except StopIteration:
+ cmd = next(tool for tool in lock_tools if subp.which(tool[0]))
+ except StopIteration as e:
raise RuntimeError((
"Unable to lock user account '%s'. No tools available. "
- " Tried: %s.") % (name, [c[0] for c in lock_tools]))
+ " Tried: %s.") % (name, [c[0] for c in lock_tools])
+ ) from e
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
util.logexc(LOG, 'Failed to disable password for user %s', name)
raise e
def expire_passwd(self, user):
try:
- util.subp(['passwd', '--expire', user])
+ subp.subp(['passwd', '--expire', user])
except Exception as e:
util.logexc(LOG, "Failed to set 'expire' for %s", user)
raise e
@@ -606,7 +636,7 @@ class Distro(metaclass=abc.ABCMeta):
cmd.append('-e')
try:
- util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
except Exception as e:
util.logexc(LOG, "Failed to set password for %s", user)
raise e
@@ -703,7 +733,7 @@ class Distro(metaclass=abc.ABCMeta):
LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
- util.subp(group_add_cmd)
+ subp.subp(group_add_cmd)
LOG.info("Created new group %s", name)
except Exception:
util.logexc(LOG, "Failed to create group %s", name)
@@ -716,10 +746,115 @@ class Distro(metaclass=abc.ABCMeta):
"; user does not exist.", member, name)
continue
- util.subp(['usermod', '-a', '-G', name, member])
+ subp.subp(['usermod', '-a', '-G', name, member])
LOG.info("Added user '%s' to group '%s'", member, name)
+def _apply_hostname_transformations_to_url(url: str, transformations: list):
+ """
+ Apply transformations to a URL's hostname, return transformed URL.
+
+ This is a separate function because unwrapping and rewrapping only the
+ hostname portion of a URL is complex.
+
+ :param url:
+ The URL to operate on.
+ :param transformations:
+ A list of ``(str) -> Optional[str]`` functions, which will be applied
+ in order to the hostname portion of the URL. If any function
+ (regardless of ordering) returns None, ``url`` will be returned without
+ any modification.
+
+ :return:
+ A string whose value is ``url`` with the hostname ``transformations``
+ applied, or ``None`` if ``url`` is unparseable.
+ """
+ try:
+ parts = urllib.parse.urlsplit(url)
+ except ValueError:
+ # If we can't even parse the URL, we shouldn't use it for anything
+ return None
+ new_hostname = parts.hostname
+ if new_hostname is None:
+ # The URL given doesn't have a hostname component, so (a) we can't
+ # transform it, and (b) it won't work as a mirror; return None.
+ return None
+
+ for transformation in transformations:
+ new_hostname = transformation(new_hostname)
+ if new_hostname is None:
+ # If a transformation returns None, that indicates we should abort
+ # processing and return `url` unmodified
+ return url
+
+ new_netloc = new_hostname
+ if parts.port is not None:
+ new_netloc = "{}:{}".format(new_netloc, parts.port)
+ return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc))
+
+
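A brief usage sketch (illustrative, not from this patch) of the helper above;
str.lower stands in for a real transformation and shows that scheme, port and
path survive the hostname rewrap:

    _apply_hostname_transformations_to_url(
        'http://Mirror.Example.COM:8080/ubuntu/', [str.lower])
    # -> 'http://mirror.example.com:8080/ubuntu/'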
+def _sanitize_mirror_url(url: str):
+ """
+ Given a mirror URL, replace or remove any invalid URI characters.
+
+ This performs the following actions on the URL's hostname:
+ * Checks if it is an IP address, returning the URL immediately if it is
+ * Converts it to its IDN form (see below for details)
+ * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with
+ hyphens
+ * Drops any leading/trailing hyphens from each domain name label
+
+ Before we replace any invalid domain name characters, we first need to
+ ensure that any valid non-ASCII characters in the hostname will not be
+ replaced, by ensuring the hostname is in its Internationalized domain name
+ (IDN) representation (see RFC 5890). This conversion has to be applied to
+ the whole hostname (rather than just the substitution variables), because
+ the Punycode algorithm used by IDNA transcodes each part of the hostname as
+ a whole string (rather than encoding individual characters). It cannot be
+ applied to the whole URL, because (a) the Punycode algorithm expects to
+ operate on domain names so doesn't output a valid URL, and (b) non-ASCII
+ characters in non-hostname parts of the URL aren't encoded via Punycode.
+
+ To put this in RFC 5890's terminology: before we remove or replace any
+ characters from our domain name (which we do to ensure that each label is a
+ valid LDH Label), we first ensure each label is in its A-label form.
+
+ (Note that Python's builtin idna encoding is actually IDNA2003, not
+ IDNA2008. This changes the specifics of how some characters are encoded to
+ ASCII, but doesn't affect the logic here.)
+
+ :param url:
+ The URL to operate on.
+
+ :return:
+ A sanitized version of the URL, which will have been IDNA encoded if
+ necessary, or ``None`` if the generated string is not a parseable URL.
+ """
+ # Acceptable characters are LDH characters, plus "." to separate each label
+ acceptable_chars = LDH_ASCII_CHARS + "."
+ transformations = [
+ # This is an IP address, not a hostname, so no need to apply the
+ # transformations
+ lambda hostname: None if net.is_ip_address(hostname) else hostname,
+
+ # Encode with IDNA to get the correct characters (as `bytes`), then
+ # decode with ASCII so we return a `str`
+ lambda hostname: hostname.encode('idna').decode('ascii'),
+
+ # Replace any unacceptable characters with "-"
+ lambda hostname: ''.join(
+ c if c in acceptable_chars else "-" for c in hostname
+ ),
+
+ # Drop leading/trailing hyphens from each part of the hostname
+ lambda hostname: '.'.join(
+ part.strip('-') for part in hostname.split('.')
+ ),
+ ]
+
+ return _apply_hostname_transformations_to_url(url, transformations)
+
+
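Hedged examples (illustrative, not from this patch) of what the pipeline
above produces:

    _sanitize_mirror_url('http://us_east.clouds.example.com/')
    # -> 'http://us-east.clouds.example.com/'  (non-LDH '_' replaced by '-')

    _sanitize_mirror_url('http://10.0.0.1/ubuntu/')
    # -> 'http://10.0.0.1/ubuntu/'             (IP addresses pass through)

    _sanitize_mirror_url('http://münchen.example.com/')
    # -> 'http://xn--mnchen-3ya.example.com/'  (hostname in A-label form)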
def _get_package_mirror_info(mirror_info, data_source=None,
mirror_filter=util.search_for_mirror):
# given an arch-specific 'mirror_info' entry (from package_mirrors)
@@ -735,7 +870,12 @@ def _get_package_mirror_info(mirror_info, data_source=None,
# ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
# the region is us-east-1. so region = az[0:-1]
if _EC2_AZ_RE.match(data_source.availability_zone):
- subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
+ ec2_region = data_source.availability_zone[0:-1]
+
+ if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES:
+ subst['ec2_region'] = "%s" % ec2_region
+ elif data_source.platform_type == "ec2":
+ subst['ec2_region'] = "%s" % ec2_region
if data_source and data_source.region:
subst['region'] = data_source.region
@@ -748,9 +888,13 @@ def _get_package_mirror_info(mirror_info, data_source=None,
mirrors = []
for tmpl in searchlist:
try:
- mirrors.append(tmpl % subst)
+ mirror = tmpl % subst
except KeyError:
- pass
+ continue
+
+ mirror = _sanitize_mirror_url(mirror)
+ if mirror is not None:
+ mirrors.append(mirror)
found = mirror_filter(mirrors)
if found:
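To make the new control flow concrete, a hedged sketch (substitution values
invented): with subst = {'region': 'us-east-1'}, the template list is now
processed as

    'http://%(region)s.clouds.example.com/'    # rendered, sanitized, kept
    'http://%(ec2_region)s.ec2.example.com/'   # KeyError -> skipped

so only templates whose variables are all available contribute a (sanitized)
mirror candidate.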
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
new file mode 100644
index 00000000..e42443fc
--- /dev/null
+++ b/cloudinit/distros/alpine.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2016 Matt Dainty
+# Copyright (C) 2020 Dermot Bradley
+#
+# Author: Matt Dainty <matt@bodgit-n-scarper.com>
+# Author: Dermot Bradley <dermot_bradley@yahoo.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_FILE_HEADER = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+
+"""
+
+
+class Distro(distros.Distro):
+ init_cmd = ['rc-service'] # init scripts
+ locale_conf_fn = "/etc/profile.d/locale.sh"
+ network_conf_fn = "/etc/network/interfaces"
+ renderer_configs = {
+ "eni": {"eni_path": network_conf_fn,
+ "eni_header": NETWORK_FILE_HEADER}
+ }
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.default_locale = 'C.UTF-8'
+ self.osfamily = 'alpine'
+ cfg['ssh_svcname'] = 'sshd'
+
+ def get_locale(self):
+ """The default locale for Alpine Linux is different than
+ cloud-init's DataSource default.
+ """
+ return self.default_locale
+
+ def apply_locale(self, locale, out_fn=None):
+ # Alpine has limited locale support due to musl library limitations
+
+ if not locale:
+ locale = self.default_locale
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+
+ lines = [
+ "#",
+ "# This file is created by cloud-init once per new instance boot",
+ "#",
+ "export CHARSET=UTF-8",
+ "export LANG=%s" % locale,
+ "export LC_COLLATE=C",
+ "",
+ ]
+ util.write_file(out_fn, "\n".join(lines), 0o644)
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('add', pkgs=pkglist)
+
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ def _bring_up_interfaces(self, device_names):
+ use_all = False
+ for d in device_names:
+ if d == 'all':
+ use_all = True
+ if use_all:
+ return distros.Distro._bring_up_interface(self, '-a')
+ else:
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0o644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['apk']
+ # Redirect output
+ cmd.append("--quiet")
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ if command:
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ subp.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
+
+ @property
+ def preferred_ntp_clients(self):
+ """Allow distro to determine the preferred ntp client list"""
+ if not self._preferred_ntp_clients:
+ self._preferred_ntp_clients = ['chrony', 'ntp']
+
+ return self._preferred_ntp_clients
+
+# vi: ts=4 expandtab
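A hedged illustration (not from this patch) of the command lines the
package_command implementation above produces:

    distro.install_packages(['vim', 'tmux'])
    # runs: apk --quiet update  (once per instance, via update_package_sources)
    #       apk --quiet add vim tmux

    distro.install_packages([['vim', '8.2.0']])
    # runs: apk --quiet add vim-8.2.0
    # (a [name, version] entry expands via util.expand_package_list('%s-%s'))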
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 9f89c5f9..967be168 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -8,6 +8,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -44,7 +45,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -60,9 +61,9 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
try:
return self._supported_write_network_config(netconfig)
- except RendererNotFoundError:
+ except RendererNotFoundError as e:
# Fall back to old _write_network
- raise NotImplementedError
+ raise NotImplementedError from e
def _write_network(self, settings):
entries = net_util.translate_network(settings)
@@ -76,11 +77,11 @@ class Distro(distros.Distro):
def _enable_interface(self, device_name):
cmd = ['netctl', 'reenable', device_name]
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
def _bring_up_interface(self, device_name):
@@ -88,12 +89,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -158,7 +159,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
@@ -173,8 +174,8 @@ def _render_network(entries, target="/", conf_dir="etc/netctl",
devs = []
nameservers = []
- resolv_conf = util.target_path(target, resolv_conf)
- conf_dir = util.target_path(target, conf_dir)
+ resolv_conf = subp.target_path(target, resolv_conf)
+ conf_dir = subp.target_path(target, conf_dir)
for (dev, info) in entries.items():
if dev == 'lo':
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
new file mode 100644
index 00000000..2ed7a7d5
--- /dev/null
+++ b/cloudinit/distros/bsd.py
@@ -0,0 +1,129 @@
+import platform
+
+from cloudinit import distros
+from cloudinit.distros import bsd_utils
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import subp
+from cloudinit import util
+from .networking import BSDNetworking
+
+LOG = logging.getLogger(__name__)
+
+
+class BSD(distros.Distro):
+ networking_cls = BSDNetworking
+ hostname_conf_fn = '/etc/rc.conf'
+ rc_conf_fn = "/etc/rc.conf"
+
+ # Set in BSD distro subclasses
+ group_add_cmd_prefix = []
+ pkg_cmd_install_prefix = []
+ pkg_cmd_remove_prefix = []
+ # There is no update/upgrade on OpenBSD
+ pkg_cmd_update_prefix = None
+ pkg_cmd_upgrade_prefix = None
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = platform.system().lower()
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ return bsd_utils.get_rc_config_value('hostname')
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ raise NotImplementedError('Return list cmd to add member to group')
+
+ def _write_hostname(self, hostname, filename):
+ bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf')
+
+ def create_group(self, name, members=None):
+ if util.is_group(name):
+ LOG.warning("Skipping creation of existing group '%s'", name)
+ else:
+ group_add_cmd = self.group_add_cmd_prefix + [name]
+ try:
+ subp.subp(group_add_cmd)
+ LOG.info("Created new group %s", name)
+ except Exception:
+ util.logexc(LOG, "Failed to create group %s", name)
+
+ if not members:
+ members = []
+ for member in members:
+ if not util.is_user(member):
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ subp.subp(self._get_add_member_to_group_cmd(member, name))
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
+
+ def generate_fallback_config(self):
+ nconf = {'config': [], 'version': 1}
+ for mac, name in net.get_interfaces_by_mac().items():
+ nconf['config'].append(
+ {'type': 'physical', 'name': name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('install', pkgs=pkglist)
+
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
+ raise NotImplementedError('BSD subclasses return a dict of env vars')
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ if command == 'install':
+ cmd = self.pkg_cmd_install_prefix
+ elif command == 'remove':
+ cmd = self.pkg_cmd_remove_prefix
+ elif command == 'update':
+ if not self.pkg_cmd_update_prefix:
+ return
+ cmd = self.pkg_cmd_update_prefix
+ elif command == 'upgrade':
+ if not self.pkg_cmd_upgrade_prefix:
+ return
+ cmd = self.pkg_cmd_upgrade_prefix
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
+
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('Cannot rename network interface.')
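A hedged sketch (not from this patch) of how a subclass drives
package_command above, using the FreeBSD prefixes defined later in this diff:

    # FreeBSD: pkg_cmd_install_prefix = ["pkg", "install"] and
    # _get_pkg_cmd_environ() sets ASSUME_ALWAYS_YES=YES
    distro.package_command('install', pkgs=['tmux'])
    # runs: pkg install tmux  (with ASSUME_ALWAYS_YES=YES in the environment)

    distro.package_command('update')
    # returns without running anything where pkg_cmd_update_prefix is None,
    # as on OpenBSD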
diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py
new file mode 100644
index 00000000..079d0d53
--- /dev/null
+++ b/cloudinit/distros/bsd_utils.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import shlex
+
+from cloudinit import util
+
+# On NetBSD, /etc/rc.conf comes with an if block:
+# if [ -r /etc/defaults/rc.conf ]; then
+# as a consequence, the file is not a regular key/value list
+# anymore and we cannot use cloudinit.distros.parsers.sys_conf.
+# This module ships a more naive parser which is nonetheless able
+# to preserve these if blocks.
+
+
+def _unquote(value):
+ if value[0] == value[-1] and value[0] in ['"', "'"]:
+ return value[1:-1]
+ return value
+
+
+def get_rc_config_value(key, fn='/etc/rc.conf'):
+ key_prefix = '{}='.format(key)
+ for line in util.load_file(fn).splitlines():
+ if line.startswith(key_prefix):
+ value = line.replace(key_prefix, '')
+ return _unquote(value)
+
+
+def set_rc_config_value(key, value, fn='/etc/rc.conf'):
+ lines = []
+ done = False
+ value = shlex.quote(value)
+ original_content = util.load_file(fn)
+ for line in original_content.splitlines():
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k == key:
+ v = value
+ done = True
+ lines.append('='.join([k, v]))
+ else:
+ lines.append(line)
+ if not done:
+ lines.append('='.join([key, value]))
+ new_content = '\n'.join(lines) + '\n'
+ if new_content != original_content:
+ util.write_file(fn, new_content)
+
+
+# vi: ts=4 expandtab
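A hedged round-trip example (not from this patch) for the two helpers above,
assuming /etc/rc.conf contains the line hostname="old.example.org":

    set_rc_config_value('hostname', 'new.example.org')  # rewrites that line
    get_rc_config_value('hostname')                     # -> 'new.example.org'

Lines that are not simple key=value pairs, such as NetBSD's `if [ -r ... ]`
block, are copied through unchanged, and the file is only rewritten when its
content actually changes.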
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 128bb523..844aaf21 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -13,6 +13,7 @@ import os
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -197,7 +198,7 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
util.log_time(logfunc=LOG.debug,
msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=util.subp,
+ func=subp.subp,
args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
@@ -214,7 +215,7 @@ def _get_wrapper_prefix(cmd, mode):
if (util.is_true(mode) or
(str(mode).lower() == "auto" and cmd[0] and
- util.which(cmd[0]))):
+ subp.which(cmd[0]))):
return cmd
else:
return []
@@ -269,7 +270,7 @@ def update_locale_conf(locale, sys_path, keyname='LANG'):
"""Update system locale config"""
LOG.debug('Updating %s with locale setting %s=%s',
sys_path, keyname, locale)
- util.subp(
+ subp.subp(
['update-locale', '--locale-file=' + sys_path,
'%s=%s' % (keyname, locale)], capture=False)
@@ -291,7 +292,7 @@ def regenerate_locale(locale, sys_path, keyname='LANG'):
# finally, trigger regeneration
LOG.debug('Generating locales for %s', locale)
- util.subp(['locale-gen', locale], capture=False)
+ subp.subp(['locale-gen', locale], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 026d1142..dde34d41 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -8,34 +8,25 @@ import os
import re
from io import StringIO
-from cloudinit import distros
-from cloudinit import helpers
+import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
-from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-class Distro(distros.Distro):
+class Distro(cloudinit.distros.bsd.BSD):
usr_lib_exec = '/usr/local/lib'
- rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- hostname_conf_fn = '/etc/rc.conf'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'freebsd'
- cfg['ssh_svcname'] = 'sshd'
+ group_add_cmd_prefix = ['pw', 'group', 'add']
+ pkg_cmd_install_prefix = ["pkg", "install"]
+ pkg_cmd_remove_prefix = ["pkg", "remove"]
+ pkg_cmd_update_prefix = ["pkg", "update"]
+ pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
def _select_hostname(self, hostname, fqdn):
# Should be FQDN if available. See rc.conf(5) in FreeBSD
@@ -43,45 +34,8 @@ class Distro(distros.Distro):
return fqdn
return hostname
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname(self, filename, default=None):
- (_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if contents.get('hostname'):
- return contents['hostname']
- else:
- return default
-
- def _write_hostname(self, hostname, filename):
- rhel_util.update_sysconfig_file(filename, {'hostname': hostname})
-
- def create_group(self, name, members):
- group_add_cmd = ['pw', 'group', 'add', name]
- if util.is_group(name):
- LOG.warning("Skipping creation of existing group '%s'", name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s", name)
- except Exception:
- util.logexc(LOG, "Failed to create group %s", name)
- raise
- if not members:
- members = []
-
- for member in members:
- if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['pw', 'usermod', '-n', member_name, '-G', group_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
@@ -125,7 +79,7 @@ class Distro(distros.Distro):
# Run the command
LOG.info("Adding user %s", name)
try:
- util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
+ subp.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
except Exception:
util.logexc(LOG, "Failed to create user %s", name)
raise
@@ -137,7 +91,7 @@ class Distro(distros.Distro):
def expire_passwd(self, user):
try:
- util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
@@ -149,7 +103,7 @@ class Distro(distros.Distro):
hash_opt = "-h"
try:
- util.subp(['pw', 'usermod', user, hash_opt, '0'],
+ subp.subp(['pw', 'usermod', user, hash_opt, '0'],
data=passwd, logstring="chpasswd for %s" % user)
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
@@ -157,45 +111,13 @@ class Distro(distros.Distro):
def lock_passwd(self, name):
try:
- util.subp(['pw', 'usermod', name, '-h', '-'])
+ subp.subp(['pw', 'usermod', name, '-h', '-'])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
- def create_user(self, name, **kwargs):
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Default locking down the account. 'lock_passwd' defaults to True.
- # lock account unless lock_password is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs and kwargs['sudo'] is not False:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- def generate_fallback_config(self):
- nconf = {'config': [], 'version': 1}
- for mac, name in net.get_interfaces_by_mac().items():
- nconf['config'].append(
- {'type': 'physical', 'name': name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def apply_locale(self, locale, out_fn=None):
- # Adjust the locals value to the new value
+ # Adjust the locales value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
newconf.write(re.sub(r'^default:',
@@ -210,8 +132,8 @@ class Distro(distros.Distro):
try:
LOG.debug("Running cap_mkdb for %s", locale)
- util.subp(['cap_mkdb', self.login_conf_fn])
- except util.ProcessExecutionError:
+ subp.subp(['cap_mkdb', self.login_conf_fn])
+ except subp.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
@@ -225,39 +147,17 @@ class Distro(distros.Distro):
# /etc/rc.conf a line with the following format:
# ifconfig_OLDNAME_name=NEWNAME
# FreeBSD network script will rename the interface automatically.
- return
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
+ pass
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
e['ASSUME_ALWAYS_YES'] = 'YES'
-
- cmd = ['pkg']
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=e, capture=False)
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+ return e
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index dc57717d..2bee1c89 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -9,6 +9,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import net_util
@@ -39,7 +40,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -94,11 +95,11 @@ class Distro(distros.Distro):
cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
'default']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed",
cmd)
@@ -119,12 +120,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -137,11 +138,11 @@ class Distro(distros.Distro):
# Grab device names from init scripts
cmd = ['ls', '/etc/init.d/net.*']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
devices = [x.split('.')[2] for x in _out.split(' ')]
@@ -208,7 +209,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
new file mode 100644
index 00000000..f1a9b182
--- /dev/null
+++ b/cloudinit/distros/netbsd.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import crypt
+import os
+import platform
+
+import cloudinit.distros.bsd
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class NetBSD(cloudinit.distros.bsd.BSD):
+ """
+ Distro subclass for NetBSD.
+
+ (N.B. OpenBSD inherits from this class.)
+ """
+
+ ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+ group_add_cmd_prefix = ["groupadd"]
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ if os.path.exists("/usr/pkg/bin/pkgin"):
+ self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
+ self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
+ self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
+ self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+ else:
+ self.pkg_cmd_install_prefix = ['pkg_add', '-U']
+ self.pkg_cmd_remove_prefix = ['pkg_delete']
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def add_user(self, name, **kwargs):
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping.", name)
+ return False
+
+ adduser_cmd = ['useradd']
+ log_adduser_cmd = ['useradd']
+
+ adduser_opts = {
+ "homedir": '-d',
+ "gecos": '-c',
+ "primary_group": '-g',
+ "groups": '-G',
+ "shell": '-s',
+ }
+ adduser_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ }
+
+ for key, val in kwargs.items():
+ if key in adduser_opts and val and isinstance(val, str):
+ adduser_cmd.extend([adduser_opts[key], val])
+
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
+
+ if 'no_create_home' not in kwargs or 'system' not in kwargs:
+ adduser_cmd += ['-m']
+ log_adduser_cmd += ['-m']
+
+ adduser_cmd += [name]
+ log_adduser_cmd += [name]
+
+ # Run the command
+ LOG.info("Adding user %s", name)
+ try:
+ subp.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise
+ # Set the password if it is provided
+ # For security consideration, only hashed passwd is assumed
+ passwd_val = kwargs.get('passwd', None)
+ if passwd_val is not None:
+ self.set_passwd(name, passwd_val, hashed=True)
+
+ def set_passwd(self, user, passwd, hashed=False):
+ if hashed:
+ hashed_pw = passwd
+ elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+ # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
+ # on NetBSD 7 and 8.
+ LOG.error((
+ 'Cannot set non-encrypted password for user %s. '
+ 'Python >= 3.7 is required.'), user)
+ return
+ else:
+ method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
+ hashed_pw = crypt.crypt(
+ passwd,
+ crypt.mksalt(method)
+ )
+
+ try:
+ subp.subp(['usermod', '-p', hashed_pw, user])
+ except Exception:
+ util.logexc(LOG, "Failed to set password for %s", user)
+ raise
+ self.unlock_passwd(user)
+
+ def force_passwd_change(self, user):
+ try:
+ subp.subp(['usermod', '-F', user])
+ except Exception:
+ util.logexc(LOG, "Failed to set pw expiration for %s", user)
+ raise
+
+ def lock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-C', 'yes', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-C', 'no', name])
+ except Exception:
+ util.logexc(LOG, "Failed to unlock user %s", name)
+ raise
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('NetBSD cannot rename network interface.')
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in NetBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'http://cdn.netbsd.org/pub/pkgsrc/'
+ 'packages/NetBSD/%s/%s/All'
+ ) % (os_arch, os_release)
+ return e
+
+ def update_package_sources(self):
+ pass
+
+
+class Distro(NetBSD):
+ pass
+
+# vi: ts=4 expandtab
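A hedged sketch (not from this patch) of the set_passwd fallback above on a
host where Python >= 3.7 provides crypt.METHOD_BLOWFISH:

    import crypt

    hashed_pw = crypt.crypt('s3cret', crypt.mksalt(crypt.METHOD_BLOWFISH))
    # hashed_pw looks like '$2b$...'; it is then applied with
    #   usermod -p <hashed_pw> <user>
    # and the account is unlocked with usermod -C no <user>.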
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
new file mode 100644
index 00000000..10ed249d
--- /dev/null
+++ b/cloudinit/distros/networking.py
@@ -0,0 +1,212 @@
+import abc
+import logging
+import os
+
+from cloudinit import net, util
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Type aliases (https://docs.python.org/3/library/typing.html#type-aliases),
+# used to make the signatures of methods a little clearer
+DeviceName = str
+NetworkConfig = dict
+
+
+class Networking(metaclass=abc.ABCMeta):
+ """The root of the Networking hierarchy in cloud-init.
+
+ This is part of an ongoing refactor in the cloud-init codebase; for full
+ details, see "``cloudinit.net`` -> ``cloudinit.distros.networking``
+ Hierarchy" in HACKING.rst.
+ """
+
+ def _get_current_rename_info(self) -> dict:
+ return net._get_current_rename_info()
+
+ def _rename_interfaces(self, renames: list, *, current_info=None) -> None:
+ return net._rename_interfaces(renames, current_info=current_info)
+
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ return net.apply_network_config_names(netcfg)
+
+ def device_devid(self, devname: DeviceName):
+ return net.device_devid(devname)
+
+ def device_driver(self, devname: DeviceName):
+ return net.device_driver(devname)
+
+ def extract_physdevs(self, netcfg: NetworkConfig) -> list:
+ return net.extract_physdevs(netcfg)
+
+ def find_fallback_nic(self, *, blacklist_drivers=None):
+ return net.find_fallback_nic(blacklist_drivers=blacklist_drivers)
+
+ def generate_fallback_config(
+ self, *, blacklist_drivers=None, config_driver: bool = False
+ ):
+ return net.generate_fallback_config(
+ blacklist_drivers=blacklist_drivers, config_driver=config_driver
+ )
+
+ def get_devicelist(self) -> list:
+ return net.get_devicelist()
+
+ def get_ib_hwaddrs_by_interface(self) -> dict:
+ return net.get_ib_hwaddrs_by_interface()
+
+ def get_ib_interface_hwaddr(
+ self, devname: DeviceName, ethernet_format: bool
+ ):
+ return net.get_ib_interface_hwaddr(devname, ethernet_format)
+
+ def get_interface_mac(self, devname: DeviceName):
+ return net.get_interface_mac(devname)
+
+ def get_interfaces(self) -> list:
+ return net.get_interfaces()
+
+ def get_interfaces_by_mac(self) -> dict:
+ return net.get_interfaces_by_mac()
+
+ def get_master(self, devname: DeviceName):
+ return net.get_master(devname)
+
+ def interface_has_own_mac(
+ self, devname: DeviceName, *, strict: bool = False
+ ) -> bool:
+ return net.interface_has_own_mac(devname, strict=strict)
+
+ def is_bond(self, devname: DeviceName) -> bool:
+ return net.is_bond(devname)
+
+ def is_bridge(self, devname: DeviceName) -> bool:
+ return net.is_bridge(devname)
+
+ @abc.abstractmethod
+ def is_physical(self, devname: DeviceName) -> bool:
+ """
+ Is ``devname`` a physical network device?
+
+ Examples of non-physical network devices: bonds, bridges, tunnels,
+ loopback devices.
+ """
+
+ def is_renamed(self, devname: DeviceName) -> bool:
+ return net.is_renamed(devname)
+
+ def is_up(self, devname: DeviceName) -> bool:
+ return net.is_up(devname)
+
+ def is_vlan(self, devname: DeviceName) -> bool:
+ return net.is_vlan(devname)
+
+ def master_is_bridge_or_bond(self, devname: DeviceName) -> bool:
+ return net.master_is_bridge_or_bond(devname)
+
+ @abc.abstractmethod
+ def settle(self, *, exists=None) -> None:
+ """Wait for device population in the system to complete.
+
+ :param exists:
+ An optional optimisation. If given, only perform as much of the
+ settle process as is required for the given DeviceName to be
+ present in the system. (This may include skipping the settle
+ process entirely, if the device already exists.)
+ :type exists: Optional[DeviceName]
+ """
+
+ def wait_for_physdevs(
+ self, netcfg: NetworkConfig, *, strict: bool = True
+ ) -> None:
+ """Wait for all the physical devices in `netcfg` to exist on the system
+
+ Specifically, this will check up to 5 times whether all the physical
+ devices are present in the system, calling `self.settle` for each
+ device still missing between checks.
+
+ :param netcfg:
+ The NetworkConfig from which to extract physical devices to wait
+ for.
+ :param strict:
+ Raise a `RuntimeError` if any physical devices are not present
+ after waiting.
+ """
+ physdevs = self.extract_physdevs(netcfg)
+
+ # set of expected iface names and mac addrs
+ expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
+ expected_macs = set(expected_ifaces.keys())
+
+ # set of current macs
+ present_macs = self.get_interfaces_by_mac().keys()
+
+ # compare the set of expected mac address values to
+ # the current macs present; we only check MAC as cloud-init
+ # has not yet renamed interfaces and the netcfg may include
+ # such renames.
+ for _ in range(0, 5):
+ if expected_macs.issubset(present_macs):
+ LOG.debug("net: all expected physical devices present")
+ return
+
+ missing = expected_macs.difference(present_macs)
+ LOG.debug("net: waiting for expected net devices: %s", missing)
+ for mac in missing:
+ # trigger a settle, unless this interface exists
+ devname = expected_ifaces[mac]
+ msg = "Waiting for settle or {} exists".format(devname)
+ util.log_time(
+ LOG.debug,
+ msg,
+ func=self.settle,
+ kwargs={"exists": devname},
+ )
+
+ # update present_macs after settles
+ present_macs = self.get_interfaces_by_mac().keys()
+
+ msg = "Not all expected physical devices present: %s" % missing
+ LOG.warning(msg)
+ if strict:
+ raise RuntimeError(msg)
+
+
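A hedged usage sketch for wait_for_physdevs above (config values invented):

    netcfg = {
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'mac_address': 'aa:bb:cc:dd:ee:ff'}],
    }
    distro.networking.wait_for_physdevs(netcfg)
    # returns as soon as an interface with that MAC is visible; after five
    # settle rounds it logs a warning and, with strict=True (the default),
    # raises RuntimeError.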
+class BSDNetworking(Networking):
+ """Implementation of networking functionality shared across BSDs."""
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ raise NotImplementedError()
+
+ def settle(self, *, exists=None) -> None:
+ """BSD has no equivalent to `udevadm settle`; noop."""
+
+
+class LinuxNetworking(Networking):
+ """Implementation of networking functionality common to Linux distros."""
+
+ def get_dev_features(self, devname: DeviceName) -> str:
+ return net.get_dev_features(devname)
+
+ def has_netfail_standby_feature(self, devname: DeviceName) -> bool:
+ return net.has_netfail_standby_feature(devname)
+
+ def is_netfailover(self, devname: DeviceName) -> bool:
+ return net.is_netfailover(devname)
+
+ def is_netfail_master(self, devname: DeviceName) -> bool:
+ return net.is_netfail_master(devname)
+
+ def is_netfail_primary(self, devname: DeviceName) -> bool:
+ return net.is_netfail_primary(devname)
+
+ def is_netfail_standby(self, devname: DeviceName) -> bool:
+ return net.is_netfail_standby(devname)
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ return os.path.exists(net.sys_dev_path(devname, "device"))
+
+ def settle(self, *, exists=None) -> None:
+ if exists is not None:
+ exists = net.sys_dev_path(exists)
+ util.udevadm_settle(exists=exists)
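A hedged sketch (not from this patch; MyDistro is hypothetical) of how these
classes are wired up by the cloudinit/distros/__init__.py changes earlier in
this diff:

    class MyDistro(distros.Distro):
        networking_cls = LinuxNetworking  # BSD() uses BSDNetworking instead

    # Distro.__init__ instantiates it, so callers stay OS-agnostic:
    #   self.networking = self.networking_cls()
    #   self.networking.settle(exists='eth0')
    # which runs `udevadm settle` on Linux and is a noop on the BSDs.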
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
new file mode 100644
index 00000000..720c9cf3
--- /dev/null
+++ b/cloudinit/distros/openbsd.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import platform
+
+import cloudinit.distros.netbsd
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(cloudinit.distros.netbsd.NetBSD):
+ hostname_conf_fn = '/etc/myname'
+
+ def _read_hostname(self, filename, default=None):
+ return util.load_file(self.hostname_conf_fn)
+
+ def _write_hostname(self, hostname, filename):
+ content = hostname + '\n'
+ util.write_file(self.hostname_conf_fn, content)
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def lock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-p', '*', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ pass
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in OpenBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
+ 'packages/{os_arch}/').format(
+ os_arch=os_arch, os_release=os_release
+ )
+ return e
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index dd56a3f4..b8e557b8 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -14,6 +14,7 @@ from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util as rhutil
@@ -97,7 +98,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
@@ -129,7 +130,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -144,6 +145,9 @@ class Distro(distros.Distro):
return default
return hostname
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
def _read_hostname_conf(self, filename):
conf = HostnameConf(util.load_file(filename))
conf.parse()
@@ -160,7 +164,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
conf = None
try:
@@ -181,7 +185,7 @@ class Distro(distros.Distro):
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
- """Allow distro to determine the preferred ntp client list"""
+ # Allow distro to determine the preferred ntp client list
if not self._preferred_ntp_clients:
distro_info = util.system_info()['dist']
name = distro_info[0]
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 299d54b5..62929d03 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -150,9 +150,10 @@ class ResolvConf(object):
tail = ''
try:
(cfg_opt, cfg_values) = head.split(None, 1)
- except (IndexError, ValueError):
- raise IOError("Incorrectly formatted resolv.conf line %s"
- % (i + 1))
+ except (IndexError, ValueError) as e:
+ raise IOError(
+ "Incorrectly formatted resolv.conf line %s" % (i + 1)
+ ) from e
if cfg_opt not in ['nameserver', 'domain',
'search', 'sortlist', 'options']:
raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index f55d96f7..c72f7c17 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -11,6 +11,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util
@@ -83,7 +84,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
'HOSTNAME': hostname,
@@ -108,7 +109,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -146,7 +147,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- if util.which('dnf'):
+ if subp.which('dnf'):
LOG.debug('Using DNF for package management')
cmd = ['dnf']
else:
@@ -173,7 +174,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/tests/__init__.py b/cloudinit/distros/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/distros/tests/__init__.py
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
new file mode 100644
index 00000000..db534654
--- /dev/null
+++ b/cloudinit/distros/tests/test_init.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit/distros/__init__.py"""
+
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS
+
+
+# Define a set of characters we would expect to be replaced
+INVALID_URL_CHARS = [
+ chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS
+]
+for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
+ # Remove from the set characters that either separate hostname parts (":",
+ # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
+ # unable to parse URLs ("[", "]").
+ INVALID_URL_CHARS.remove(separator)
+
+
+class TestGetPackageMirrorInfo:
+ """
+ Tests for cloudinit.distros._get_package_mirror_info.
+
+ These supplement the tests in tests/unittests/test_distros/test_generic.py
+ which are more focused on testing a single production-like configuration.
+ These tests are more focused on specific aspects of the unit under test.
+ """
+
+ @pytest.mark.parametrize('mirror_info,expected', [
+ # Empty info gives empty return
+ ({}, {}),
+ # failsafe values used if present
+ ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}},
+ {'primary': 'http://value', 'security': 'http://other'}),
+ # search values used if present
+ ({'search': {'primary': ['http://value'],
+ 'security': ['http://other']}},
+ {'primary': ['http://value'], 'security': ['http://other']}),
+ # failsafe values used if search value not present
+ ({'search': {'primary': ['http://value']},
+ 'failsafe': {'security': 'http://other'}},
+ {'primary': ['http://value'], 'security': 'http://other'})
+ ])
+ def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
+ """
+ Test the interaction between search and failsafe inputs
+
+ (This doesn't test the case where the mirror_filter removes all search
+ options; test_failsafe_used_if_all_search_results_filtered_out covers
+ that.)
+ """
+ assert expected == _get_package_mirror_info(mirror_info,
+ mirror_filter=lambda x: x)
+
+ def test_failsafe_used_if_all_search_results_filtered_out(self):
+ """Test the failsafe option used if all search options eliminated."""
+ mirror_info = {
+ 'search': {'primary': ['http://value']},
+ 'failsafe': {'primary': 'http://other'}
+ }
+ assert {'primary': 'http://other'} == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: False)
+
+ @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [
+ (True, 'ec2')
+ ])
+ @pytest.mark.parametrize('availability_zone,region,patterns,expected', (
+ # Test ec2_region alone
+ ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'],
+ ['http://ec2-fk-fake-1/ubuntu']),
+ # Test availability_zone alone
+ ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fk-fake-1f/ubuntu']),
+ # Test region alone
+ (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'],
+ ['http://rg-fk-fake-1/ubuntu']),
+ # Test that ec2_region is not available for non-matching AZs
+ ('fake-fake-1f', None,
+ ['http://EC2-%(ec2_region)s/ubuntu',
+ 'http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fake-fake-1f/ubuntu']),
+        # Test that template order is maintained
+ (None, 'fake-region',
+ ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'],
+ ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']),
+ # Test that non-ASCII hostnames are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']),
+ # Test that non-ASCII hostnames with a port are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']),
+ # Test that non-ASCII non-hostname parts of URLs are unchanged
+ (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'],
+ ['http://www.example.com/ТεЅТ̣/ubuntu']),
+ # Test that IPv4 addresses are unchanged
+ (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'],
+ ['http://192.168.1.1:8080/fk-fake-1/ubuntu']),
+ # Test that IPv6 addresses are unchanged
+ (None, 'fk-fake-1',
+ ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'],
+ ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']),
+ # Test that unparseable URLs are filtered out of the mirror list
+ (None, 'inv[lid',
+ ['http://%(region)s.in.hostname/should/be/filtered',
+ 'http://but.not.in.the.path/%(region)s'],
+ ['http://but.not.in.the.path/inv[lid']),
+ (None, '-some-region-',
+ ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'],
+ ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']),
+ ) + tuple(
+ # Dynamically generate a test case for each non-LDH
+ # (Letters/Digits/Hyphen) ASCII character, testing that it is
+ # substituted with a hyphen
+ (None, 'fk{0}fake{0}1'.format(invalid_char),
+ ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu'])
+ for invalid_char in INVALID_URL_CHARS
+ ))
+ def test_valid_substitution(self,
+ allow_ec2_mirror,
+ platform_type,
+ availability_zone,
+ region,
+ patterns,
+ expected):
+ """Test substitution works as expected."""
+ flag_path = "cloudinit.distros." \
+ "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+
+ m_data_source = mock.Mock(
+ availability_zone=availability_zone,
+ region=region,
+ platform_type=platform_type
+ )
+ mirror_info = {'search': {'primary': patterns}}
+
+ with mock.patch(flag_path, allow_ec2_mirror):
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x
+ )
+ print(allow_ec2_mirror)
+ print(platform_type)
+ print(availability_zone)
+ print(region)
+ print(patterns)
+ print(expected)
+ assert {'primary': expected} == ret
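
The parametrized cases above reduce to %-style template substitution against the datasource's region and availability zone, with hostname components lower-cased, sanitized, and IDNA-encoded. A condensed sketch of one case (the values come from the test data above):

    from unittest import mock

    from cloudinit.distros import _get_package_mirror_info

    data_source = mock.Mock(
        availability_zone='fk-fake-1f', region=None, platform_type='ec2')
    with mock.patch(
        'cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES', True
    ):
        info = _get_package_mirror_info(
            {'search': {
                'primary': ['http://AZ-%(availability_zone)s/ubuntu']}},
            data_source=data_source,
            mirror_filter=lambda x: x,
        )
    assert info == {'primary': ['http://az-fk-fake-1f/ubuntu']}
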
diff --git a/cloudinit/distros/tests/test_networking.py b/cloudinit/distros/tests/test_networking.py
new file mode 100644
index 00000000..b9a63842
--- /dev/null
+++ b/cloudinit/distros/tests/test_networking.py
@@ -0,0 +1,192 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import net
+from cloudinit.distros.networking import (
+ BSDNetworking,
+ LinuxNetworking,
+ Networking,
+)
+
+# See https://docs.pytest.org/en/stable/example
+# /parametrize.html#parametrizing-conditional-raising
+from contextlib import ExitStack as does_not_raise
+
+
+@pytest.yield_fixture
+def generic_networking_cls():
+ """Returns a direct Networking subclass which errors on /sys usage.
+
+ This enables the direct testing of functionality only present on the
+ ``Networking`` super-class, and provides a check on accidentally using /sys
+ in that context.
+ """
+
+ class TestNetworking(Networking):
+ def is_physical(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def settle(self, *args, **kwargs):
+ raise NotImplementedError
+
+ error = AssertionError("Unexpectedly used /sys in generic networking code")
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path", side_effect=error,
+ ):
+ yield TestNetworking
+
+
+@pytest.yield_fixture
+def sys_class_net(tmpdir):
+ sys_class_net_path = tmpdir.join("sys/class/net")
+ sys_class_net_path.ensure_dir()
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path",
+ return_value=sys_class_net_path.strpath + "/",
+ ):
+ yield sys_class_net_path
+
+
+class TestBSDNetworkingIsPhysical:
+ def test_raises_notimplementederror(self):
+ with pytest.raises(NotImplementedError):
+ BSDNetworking().is_physical("eth0")
+
+
+class TestLinuxNetworkingIsPhysical:
+ def test_returns_false_by_default(self, sys_class_net):
+ assert not LinuxNetworking().is_physical("eth0")
+
+ def test_returns_false_if_devname_exists_but_not_physical(
+ self, sys_class_net
+ ):
+ devname = "eth0"
+ sys_class_net.join(devname).mkdir()
+ assert not LinuxNetworking().is_physical(devname)
+
+ def test_returns_true_if_device_is_physical(self, sys_class_net):
+ devname = "eth0"
+ device_dir = sys_class_net.join(devname)
+ device_dir.mkdir()
+ device_dir.join("device").write("")
+
+ assert LinuxNetworking().is_physical(devname)
+
+
+class TestBSDNetworkingSettle:
+ def test_settle_doesnt_error(self):
+ # This also implicitly tests that it doesn't use subp.subp
+ BSDNetworking().settle()
+
+
+@pytest.mark.usefixtures("sys_class_net")
+@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True)
+class TestLinuxNetworkingSettle:
+ def test_no_arguments(self, m_udevadm_settle):
+ LinuxNetworking().settle()
+
+ assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list
+
+ def test_exists_argument(self, m_udevadm_settle):
+ LinuxNetworking().settle(exists="ens3")
+
+ expected_path = net.sys_dev_path("ens3")
+ assert [
+ mock.call(exists=expected_path)
+ ] == m_udevadm_settle.call_args_list
+
+
+class TestNetworkingWaitForPhysDevs:
+ @pytest.fixture
+ def wait_for_physdevs_netcfg(self):
+ """This config is shared across all the tests in this class."""
+
+ def ethernet(mac, name, driver=None, device_id=None):
+ v2_cfg = {"set-name": name, "match": {"macaddress": mac}}
+ if driver:
+ v2_cfg["match"].update({"driver": driver})
+ if device_id:
+ v2_cfg["match"].update({"device_id": device_id})
+
+ return v2_cfg
+
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: ethernet(*args) for args in physdevs},
+ }
+ return netcfg
+
+ def test_skips_settle_if_all_present(
+ self, generic_networking_cls, wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ assert 0 == m_settle.call_count
+
+ def test_calls_udev_settle_on_missing(
+ self, generic_networking_cls, wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0"
+ }, # first call ens3 is missing
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0",
+ "00:11:22:33:44:55": "ens3",
+ }, # second call has both
+ ]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ m_settle.assert_called_with(exists="ens3")
+
+ @pytest.mark.parametrize(
+ "strict,expectation",
+ [(True, pytest.raises(RuntimeError)), (False, does_not_raise())],
+ )
+ def test_retrying_and_strict_behaviour(
+ self,
+ strict,
+ expectation,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {}
+
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ with expectation:
+ networking.wait_for_physdevs(
+ wait_for_physdevs_netcfg, strict=strict
+ )
+
+ assert (
+ 5 * len(wait_for_physdevs_netcfg["ethernets"])
+ == m_settle.call_count
+ )
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 23be3bdd..b4c4b0c3 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -49,7 +49,5 @@ class Distro(debian.Distro):
copy.deepcopy(PREFERRED_NTP_CLIENTS))
return self._preferred_ntp_clients
- pass
-
# vi: ts=4 expandtab
diff --git a/cloudinit/features.py b/cloudinit/features.py
new file mode 100644
index 00000000..c44fa29e
--- /dev/null
+++ b/cloudinit/features.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Feature flags are used as a way to easily toggle configuration
+**at build time**. They are provided to accommodate feature deprecation and
+downstream configuration changes.
+
+Currently used upstream values for feature flags are set in
+``cloudinit/features.py``. Overrides to these values (typically via quilt
+patch) can be placed in a file called ``feature_overrides.py`` in the same
+directory. Any value set in ``feature_overrides.py`` will override the
+original value set in ``features.py``.
+
+Each flag should include a short comment regarding the reason for
+the flag and intended lifetime.
+
+Tests are required for new feature flags, and tests must verify
+all valid states of a flag, not just the default state.
+"""
+
+ERROR_ON_USER_DATA_FAILURE = True
+"""
+If there is a failure in obtaining user data (e.g., a #include fetch or
+decompression fails), the old behavior is to log a warning and proceed.
+After the 20.2 release, we instead raise an exception.
+This flag can be removed once Focal is no longer supported.
+"""
+
+
+ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = False
+"""
+When configuring apt mirrors, the old behavior is to allow
+the use of ec2 mirrors if the datasource availability_zone format
+matches one of the possible aws ec2 regions. After the 20.2 release, we
+no longer publish ec2 region mirror urls on non-AWS cloud platforms.
+Besides feature_overrides.py, users can override this by providing
+#cloud-config apt directives.
+"""
+
+try:
+ # pylint: disable=wildcard-import
+ from cloudinit.feature_overrides import * # noqa
+except ImportError:
+ pass
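
As the module docstring describes, a downstream can ship a feature_overrides.py next to features.py; any name it defines wins via the wildcard import above. A hypothetical override file (the chosen value is illustrative, not a recommendation):

    # cloudinit/feature_overrides.py -- hypothetical downstream patch.
    # Restore pre-20.2 behavior: warn on user-data failures rather than
    # raising an exception.
    ERROR_ON_USER_DATA_FAILURE = False
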
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 7fe17a2e..be0ca0ea 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -8,7 +8,7 @@
"""gpg.py - Collection of gpg key related functions"""
from cloudinit import log as logging
-from cloudinit import util
+from cloudinit import subp
import time
@@ -18,9 +18,9 @@ LOG = logging.getLogger(__name__)
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
try:
- (armour, _) = util.subp(["gpg", "--export", "--armour", key],
+ (armour, _) = subp.subp(["gpg", "--export", "--armour", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
# debug, since it happens for any key not on the system initially
LOG.debug('Failed to export armoured key "%s": %s', key, error)
armour = None
@@ -51,11 +51,11 @@ def recv_key(key, keyserver, retries=(1, 1)):
while True:
trynum += 1
try:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
key, keyserver, trynum)
return
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
error = e
try:
naplen = next(sleeps)
@@ -63,18 +63,19 @@ def recv_key(key, keyserver, retries=(1, 1)):
"Import failed with exit code %d, will try again in %ss",
error.exit_code, naplen)
time.sleep(naplen)
- except StopIteration:
+ except StopIteration as e:
raise ValueError(
("Failed to import key '%s' from keyserver '%s' "
- "after %d tries: %s") % (key, keyserver, trynum, error))
+ "after %d tries: %s") % (key, keyserver, trynum, error)
+ ) from e
def delete_key(key):
"""Delete the specified key from the local gpg ring"""
try:
- util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
+ subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
LOG.warning('Failed delete key "%s": %s', key, error)
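
In recv_key above, `retries` is a sequence of sleep durations between attempts, so the default (1, 1) permits three tries in total; once the iterator is exhausted, the StopIteration is chained into a ValueError. A sketch of a caller choosing a longer backoff (the key ID and keyserver are illustrative):

    from cloudinit import gpg

    # Up to four attempts, sleeping 1s, 2s, then 5s between them; raises
    # ValueError (chained from StopIteration) if every attempt fails.
    gpg.recv_key('DEADBEEF', 'keyserver.ubuntu.com', retries=(1, 2, 5))
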
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index dca50a49..c6205097 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -12,6 +12,7 @@ import os
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
@@ -48,8 +49,8 @@ class BootHookPartHandler(handlers.Handler):
env = os.environ.copy()
if self.instance_id is not None:
env['INSTANCE_ID'] = str(self.instance_id)
- util.subp([filepath], env=env)
- except util.ProcessExecutionError:
+ subp.subp([filepath], env=env)
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
util.logexc(LOG, "Boothooks unknown error when running %s",
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index ce3accf6..aadfbf86 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -83,7 +83,8 @@ def render_jinja_payload_from_file(
if e.errno == EACCES:
raise RuntimeError(
'Cannot render jinja template vars. No read permission on'
- " '%s'. Try sudo" % instance_data_file)
+ " '%s'. Try sudo" % instance_data_file
+ ) from e
rendered_payload = render_jinja_payload(
payload, payload_fn, instance_data, debug)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 003cad60..a9d29537 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -13,6 +13,7 @@ import re
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
@@ -52,7 +53,7 @@ class UpstartJobPartHandler(handlers.Handler):
util.write_file(path, payload, 0o644)
if SUITABLE_UPSTART:
- util.subp(["initctl", "reload-configuration"], capture=False)
+ subp.subp(["initctl", "reload-configuration"], capture=False)
def _has_suitable_upstart():
@@ -63,7 +64,7 @@ def _has_suitable_upstart():
if not os.path.exists("/sbin/initctl"):
return False
try:
- (version_out, _err) = util.subp(["initctl", "version"])
+ (version_out, _err) = subp.subp(["initctl", "version"])
except Exception:
util.logexc(LOG, "initctl version failed")
return False
@@ -77,7 +78,7 @@ def _has_suitable_upstart():
if not os.path.exists("/usr/bin/dpkg-query"):
return False
try:
- (dpkg_ver, _err) = util.subp(["dpkg-query",
+ (dpkg_ver, _err) = subp.subp(["dpkg-query",
"--showformat=${Version}",
"--show", "upstart"], rcs=[0, 1])
except Exception:
@@ -86,9 +87,9 @@ def _has_suitable_upstart():
try:
good = "1.8-0ubuntu1.2"
- util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
+ subp.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code == 1:
pass
else:
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 7d2a3305..9752ad28 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -451,8 +451,4 @@ class DefaultingConfigParser(RawConfigParser):
contents = '\n'.join([header, contents, ''])
return contents
-
-def identity(object):
- return object
-
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 827db12b..2e5df042 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -122,17 +122,12 @@ def getLogger(name='cloudinit'):
return logging.getLogger(name)
-# Fixes this annoyance...
-# No handlers could be found for logger XXX annoying output...
-try:
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
def _resetLogger(log):
+ """Remove all current handlers, unset log level and add a NullHandler.
+
+ (Adding the NullHandler avoids "No handlers could be found for logger XXX"
+ messages.)
+ """
if not log:
return
handlers = list(log.handlers)
@@ -141,7 +136,7 @@ def _resetLogger(log):
h.close()
log.removeHandler(h)
log.setLevel(NOTSET)
- log.addHandler(NullHandler())
+ log.addHandler(logging.NullHandler())
def resetLogging():
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 1d5eb535..e233149a 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,13 +6,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
+import ipaddress
import logging
import os
import re
-from functools import partial
-from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -97,10 +98,6 @@ def is_up(devname):
return read_sys_net_safe(devname, "operstate", translate=translate)
-def is_wireless(devname):
- return os.path.exists(sys_dev_path(devname, "wireless"))
-
-
def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
@@ -264,28 +261,6 @@ def is_vlan(devname):
return 'DEVTYPE=vlan' in uevent.splitlines()
-def is_connected(devname):
- # is_connected isn't really as simple as that. 2 is
- # 'physically connected'. 3 is 'not connected'. but a wlan interface will
- # always show 3.
- iflink = read_sys_net_safe(devname, "iflink")
- if iflink == "2":
- return True
- if not is_wireless(devname):
- return False
- LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
- return read_sys_net_safe(devname, "carrier",
- translate={'0': False, '1': True})
-
-
-def is_physical(devname):
- return os.path.exists(sys_dev_path(devname, "device"))
-
-
-def is_present(devname):
- return os.path.exists(sys_dev_path(devname))
-
-
def device_driver(devname):
"""Return the device driver for net device named 'devname'."""
driver = None
@@ -334,10 +309,20 @@ def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
if util.is_FreeBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
+ elif util.is_NetBSD() or util.is_OpenBSD():
+ return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
else:
return find_fallback_nic_on_linux(blacklist_drivers)
+def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
+ values = list(sorted(
+ get_interfaces_by_mac().values(),
+ key=natural_sort_key))
+ if values:
+ return values[0]
+
+
def find_fallback_nic_on_freebsd(blacklist_drivers=None):
"""Return the name of the 'fallback' network device on FreeBSD.
@@ -347,7 +332,7 @@ def find_fallback_nic_on_freebsd(blacklist_drivers=None):
we'll use the first interface from ``ifconfig -l -u ether``
"""
- stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether'])
+ stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether'])
values = stdout.split()
if values:
return values[0]
@@ -508,43 +493,6 @@ def extract_physdevs(netcfg):
raise RuntimeError('Unknown network config version: %s' % version)
-def wait_for_physdevs(netcfg, strict=True):
- physdevs = extract_physdevs(netcfg)
-
- # set of expected iface names and mac addrs
- expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
- expected_macs = set(expected_ifaces.keys())
-
- # set of current macs
- present_macs = get_interfaces_by_mac().keys()
-
- # compare the set of expected mac address values to
- # the current macs present; we only check MAC as cloud-init
- # has not yet renamed interfaces and the netcfg may include
- # such renames.
- for _ in range(0, 5):
- if expected_macs.issubset(present_macs):
- LOG.debug('net: all expected physical devices present')
- return
-
- missing = expected_macs.difference(present_macs)
- LOG.debug('net: waiting for expected net devices: %s', missing)
- for mac in missing:
- # trigger a settle, unless this interface exists
- syspath = sys_dev_path(expected_ifaces[mac])
- settle = partial(util.udevadm_settle, exists=syspath)
- msg = 'Waiting for udev events to settle or %s exists' % syspath
- util.log_time(LOG.debug, msg, func=settle)
-
- # update present_macs after settles
- present_macs = get_interfaces_by_mac().keys()
-
- msg = 'Not all expected physical devices present: %s' % missing
- LOG.warning(msg)
- if strict:
- raise RuntimeError(msg)
-
-
def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
"""read the network config and rename devices accordingly.
if strict_present is false, then do not raise exception if no devices
@@ -558,7 +506,9 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
try:
_rename_interfaces(extract_physdevs(netcfg))
except RuntimeError as e:
- raise RuntimeError('Failed to apply network config names: %s' % e)
+ raise RuntimeError(
+ 'Failed to apply network config names: %s' % e
+ ) from e
def interface_has_own_mac(ifname, strict=False):
@@ -609,9 +559,9 @@ def _get_current_rename_info(check_downable=True):
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
+ ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent',
'scope', 'global'], capture=True)
- ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
+ ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True)
nics_with_addresses = set()
for bytes_out in (ipv6, ipv4):
@@ -647,13 +597,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
for data in cur_info.values())
def rename(cur, new):
- util.subp(["ip", "link", "set", cur, "name", new], capture=True)
+ subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
def down(name):
- util.subp(["ip", "link", "set", name, "down"], capture=True)
+ subp.subp(["ip", "link", "set", name, "down"], capture=True)
def up(name):
- util.subp(["ip", "link", "set", name, "up"], capture=True)
+ subp.subp(["ip", "link", "set", name, "up"], capture=True)
ops = []
errors = []
@@ -799,23 +749,27 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac():
if util.is_FreeBSD():
return get_interfaces_by_mac_on_freebsd()
+ elif util.is_NetBSD():
+ return get_interfaces_by_mac_on_netbsd()
+ elif util.is_OpenBSD():
+ return get_interfaces_by_mac_on_openbsd()
else:
return get_interfaces_by_mac_on_linux()
def get_interfaces_by_mac_on_freebsd():
- (out, _) = util.subp(['ifconfig', '-a', 'ether'])
+ (out, _) = subp.subp(['ifconfig', '-a', 'ether'])
# flatten each interface block in a single line
def flatten(out):
curr_block = ''
- for l in out.split('\n'):
- if l.startswith('\t'):
- curr_block += l
+ for line in out.split('\n'):
+ if line.startswith('\t'):
+ curr_block += line
else:
if curr_block:
yield curr_block
- curr_block = l
+ curr_block = line
yield curr_block
# looks for interface and mac in a list of flatten block
@@ -830,6 +784,37 @@ def get_interfaces_by_mac_on_freebsd():
return results
+def get_interfaces_by_mac_on_netbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*address:\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
+ (out, _) = subp.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
+def get_interfaces_by_mac_on_openbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*lladdr\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+ (out, _) = subp.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
def get_interfaces_by_mac_on_linux():
"""Build a dictionary of tuples {mac: name}.
@@ -917,6 +902,38 @@ def has_url_connectivity(url):
return True
+def is_ip_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IP address.
+
+ :param s:
+ The string to test.
+
+ :return:
+        A bool indicating whether the entire string is a valid IP address.
+ """
+ try:
+ ipaddress.ip_address(s)
+ except ValueError:
+ return False
+ return True
+
+
+def is_ipv4_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv4 address.
+
+ :param s:
+ The string to test.
+
+ :return:
+        A bool indicating whether the entire string is a valid IPv4 address.
+ """
+ try:
+ ipaddress.IPv4Address(s)
+ except ValueError:
+ return False
+ return True
+
+
class EphemeralIPv4Network(object):
"""Context manager which sets up temporary static network configuration.
@@ -950,7 +967,8 @@ class EphemeralIPv4Network(object):
self.prefix = mask_to_net_prefix(prefix_or_mask)
except ValueError as e:
raise ValueError(
- 'Cannot setup network: {0}'.format(e))
+ 'Cannot setup network: {0}'.format(e)
+ ) from e
self.connectivity_url = connectivity_url
self.interface = interface
@@ -990,11 +1008,11 @@ class EphemeralIPv4Network(object):
def __exit__(self, excp_type, excp_value, excp_traceback):
"""Teardown anything we set up."""
for cmd in self.cleanup_cmds:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _delete_address(self, address, prefix):
"""Perform the ip command to remove the specified address."""
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'del',
'%s/%s' % (address, prefix), 'dev', self.interface],
capture=True)
@@ -1006,11 +1024,11 @@ class EphemeralIPv4Network(object):
'Attempting setup of ephemeral network on %s with %s brd %s',
self.interface, cidr, self.broadcast)
try:
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
self.broadcast, 'dev', self.interface],
capture=True, update_env={'LANG': 'C'})
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if "File exists" not in e.stderr:
raise
LOG.debug(
@@ -1018,7 +1036,7 @@ class EphemeralIPv4Network(object):
self.interface, self.ip)
else:
# Address creation success, bring up device and queue cleanup
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
'up'], capture=True)
self.cleanup_cmds.append(
@@ -1035,7 +1053,7 @@ class EphemeralIPv4Network(object):
via_arg = []
if gateway != "0.0.0.0/0":
via_arg = ['via', gateway]
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', net_address] + via_arg +
['dev', self.interface], capture=True)
self.cleanup_cmds.insert(
@@ -1045,20 +1063,20 @@ class EphemeralIPv4Network(object):
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
- out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
+ out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
if 'default' in out:
LOG.debug(
'Skip ephemeral route setup. %s already has default route: %s',
self.interface, out.strip())
return
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
'src', self.ip], capture=True)
self.cleanup_cmds.insert(
0,
['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
'src', self.ip])
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', 'default', 'via', self.router,
'dev', self.interface], capture=True)
self.cleanup_cmds.insert(
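
The new is_ip_address/is_ipv4_address helpers above are thin wrappers over the stdlib ipaddress constructors: any ValueError means the string as a whole is not an address. A quick demonstration (the addresses are illustrative):

    from cloudinit import net

    assert net.is_ip_address('192.168.1.1')
    assert net.is_ip_address('2001:67c:1360:8001::23')
    assert net.is_ipv4_address('192.168.1.1')
    # An IPv6 address is an IP address but not an IPv4 address.
    assert not net.is_ipv4_address('2001:67c:1360:8001::23')
    # Partial matches are rejected: the full string must parse.
    assert not net.is_ip_address('192.168.1.1/24')
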
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
new file mode 100644
index 00000000..e34e0454
--- /dev/null
+++ b/cloudinit/net/bsd.py
@@ -0,0 +1,167 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit import subp
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros import bsd_utils
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class BSDRenderer(renderer.Renderer):
+ resolv_conf_fn = 'etc/resolv.conf'
+ rc_conf_fn = 'etc/rc.conf'
+
+ def get_rc_config_value(self, key):
+ fn = subp.target_path(self.target, self.rc_conf_fn)
+        return bsd_utils.get_rc_config_value(key, fn=fn)
+
+ def set_rc_config_value(self, key, value):
+ fn = subp.target_path(self.target, self.rc_conf_fn)
+ bsd_utils.set_rc_config_value(key, value, fn=fn)
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.target = None
+ self.interface_configurations = {}
+ self._postcmds = config.get('postcmds', True)
+
+ def _ifconfig_entries(self, settings, target=None):
+ ifname_by_mac = net.get_interfaces_by_mac()
+ for interface in settings.iter_interfaces():
+ device_name = interface.get("name")
+ device_mac = interface.get("mac_address")
+ if device_name and re.match(r'^lo\d+$', device_name):
+ continue
+ if device_mac not in ifname_by_mac:
+ LOG.info('Cannot find any device with MAC %s', device_mac)
+ elif device_mac and device_name:
+ cur_name = ifname_by_mac[device_mac]
+ if cur_name != device_name:
+ LOG.info('netif service will rename interface %s to %s',
+ cur_name, device_name)
+ try:
+ self.rename_interface(cur_name, device_name)
+ except NotImplementedError:
+ LOG.error((
+ 'Interface renaming is '
+ 'not supported on this OS'))
+ device_name = cur_name
+
+ else:
+ device_name = ifname_by_mac[device_mac]
+
+ LOG.info('Configuring interface %s', device_name)
+
+ self.interface_configurations[device_name] = 'DHCP'
+
+ for subnet in interface.get("subnets", []):
+ if subnet.get('type') == 'static':
+ if not subnet.get('netmask'):
+ LOG.debug(
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address')
+ )
+ continue
+ LOG.debug('Configuring dev %s with %s / %s', device_name,
+ subnet.get('address'), subnet.get('netmask'))
+
+ self.interface_configurations[device_name] = {
+ 'address': subnet.get('address'),
+ 'netmask': subnet.get('netmask'),
+ }
+
+ def _route_entries(self, settings, target=None):
+ routes = list(settings.iter_routes())
+ for interface in settings.iter_interfaces():
+ subnets = interface.get("subnets", [])
+ for subnet in subnets:
+ if subnet.get('type') != 'static':
+ continue
+ gateway = subnet.get('gateway')
+ if gateway and len(gateway.split('.')) == 4:
+ routes.append({
+ 'network': '0.0.0.0',
+ 'netmask': '0.0.0.0',
+ 'gateway': gateway})
+ routes += subnet.get('routes', [])
+ for route in routes:
+ network = route.get('network')
+ if not network:
+ LOG.debug('Skipping a bad route entry')
+ continue
+ netmask = route.get('netmask')
+ gateway = route.get('gateway')
+ self.set_route(network, netmask, gateway)
+
+ def _resolve_conf(self, settings, target=None):
+ nameservers = settings.dns_nameservers
+ searchdomains = settings.dns_searchdomains
+ for interface in settings.iter_interfaces():
+ for subnet in interface.get("subnets", []):
+ if 'dns_nameservers' in subnet:
+ nameservers.extend(subnet['dns_nameservers'])
+ if 'dns_search' in subnet:
+ searchdomains.extend(subnet['dns_search'])
+ # Try to read the /etc/resolv.conf or just start from scratch if that
+ # fails.
+ try:
+ resolvconf = ResolvConf(util.load_file(subp.target_path(
+ target, self.resolv_conf_fn)))
+ resolvconf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed to parse %s, use new empty file",
+ subp.target_path(target, self.resolv_conf_fn))
+ resolvconf = ResolvConf('')
+ resolvconf.parse()
+
+ # Add some nameservers
+ for server in nameservers:
+ try:
+ resolvconf.add_nameserver(server)
+ except ValueError:
+ util.logexc(LOG, "Failed to add nameserver %s", server)
+
+ # And add any searchdomains.
+ for domain in searchdomains:
+ try:
+ resolvconf.add_search_domain(domain)
+ except ValueError:
+ util.logexc(LOG, "Failed to add search domain %s", domain)
+ util.write_file(
+ subp.target_path(target, self.resolv_conf_fn),
+ str(resolvconf), 0o644)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ self._ifconfig_entries(settings=network_state)
+ self._route_entries(settings=network_state)
+ self._resolve_conf(settings=network_state)
+
+ self.write_config()
+ self.start_services(run=self._postcmds)
+
+ def dhcp_interfaces(self):
+ ic = self.interface_configurations.items
+ return [k for k, v in ic() if v == 'DHCP']
+
+ def start_services(self, run=False):
+ raise NotImplementedError()
+
+ def write_config(self, target=None):
+ raise NotImplementedError()
+
+ def set_gateway(self, gateway):
+ raise NotImplementedError()
+
+ def rename_interface(self, cur_name, device_name):
+ raise NotImplementedError()
+
+ def set_route(self, network, netmask, gateway):
+ raise NotImplementedError()
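
BSDRenderer above is a template-method base class: render_network_state gathers interface, route, and resolver state, then defers persistence and service management to OS-specific hooks. The FreeBSD and NetBSD renderers below fill those hooks in; a minimal hypothetical subclass shows the expected shape (the method bodies are placeholders, not a real OS port):

    import cloudinit.net.bsd


    class ExampleRenderer(cloudinit.net.bsd.BSDRenderer):
        """Hypothetical renderer illustrating the base-class hooks."""

        def write_config(self, target=None):
            # Persist self.interface_configurations, e.g. into etc/rc.conf.
            for device_name, v in self.interface_configurations.items():
                if v == 'DHCP':
                    self.set_rc_config_value(
                        'ifconfig_' + device_name, 'DHCP')

        def start_services(self, run=False):
            if not run:
                return  # postcmds disabled; nothing to restart

        def set_route(self, network, netmask, gateway):
            pass  # OS-specific route persistence would go here

        def rename_interface(self, cur_name, device_name):
            # Leaving this unimplemented triggers the LOG.error fallback
            # in _ifconfig_entries above.
            raise NotImplementedError()
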
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 64e1c699..cc8dc17b 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -10,6 +10,7 @@ import base64
import glob
import gzip
import io
+import logging
import os
from cloudinit import util
@@ -19,21 +20,19 @@ from . import read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
+KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled"
+
class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta):
"""ABC for net config sources that read config written by initramfses"""
@abc.abstractmethod
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""Is this initramfs config source applicable to the current system?"""
- pass
@abc.abstractmethod
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
"""Render a v1 network config from the initramfs configuration"""
- pass
class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
@@ -62,8 +61,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
if mac_addr:
self._mac_addrs[k] = mac_addr
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""
Return whether this system has klibc initramfs network config or not
@@ -81,8 +79,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
return True
return False
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
return config_from_klibc_net_cfg(
files=self._files, mac_addrs=self._mac_addrs,
)
@@ -115,8 +112,8 @@ def _klibc_to_config_entry(content, mac_addrs=None):
data = util.load_shell_content(content)
try:
name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6']
- except KeyError:
- raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data")
+ except KeyError as e:
+ raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e
# ipconfig on precise does not write PROTO
# IPv6 config gives us IPV6PROTO, not PROTO.
@@ -233,34 +230,35 @@ def read_initramfs_config():
return None
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
+def _decomp_gzip(blob):
+ # decompress blob or return original blob
with io.BytesIO(blob) as iobuf:
gzfp = None
try:
gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
return gzfp.read()
except IOError:
- if strict:
- raise
return blob
finally:
if gzfp:
gzfp.close()
-def _b64dgz(b64str, gzipped="try"):
- # decode a base64 string. If gzipped is true, transparently uncompresss
- # if gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
+def _b64dgz(data):
+ """Decode a string base64 encoding, if gzipped, uncompress as well
- if not gzipped:
- return blob
+    :return: the decoded (and, if gzipped, decompressed) data, or an empty
+        string if the input is not valid base64.
+ """
+ try:
+ blob = base64.b64decode(data)
+ except (TypeError, ValueError):
+ logging.error(
+ "Expected base64 encoded kernel commandline parameter"
+ " network-config. Ignoring network-config=%s.", data)
+ return ''
- return _decomp_gzip(blob, strict=gzipped != "try")
+ return _decomp_gzip(blob)
def read_kernel_cmdline_config(cmdline=None):
@@ -273,6 +271,8 @@ def read_kernel_cmdline_config(cmdline=None):
if tok.startswith("network-config="):
data64 = tok.split("=", 1)[1]
if data64:
+ if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED:
+ return {"config": "disabled"}
return util.load_yaml(_b64dgz(data64))
return None
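
read_kernel_cmdline_config expects network-config= to carry either the literal sentinel 'disabled' or base64-encoded (optionally gzip-compressed) YAML, which _b64dgz unwraps. A sketch of producing such a parameter (the config payload is illustrative):

    import base64
    import gzip

    network_config = (
        b'version: 2\n'
        b'ethernets:\n'
        b'  eth0:\n'
        b'    dhcp4: true\n'
    )
    # The kernel parameter value is base64 of the (optionally gzipped) YAML.
    param = 'network-config=%s' % base64.b64encode(
        gzip.compress(network_config)).decode()
    # Passing 'network-config=disabled' instead short-circuits
    # read_kernel_cmdline_config to {"config": "disabled"}.
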
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 19d0199c..4394c68b 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -17,6 +17,7 @@ from cloudinit.net import (
has_url_connectivity)
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -30,19 +31,18 @@ class InvalidDHCPLeaseFileError(Exception):
Current uses are DataSourceAzure and DataSourceEc2 during ephemeral
boot to scrape metadata.
"""
- pass
class NoDHCPLeaseError(Exception):
"""Raised when unable to get a DHCP lease."""
- pass
class EphemeralDHCPv4(object):
- def __init__(self, iface=None, connectivity_url=None):
+ def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
self.iface = iface
self._ephipv4 = None
self.lease = None
+ self.dhcp_log_func = dhcp_log_func
self.connectivity_url = connectivity_url
def __enter__(self):
@@ -80,9 +80,10 @@ class EphemeralDHCPv4(object):
if self.lease:
return self.lease
try:
- leases = maybe_perform_dhcp_discovery(self.iface)
- except InvalidDHCPLeaseFileError:
- raise NoDHCPLeaseError()
+ leases = maybe_perform_dhcp_discovery(
+ self.iface, self.dhcp_log_func)
+ except InvalidDHCPLeaseFileError as e:
+ raise NoDHCPLeaseError() from e
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
@@ -130,13 +131,15 @@ class EphemeralDHCPv4(object):
result[internal_mapping] = self.lease.get(different_names)
-def maybe_perform_dhcp_discovery(nic=None):
+def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
If the nic is invalid or undiscoverable or dhclient command is not found,
    skip dhcp_discovery and return an empty list.
@param nic: Name of the network interface we want to run dhclient on.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts representing dhcp options for each lease obtained
from the dhclient discovery if run, otherwise an empty list is
returned.
@@ -150,7 +153,7 @@ def maybe_perform_dhcp_discovery(nic=None):
LOG.debug(
'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
return []
- dhclient_path = util.which('dhclient')
+ dhclient_path = subp.which('dhclient')
if not dhclient_path:
LOG.debug('Skip dhclient configuration: No dhclient command found.')
return []
@@ -158,7 +161,7 @@ def maybe_perform_dhcp_discovery(nic=None):
prefix='cloud-init-dhcp-',
needs_exe=True) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
- return dhcp_discovery(dhclient_path, nic, tdir)
+ return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
def parse_dhcp_lease_file(lease_file):
@@ -192,13 +195,15 @@ def parse_dhcp_lease_file(lease_file):
return dhcp_leases
-def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
+def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
@param interface: Name of the network inteface on which to dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts of representing the dhcp leases parsed from the
dhcp.leases file or empty list.
@@ -215,14 +220,20 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
+    # In some cases, files in /var/tmp may not be executable; launching
+    # dhclient from there would raise a 'Permission denied' error. In that
+    # case, fall back to launching the original dhclient instead.
+ if not os.access(sandbox_dhclient_cmd, os.X_OK):
+ sandbox_dhclient_cmd = dhclient_cmd_path
+
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
- util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
+ subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
- util.subp(cmd, capture=True)
+ out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
@@ -239,6 +250,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
return []
ppid = 'unknown'
+ daemonized = False
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
@@ -250,13 +262,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
if ppid == 1:
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
- return parse_dhcp_lease_file(lease_file)
+ daemonized = True
+ break
time.sleep(0.01)
- LOG.error(
- 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
- pid_content, ppid, 0.01 * 1000
- )
+ if not daemonized:
+ LOG.error(
+ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s '
+ 'seconds', pid_content, ppid, 0.01 * 1000
+ )
+ if dhcp_log_func is not None:
+ dhcp_log_func(out, err)
return parse_dhcp_lease_file(lease_file)
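
The new dhcp_log_func parameter threads a callback from EphemeralDHCPv4 down through maybe_perform_dhcp_discovery into dhcp_discovery, which invokes it with dhclient's captured stdout and stderr once discovery finishes. A usage sketch (the interface name and logging sink are illustrative; lease keys such as 'fixed-address' come from the parsed lease file):

    from cloudinit.net.dhcp import EphemeralDHCPv4


    def log_dhclient(out, err):
        # Receives dhclient's captured output and error streams.
        print('dhclient stdout:\n%s\ndhclient stderr:\n%s' % (out, err))


    with EphemeralDHCPv4(iface='eth0', dhcp_log_func=log_dhclient) as lease:
        print(lease.get('fixed-address'))
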
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 2f714563..13c041f3 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -11,6 +11,7 @@ from . import renderer
from .network_state import subnet_is_ipv6
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
@@ -482,10 +483,8 @@ class Renderer(renderer.Renderer):
if searchdomains:
lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
- ''' Apply a sort order to ensure that we write out
- the physical interfaces first; this is critical for
- bonding
- '''
+ # Apply a sort order to ensure that we write out the physical
+ # interfaces first; this is critical for bonding
order = {
'loopback': 0,
'physical': 1,
@@ -511,13 +510,13 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
def render_network_state(self, network_state, templates=None, target=None):
- fpeni = util.target_path(target, self.eni_path)
+ fpeni = subp.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
util.write_file(fpeni, header + self._render_interfaces(network_state))
if self.netrules_path:
- netrules = util.target_path(target, self.netrules_path)
+ netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
util.write_file(netrules,
self._render_persistent_net(network_state))
@@ -544,9 +543,9 @@ def available(target=None):
expected = ['ifquery', 'ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
- eni = util.target_path(target, 'etc/network/interfaces')
+ eni = subp.target_path(target, 'etc/network/interfaces')
if not os.path.isfile(eni):
return False
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index d6f61da3..0285dfec 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,175 +1,59 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import re
-
from cloudinit import log as logging
-from cloudinit import net
+import cloudinit.net.bsd
+from cloudinit import subp
from cloudinit import util
-from cloudinit.distros import rhel_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from . import renderer
LOG = logging.getLogger(__name__)
-class Renderer(renderer.Renderer):
- resolv_conf_fn = 'etc/resolv.conf'
- rc_conf_fn = 'etc/rc.conf'
+class Renderer(cloudinit.net.bsd.BSDRenderer):
def __init__(self, config=None):
- if not config:
- config = {}
- self.dhcp_interfaces = []
- self._postcmds = config.get('postcmds', True)
-
- def _update_rc_conf(self, settings, target=None):
- fn = util.target_path(target, self.rc_conf_fn)
- rhel_util.update_sysconfig_file(fn, settings)
-
- def _write_ifconfig_entries(self, settings, target=None):
- ifname_by_mac = net.get_interfaces_by_mac()
- for interface in settings.iter_interfaces():
- device_name = interface.get("name")
- device_mac = interface.get("mac_address")
- if device_name and re.match(r'^lo\d+$', device_name):
- continue
- if device_mac not in ifname_by_mac:
- LOG.info('Cannot find any device with MAC %s', device_mac)
- elif device_mac and device_name:
- cur_name = ifname_by_mac[device_mac]
- if cur_name != device_name:
- LOG.info('netif service will rename interface %s to %s',
- cur_name, device_name)
- self._update_rc_conf(
- {'ifconfig_%s_name' % cur_name: device_name},
- target=target)
- else:
- device_name = ifname_by_mac[device_mac]
-
- LOG.info('Configuring interface %s', device_name)
- ifconfig = 'DHCP' # default
-
- for subnet in interface.get("subnets", []):
- if ifconfig != 'DHCP':
- LOG.info('The FreeBSD provider only set the first subnet.')
- break
- if subnet.get('type') == 'static':
- if not subnet.get('netmask'):
- LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address'))
- continue
- LOG.debug('Configuring dev %s with %s / %s', device_name,
- subnet.get('address'), subnet.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (
- subnet.get('address') + ' netmask ' +
- subnet.get('netmask'))
-
- if ifconfig == 'DHCP':
- self.dhcp_interfaces.append(device_name)
- self._update_rc_conf(
- {'ifconfig_' + device_name: ifconfig},
- target=target)
-
- def _write_route_entries(self, settings, target=None):
- routes = list(settings.iter_routes())
- for interface in settings.iter_interfaces():
- subnets = interface.get("subnets", [])
- for subnet in subnets:
- if subnet.get('type') != 'static':
- continue
- gateway = subnet.get('gateway')
- if gateway and len(gateway.split('.')) == 4:
- routes.append({
- 'network': '0.0.0.0',
- 'netmask': '0.0.0.0',
- 'gateway': gateway})
- routes += subnet.get('routes', [])
- route_cpt = 0
- for route in routes:
- network = route.get('network')
- if not network:
- LOG.debug('Skipping a bad route entry')
- continue
- netmask = route.get('netmask')
- gateway = route.get('gateway')
- route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
- if network == '0.0.0.0':
- self._update_rc_conf(
- {'defaultrouter': gateway}, target=target)
+ self._route_cpt = 0
+        super(Renderer, self).__init__(config)
+
+ def rename_interface(self, cur_name, device_name):
+ self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
else:
- self._update_rc_conf(
- {'route_net%d' % route_cpt: route_cmd}, target=target)
- route_cpt += 1
-
- def _write_resolve_conf(self, settings, target=None):
- nameservers = settings.dns_nameservers
- searchdomains = settings.dns_searchdomains
- for interface in settings.iter_interfaces():
- for subnet in interface.get("subnets", []):
- if 'dns_nameservers' in subnet:
- nameservers.extend(subnet['dns_nameservers'])
- if 'dns_search' in subnet:
- searchdomains.extend(subnet['dns_search'])
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(util.target_path(
- target, self.resolv_conf_fn)))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- util.target_path(target, self.resolv_conf_fn))
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(
- util.target_path(target, self.resolv_conf_fn),
- str(resolvconf), 0o644)
-
- def _write_network(self, settings, target=None):
- self._write_ifconfig_entries(settings, target=target)
- self._write_route_entries(settings, target=target)
- self._write_resolve_conf(settings, target=target)
-
- self.start_services(run=self._postcmds)
-
- def render_network_state(self, network_state, templates=None, target=None):
- self._write_network(network_state, target=target)
+ self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
return
- util.subp(['service', 'netif', 'restart'], capture=True)
+ subp.subp(['service', 'netif', 'restart'], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
# - routing: it cannot remove the loopback route, but it will still set
# up the default route as expected.
# - dhclient: it cannot stop the dhclient started by the netif service.
# In both case, the situation is ok, and we can proceed.
- util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
- for dhcp_interface in self.dhcp_interfaces:
- util.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+
+ for dhcp_interface in self.dhcp_interfaces():
+ subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
rcs=[0, 1],
capture=True)
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultrouter', gateway)
+ else:
+ route_name = 'route_net%d' % self._route_cpt
+ route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+ self.set_rc_config_value(route_name, route_cmd)
+ self._route_cpt += 1
+
def available(target=None):
return util.is_FreeBSD()
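
For context, the refactor above moves route handling into a set_route() hook on the new shared cloudinit.net.bsd.BSDRenderer base class. A minimal, self-contained sketch (illustrative class and invented addresses, not the shipped renderer) of how routes map onto rc.conf keys:

# Sketch: how FreeBSD's set_route() turns routes into rc.conf entries.
class RouteSketch:
    def __init__(self):
        self._route_cpt = 0
        self.rc_conf = {}  # stand-in for /etc/rc.conf

    def set_rc_config_value(self, key, value):
        self.rc_conf[key] = value

    def set_route(self, network, netmask, gateway):
        if network == '0.0.0.0':
            # the default route becomes defaultrouter="<gateway>"
            self.set_rc_config_value('defaultrouter', gateway)
        else:
            # every other route becomes a numbered route_netN entry
            cmd = "-route %s/%s %s" % (network, netmask, gateway)
            self.set_rc_config_value('route_net%d' % self._route_cpt, cmd)
            self._route_cpt += 1

r = RouteSketch()
r.set_route('0.0.0.0', '0.0.0.0', '192.168.1.1')
r.set_route('10.0.0.0', '8', '192.168.1.254')
# r.rc_conf == {'defaultrouter': '192.168.1.1',
#               'route_net0': '-route 10.0.0.0/8 192.168.1.254'}
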
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
new file mode 100644
index 00000000..71b38ee6
--- /dev/null
+++ b/cloudinit/net/netbsd.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def __init__(self, config=None):
+ super(Renderer, self).__init__()
+
+ def write_config(self):
+ if self.dhcp_interfaces():
+ self.set_rc_config_value('dhcpcd', 'YES')
+ self.set_rc_config_value(
+ 'dhcpcd_flags',
+ ' '.join(self.dhcp_interfaces())
+ )
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
+
+ def start_services(self, run=False):
+ if not run:
+ LOG.debug("netbsd generate postcmd disabled")
+ return
+
+ subp.subp(['service', 'network', 'restart'], capture=True)
+ if self.dhcp_interfaces():
+ subp.subp(['service', 'dhcpcd', 'restart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultroute', gateway)
+
+
+def available(target=None):
+ return util.is_NetBSD()
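
A sketch of what the NetBSD write_config() above produces, with invented interface names; note the design choice that a single dhcpcd instance covers every DHCP interface, scoped through dhcpcd_flags:

# Sketch: the rc.conf values the NetBSD renderer derives from its state.
dhcp_interfaces = ['vioif0']
interface_configurations = {
    'vioif0': 'DHCP',
    'vioif1': {'address': '192.168.1.10', 'netmask': '255.255.255.0'},
}

rc_conf = {}
if dhcp_interfaces:
    rc_conf['dhcpcd'] = 'YES'  # one dhcpcd serves all DHCP NICs
    rc_conf['dhcpcd_flags'] = ' '.join(dhcp_interfaces)
for name, v in interface_configurations.items():
    if isinstance(v, dict):  # only static interfaces get ifconfig_* lines
        rc_conf['ifconfig_' + name] = v['address'] + ' netmask ' + v['netmask']
# rc_conf == {'dhcpcd': 'YES', 'dhcpcd_flags': 'vioif0',
#             'ifconfig_vioif1': '192.168.1.10 netmask 255.255.255.0'}
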
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 89855270..53347c83 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -8,6 +8,7 @@ from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit import safeyaml
from cloudinit.net import SYS_CLASS_NET, get_devicelist
@@ -164,14 +165,14 @@ def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
def _clean_default(target=None):
# clean out any known default files and derived files in target
# LP: #1675576
- tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml")
+ tpath = subp.target_path(target, "etc/netplan/00-snapd-config.yaml")
if not os.path.isfile(tpath):
return
content = util.load_file(tpath, decode=False)
if content != KNOWN_SNAPD_CONFIG:
return
- derived = [util.target_path(target, f) for f in (
+ derived = [subp.target_path(target, f) for f in (
'run/systemd/network/10-netplan-all-en.network',
'run/systemd/network/10-netplan-all-eth.network',
'run/systemd/generator/netplan.stamp')]
@@ -203,10 +204,10 @@ class Renderer(renderer.Renderer):
def features(self):
if self._features is None:
try:
- info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True)
+ info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
info = util.load_yaml(info_blob)
self._features = info['netplan.io']['features']
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# if the info subcommand is not present then we don't have any
# new features
pass
@@ -218,7 +219,7 @@ class Renderer(renderer.Renderer):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
- fpnplan = os.path.join(util.target_path(target), self.netplan_path)
+ fpnplan = os.path.join(subp.target_path(target), self.netplan_path)
util.ensure_dir(os.path.dirname(fpnplan))
header = self.netplan_header if self.netplan_header else ""
@@ -239,7 +240,7 @@ class Renderer(renderer.Renderer):
if not run:
LOG.debug("netplan generate postcmd disabled")
return
- util.subp(self.NETPLAN_GENERATE, capture=True)
+ subp.subp(self.NETPLAN_GENERATE, capture=True)
def _net_setup_link(self, run=False):
"""To ensure device link properties are applied, we poke
@@ -253,7 +254,7 @@ class Renderer(renderer.Renderer):
for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
for iface in get_devicelist() if
os.path.islink(SYS_CLASS_NET + iface)]:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _render_content(self, network_state):
@@ -406,7 +407,7 @@ def available(target=None):
expected = ['netplan']
search = ['/usr/sbin', '/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
return True
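
The netplan hunks are mostly the util.subp -> subp.subp migration. The feature probe they touch follows this pattern (a sketch assuming NETPLAN_INFO is ['netplan', 'info']; not the shipped method):

from cloudinit import subp, util

def netplan_features():
    """Sketch: probe `netplan info` for the feature list, tolerating absence."""
    try:
        info_blob, _err = subp.subp(['netplan', 'info'], capture=True)
    except subp.ProcessExecutionError:
        # older netplan without an `info` subcommand: no extra features
        return []
    info = util.load_yaml(info_blob)
    return info['netplan.io']['features']
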
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 63d6e291..b2f7d31e 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -215,7 +215,7 @@ class NetworkState(object):
return (
route.get('prefix') == 0
and route.get('network') in default_nets
- )
+ )
class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
@@ -297,9 +297,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
command_type = command['type']
try:
handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
+ except KeyError as e:
+ raise RuntimeError(
+ "No handler found for command '%s'" % command_type
+ ) from e
try:
handler(self, command)
except InvalidCommand:
@@ -312,13 +313,14 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
- if command_type == 'version':
+ if command_type in ['version', 'renderer']:
continue
try:
handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
+ except KeyError as e:
+ raise RuntimeError(
+ "No handler found for command '%s'" % command_type
+ ) from e
try:
handler(self, command)
self._v2_common(command)
@@ -696,7 +698,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def handle_wifis(self, command):
LOG.warning('Wifi configuration is only available to distros with'
- 'netplan rendering support.')
+ ' netplan rendering support.')
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
@@ -722,10 +724,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
item_params = dict((key, value) for (key, value) in
item_cfg.items() if key not in
NETWORK_V2_KEY_FILTER)
- # we accept the fixed spelling, but write the old for compatability
+ # we accept the fixed spelling, but write the old for compatibility
# Xenial does not have an updated netplan which supports the
# correct spelling. LP: #1756701
- params = item_params['parameters']
+ params = item_params.get('parameters', {})
grat_value = params.pop('gratuitous-arp', None)
if grat_value:
params['gratuitious-arp'] = grat_value
@@ -734,8 +736,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
'type': cmd_type,
'name': item_name,
cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': dict((v2key_to_v1[k], v) for k, v in
- item_params.get('parameters', {}).items())
+ 'params': dict((v2key_to_v1[k], v) for k, v in params.items())
}
if 'mtu' in item_cfg:
v1_cmd['mtu'] = item_cfg['mtu']
@@ -915,9 +916,10 @@ def _normalize_route(route):
if metric:
try:
normal_route['metric'] = int(metric)
- except ValueError:
+ except ValueError as e:
raise TypeError(
- 'Route config metric {} is not an integer'.format(metric))
+ 'Route config metric {} is not an integer'.format(metric)
+ ) from e
return normal_route
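
The `raise ... from e` rewrites above chain the original KeyError onto the new RuntimeError instead of discarding it. A minimal demonstration of what the pattern preserves:

# Sketch: exception chaining keeps the lookup failure as __cause__.
command_handlers = {'physical': lambda cfg: None}

def dispatch(command_type):
    try:
        handler = command_handlers[command_type]
    except KeyError as e:
        # `from e` attaches the KeyError, so tracebacks show both the
        # failed lookup and the higher-level dispatch error.
        raise RuntimeError(
            "No handler found for command '%s'" % command_type
        ) from e
    return handler

try:
    dispatch('bond9')
except RuntimeError as err:
    assert isinstance(err.__cause__, KeyError)
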
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
new file mode 100644
index 00000000..166d77e6
--- /dev/null
+++ b/cloudinit/net/openbsd.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if_file = 'etc/hostname.{}'.format(device_name)
+ fn = subp.target_path(self.target, if_file)
+ if device_name in self.dhcp_interfaces():
+ content = 'dhcp\n'
+ elif isinstance(v, dict):
+ try:
+ content = "inet {address} {netmask}\n".format(
+ address=v['address'],
+ netmask=v['netmask']
+ )
+ except KeyError:
+ LOG.error(
+ "Invalid static configuration for %s",
+ device_name)
+ util.write_file(fn, content)
+
+ def start_services(self, run=False):
+ if not self._postcmds:
+ LOG.debug("openbsd generate postcmd disabled")
+ return
+ subp.subp(['sh', '/etc/netstart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ if_file = 'etc/mygate'
+ fn = subp.target_path(self.target, if_file)
+ content = gateway + '\n'
+ util.write_file(fn, content)
+
+
+def available(target=None):
+ return util.is_OpenBSD()
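
Unlike the rc.conf-driven FreeBSD and NetBSD renderers, OpenBSD is configured through per-device hostname.if files plus /etc/mygate. A sketch of the files the renderer above writes (interface names and addresses invented):

# Sketch: expected on-disk results of the OpenBSD renderer.
expected_files = {
    'etc/hostname.vio0': 'dhcp\n',                             # DHCP NIC
    'etc/hostname.vio1': 'inet 192.168.1.10 255.255.255.0\n',  # static NIC
    'etc/mygate': '192.168.1.1\n',                             # default route
}
for path, content in sorted(expected_files.items()):
    print('%s -> %r' % (path, content))
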
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index b98dbbe3..e2de4d55 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,18 +2,23 @@
from . import eni
from . import freebsd
+from . import netbsd
from . import netplan
from . import RendererNotFoundError
+from . import openbsd
from . import sysconfig
NAME_TO_RENDERER = {
"eni": eni,
"freebsd": freebsd,
+ "netbsd": netbsd,
"netplan": netplan,
+ "openbsd": openbsd,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
+ "netbsd", "openbsd"]
def search(priority=None, target=None, first=False):
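
With the two new renderers registered, selection still walks DEFAULT_PRIORITY in order and keeps the first module whose available() check passes. A sketch of that logic with stubbed availability:

# Sketch: priority-ordered renderer selection, availability stubbed out.
DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
                    "netbsd", "openbsd"]

def first_available(availability):
    """Return the first renderer name whose stubbed available() is True."""
    for name in DEFAULT_PRIORITY:
        if availability.get(name, False):
            return name
    raise RuntimeError('No available network renderers found')

# On a NetBSD guest only the netbsd renderer reports available:
assert first_available({'netbsd': True}) == 'netbsd'
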
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 0a387377..0a5d481d 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -9,6 +9,7 @@ from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros.parsers import networkmanager_conf
from cloudinit.distros.parsers import resolv_conf
@@ -504,7 +505,7 @@ class Renderer(renderer.Renderer):
iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
else:
iface_cfg['IPV6ADDR_SECONDARIES'] += \
- " " + ipv6_cidr
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -858,19 +859,19 @@ class Renderer(renderer.Renderer):
if not templates:
templates = self.templates
file_mode = 0o644
- base_sysconf_dir = util.target_path(target, self.sysconf_dir)
+ base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
network_state, self.flavor,
templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
- dns_path = util.target_path(target, self.dns_path)
+ dns_path = subp.target_path(target, self.dns_path)
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
if resolv_content:
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
- nm_conf_path = util.target_path(target,
+ nm_conf_path = subp.target_path(target,
self.networkmanager_conf_path)
nm_conf_content = self._render_networkmanager_conf(network_state,
templates)
@@ -878,12 +879,12 @@ class Renderer(renderer.Renderer):
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
- netrules_path = util.target_path(target, self.netrules_path)
+ netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
+ enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
- sysconfig_path = util.target_path(target, templates.get('control'))
+ sysconfig_path = subp.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
if sysconfig_path.endswith('network'):
util.ensure_dir(os.path.dirname(sysconfig_path))
@@ -906,20 +907,20 @@ def available_sysconfig(target=None):
expected = ['ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
expected_paths = [
'etc/sysconfig/network-scripts/network-functions',
'etc/sysconfig/config']
for p in expected_paths:
- if os.path.isfile(util.target_path(target, p)):
+ if os.path.isfile(subp.target_path(target, p)):
return True
return False
def available_nm(target=None):
- if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
+ if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
return False
return True
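
The sysconfig hunks swap util.target_path for subp.target_path throughout. A simplified model of that helper's contract (a sketch, not the shipped implementation):

import os

def target_path(target, path=None):
    """Sketch: resolve a config path under an alternate root filesystem."""
    target = os.path.abspath(target) if target else '/'
    if not path:
        return target
    return os.path.join(target, path.lstrip('/'))

assert target_path(None, '/etc/sysconfig/network') == '/etc/sysconfig/network'
assert target_path('/mnt/img', '/etc/sysconfig/network') == \
    '/mnt/img/etc/sysconfig/network'
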
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index c3fa1e04..74cf4b94 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -62,7 +62,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
class TestDHCPRFC3442(CiTestCase):
@@ -88,7 +88,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
def test_parse_lease_finds_classless_static_routes(self):
"""
@@ -114,7 +114,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@@ -211,7 +211,7 @@ class TestDHCPParseStaticRoutes(CiTestCase):
"class_b": "16,172,16,10",
"class_a": "8,10,10",
"gateway": "0,0",
- "netlen": "33,0",
+ "netlen": "33,0",
}
for rfc3442 in bad_rfc3442.values():
self.assertEqual([], parse_static_routes(rfc3442))
@@ -266,7 +266,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
self.logs.getvalue())
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_absent_dhclient_command(self, m_fallback, m_which):
"""When dhclient doesn't exist in the OS, log the issue and no-op."""
@@ -279,7 +279,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.temp_utils.os.getuid')
@mock.patch('cloudinit.net.dhcp.dhcp_discovery')
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
"""maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
@@ -302,13 +302,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('time.sleep', mock.MagicMock())
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp,
m_kill):
"""dhcp_discovery logs a warning when pidfile contains invalid content.
Lease processing still occurs and no proc kill is attempted.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -324,7 +325,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
""")
write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content)
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
@@ -337,13 +338,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.wait_for_files')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
m_subp,
m_wait,
m_kill,
m_getppid):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -364,12 +366,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file generated in the sandbox.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -389,7 +392,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
write_file(pid_file, "%d\n" % my_pid)
m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
@@ -406,6 +409,87 @@ class TestDHCPDiscoveryClean(CiTestCase):
'eth9', '-sf', '/bin/true'], capture=True)])
m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+ @mock.patch('cloudinit.net.dhcp.os.kill')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
+ def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+        It also returns the parsed dhcp.leases file generated by dhclient.
+ """
+ m_subp.return_value = ('', '')
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, 'dhclient.pid')
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ with mock.patch('os.access', return_value=False):
+ self.assertCountEqual(
+ [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
+ dhcp_discovery(dhclient_script, 'eth9', tmpdir))
+ # dhclient script got copied
+ with open(os.path.join(tmpdir, 'dhclient.orig')) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient called from sandbox
+ m_subp.assert_has_calls([
+ mock.call(
+ ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
+ mock.call(
+ [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf',
+ lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
+ 'eth9', '-sf', '/bin/true'], capture=True)])
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+ @mock.patch('cloudinit.net.dhcp.os.kill')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
+ def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+        """dhcp_log_func is called with the output and error streams of
+        dhclient when the callable is passed."""
+ dhclient_err = 'FAKE DHCLIENT ERROR'
+ dhclient_out = 'FAKE DHCLIENT OUT'
+ m_subp.return_value = (dhclient_out, dhclient_err)
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, 'dhclient.pid')
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ def dhcp_log_func(out, err):
+ self.assertEqual(out, dhclient_out)
+ self.assertEqual(err, dhclient_err)
+
+ dhcp_discovery(
+ dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func)
+
class TestSystemdParseLeases(CiTestCase):
@@ -529,7 +613,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
# Ensure that no teardown happens:
m_dhcp.assert_not_called()
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_ephemeral_dhcp_setup_network_if_url_connectivity(
self, m_dhcp, m_subp):
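
The new `m_subp.return_value = ('', '')` stubs above are needed because dhcp_discovery now unpacks dhclient's (out, err) pair in order to forward it to dhcp_log_func; a bare Mock return value would fail to unpack. A sketch of that call shape with a hypothetical runner:

from unittest import mock

def run_dhclient(subp_fn, cmd, dhcp_log_func=None):
    # unpacking fails unless the stub returns a two-tuple
    out, err = subp_fn(cmd, capture=True)
    if dhcp_log_func is not None:
        dhcp_log_func(out, err)
    return out, err

m_subp = mock.Mock(return_value=('FAKE OUT', 'FAKE ERR'))
collected = []
run_dhclient(m_subp, ['dhclient', 'eth9'],
             dhcp_log_func=lambda o, e: collected.append((o, e)))
assert collected == [('FAKE OUT', 'FAKE ERR')]
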
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 5081a337..311ab6f8 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -2,16 +2,20 @@
import copy
import errno
-import httpretty
+import ipaddress
import os
-import requests
import textwrap
from unittest import mock
+import httpretty
+import pytest
+import requests
+
import cloudinit.net as net
-from cloudinit.util import ensure_file, write_file, ProcessExecutionError
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
from cloudinit import safeyaml as yaml
+from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
class TestSysDevPath(CiTestCase):
@@ -139,12 +143,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
self.assertFalse(net.is_up('eth0'))
- def test_is_wireless(self):
- """is_wireless is True when /sys/net/devname/wireless exists."""
- self.assertFalse(net.is_wireless('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertTrue(net.is_wireless('eth0'))
-
def test_is_bridge(self):
"""is_bridge is True when /sys/net/devname/bridge exists."""
self.assertFalse(net.is_bridge('eth0'))
@@ -200,32 +198,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
self.assertTrue(net.is_vlan('eth0'))
- def test_is_connected_when_physically_connected(self):
- """is_connected is True when /sys/net/devname/iflink reports 2."""
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_connected_when_wireless_and_carrier_active(self):
- """is_connected is True if wireless /sys/net/devname/carrier is 1."""
- self.assertFalse(net.is_connected('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_physical(self):
- """is_physical is True when /sys/net/devname/device exists."""
- self.assertFalse(net.is_physical('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_physical('eth0'))
-
- def test_is_present(self):
- """is_present is True when /sys/net/devname exists."""
- self.assertFalse(net.is_present('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_present('eth0'))
-
class TestGenerateFallbackConfig(CiTestCase):
@@ -341,8 +313,6 @@ class TestGenerateFallbackConfig(CiTestCase):
class TestNetFindFallBackNic(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFindFallBackNic, self).setUp()
sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
@@ -396,7 +366,7 @@ class TestGetDeviceList(CiTestCase):
"""get_devicelist returns a directory listing for SYS_CLASS_NET."""
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
- self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist())
+ self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
class TestGetInterfaceMAC(CiTestCase):
@@ -540,7 +510,7 @@ class TestInterfaceHasOwnMAC(CiTestCase):
net.interface_has_own_mac('eth1', strict=True)
-@mock.patch('cloudinit.net.util.subp')
+@mock.patch('cloudinit.net.subp.subp')
class TestEphemeralIPV4Network(CiTestCase):
with_logs = True
@@ -993,86 +963,8 @@ class TestExtractPhysdevs(CiTestCase):
net.extract_physdevs({'version': 3, 'awesome_config': []})
-class TestWaitForPhysdevs(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestWaitForPhysdevs, self).setUp()
- self.add_patch('cloudinit.net.get_interfaces_by_mac',
- 'm_get_iface_mac')
- self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle')
-
- def test_wait_for_physdevs_skips_settle_if_all_present(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.side_effect = iter([
- {'aa:bb:cc:dd:ee:ff': 'eth0',
- '00:11:22:33:44:55': 'ens3'},
- ])
- net.wait_for_physdevs(netcfg)
- self.assertEqual(0, self.m_udev_settle.call_count)
-
- def test_wait_for_physdevs_calls_udev_settle_on_missing(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.side_effect = iter([
- {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing
- {'aa:bb:cc:dd:ee:ff': 'eth0',
- '00:11:22:33:44:55': 'ens3'}, # second call has both
- ])
- net.wait_for_physdevs(netcfg)
- self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3'))
-
- def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.return_value = {}
- with self.assertRaises(RuntimeError):
- net.wait_for_physdevs(netcfg)
-
- self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
-
- def test_wait_for_physdevs_no_raise_if_not_strict(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.return_value = {}
- net.wait_for_physdevs(netcfg, strict=False)
- self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
-
-
class TestNetFailOver(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFailOver, self).setUp()
self.add_patch('cloudinit.net.util', 'm_util')
@@ -1297,4 +1189,48 @@ class TestNetFailOver(CiTestCase):
m_standby.return_value = False
self.assertFalse(net.is_netfailover(devname, driver))
+
+class TestIsIpAddress:
+ """Tests for net.is_ip_address.
+
+    Instead of testing with specific values, we rely on the ipaddress stdlib
+    module to handle all values correctly, so we simply test that
+    is_ip_address defers to it.
+ """
+
+ @pytest.mark.parametrize('ip_address_side_effect,expected_return', (
+ (ValueError, False),
+ (lambda _: ipaddress.IPv4Address('192.168.0.1'), True),
+ (lambda _: ipaddress.IPv6Address('2001:db8::'), True),
+ ))
+ def test_is_ip_address(self, ip_address_side_effect, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.ip_address',
+ side_effect=ip_address_side_effect) as m_ip_address:
+ ret = net.is_ip_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ip_address.call_args_list
+
+
+class TestIsIpv4Address:
+ """Tests for net.is_ipv4_address.
+
+    Instead of testing with specific values, we rely on the ipaddress stdlib
+    module to handle all values correctly, so we simply test that
+    is_ipv4_address defers to it.
+ """
+
+ @pytest.mark.parametrize('ipv4address_mock,expected_return', (
+ (mock.Mock(side_effect=ValueError), False),
+ (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True),
+ ))
+    def test_is_ipv4_address(self, ipv4address_mock, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.IPv4Address',
+ ipv4address_mock) as m_ipv4address:
+ ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ipv4address.call_args_list
+
+
# vi: ts=4 expandtab
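
The new pytest-style classes pin down that the helpers simply defer to the stdlib ipaddress module. The contract they encode is roughly this sketch:

import ipaddress

def is_ip_address(s):
    """Sketch: True iff s parses as an IPv4 or IPv6 address."""
    try:
        ipaddress.ip_address(s)
    except ValueError:
        return False
    return True

assert is_ip_address('192.168.0.1')
assert is_ip_address('2001:db8::')
assert not is_ip_address('not-an-ip')
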
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index 55880852..07d726e2 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -45,4 +45,14 @@ class TestNetworkStateParseConfig(CiTestCase):
self.assertNotEqual(None, result)
+class TestNetworkStateParseConfigV2(CiTestCase):
+
+ def test_version_2_ignores_renderer_key(self):
+ ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}}
+ nsi = network_state.NetworkStateInterpreter(version=ncfg['version'],
+ config=ncfg)
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()['config'])
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 6ba21f4d..628e2908 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -13,6 +13,7 @@ import re
from cloudinit import log as logging
from cloudinit.net.network_state import net_prefix_to_ipv4_mask
+from cloudinit import subp
from cloudinit import util
from cloudinit.simpletable import SimpleTable
@@ -91,6 +92,53 @@ def _netdev_info_iproute(ipaddr_out):
return devs
+def _netdev_info_ifconfig_netbsd(ifconfig_data):
+ # fields that need to be returned in devs for each dev
+ devs = {}
+ for line in ifconfig_data.splitlines():
+ if len(line) == 0:
+ continue
+ if line[0] not in ("\t", " "):
+ curdev = line.split()[0]
+ # current ifconfig pops a ':' on the end of the device
+ if curdev.endswith(':'):
+ curdev = curdev[:-1]
+ if curdev not in devs:
+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
+ toks = line.lower().strip().split()
+ if len(toks) > 1:
+ if re.search(r"flags=[x\d]+<up.*>", toks[1]):
+ devs[curdev]['up'] = True
+
+ for i in range(len(toks)):
+ if toks[i] == "inet": # Create new ipv4 addr entry
+ network, net_bits = toks[i + 1].split('/')
+ devs[curdev]['ipv4'].append(
+ {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+ elif toks[i] == "broadcast":
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ elif toks[i] == "address:":
+ devs[curdev]['hwaddr'] = toks[i + 1]
+ elif toks[i] == "inet6":
+ if toks[i + 1] == "addr:":
+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ else:
+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+ devs[curdev]['ipv6'][-1]['ip'] = addr6
+ elif toks[i].startswith("scope:"):
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ elif toks[i] == "scopeid":
+ res = re.match(r'.*<(\S+)>', toks[i + 1])
+ if res:
+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ else:
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+
+ return devs
+
+
def _netdev_info_ifconfig(ifconfig_data):
# fields that need to be returned in devs for each dev
devs = {}
@@ -149,13 +197,16 @@ def _netdev_info_ifconfig(ifconfig_data):
def netdev_info(empty=""):
devs = {}
- if util.which('ip'):
+ if util.is_NetBSD():
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
+ devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
+ elif subp.which('ip'):
# Try iproute first of all
- (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+ (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
devs = _netdev_info_iproute(ipaddr_out)
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
# Fall back to net-tools if iproute2 is not present
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig(ifcfg_out)
else:
LOG.warning(
@@ -235,10 +286,10 @@ def _netdev_route_info_iproute(iproute_data):
entry['flags'] = ''.join(flags)
routes['ipv4'].append(entry)
try:
- (iproute_data6, _err6) = util.subp(
+ (iproute_data6, _err6) = subp.subp(
["ip", "--oneline", "-6", "route", "list", "table", "all"],
rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = iproute_data6.splitlines()
@@ -307,9 +358,9 @@ def _netdev_route_info_netstat(route_data):
routes['ipv4'].append(entry)
try:
- (route_data6, _err6) = util.subp(
+ (route_data6, _err6) = subp.subp(
["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = route_data6.splitlines()
@@ -343,13 +394,13 @@ def _netdev_route_info_netstat(route_data):
def route_info():
routes = {}
- if util.which('ip'):
+ if subp.which('ip'):
# Try iproute first of all
- (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+ (iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"])
routes = _netdev_route_info_iproute(iproute_out)
- elif util.which('netstat'):
+ elif subp.which('netstat'):
# Fall back to net-tools if iproute2 is not present
- (route_out, _err) = util.subp(
+ (route_out, _err) = subp.subp(
["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
routes = _netdev_route_info_netstat(route_out)
else:
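
Invented NetBSD `ifconfig -a` output and the structure the parser above should extract from it; _netdev_info_ifconfig_netbsd is a private helper, so treat this as illustrative rather than a supported API:

from cloudinit.netinfo import _netdev_info_ifconfig_netbsd

sample = (
    'vioif0: flags=0x8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> mtu 1500\n'
    '        address: 00:16:3e:aa:bb:cc\n'
    '        inet 192.168.1.10/24 broadcast 192.168.1.255\n'
)
devs = _netdev_info_ifconfig_netbsd(sample)
assert devs['vioif0']['up'] is True
assert devs['vioif0']['hwaddr'] == '00:16:3e:aa:bb:cc'
assert devs['vioif0']['ipv4'] == [{'ip': '192.168.1.10',
                                   'mask': '255.255.255.0',
                                   'bcast': '192.168.1.255'}]
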
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e5dfab33..b8677c8b 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,7 +12,7 @@ import base64
import os.path
import time
-from . import instantiated_handler_registry
+from . import instantiated_handler_registry, available_handlers
FINISH_EVENT_TYPE = 'finish'
START_EVENT_TYPE = 'start'
@@ -81,17 +81,32 @@ class FinishReportingEvent(ReportingEvent):
return data
-def report_event(event):
- """Report an event to all registered event handlers.
+def report_event(event, excluded_handler_types=None):
+ """Report an event to all registered event handlers
+ except those whose type is in excluded_handler_types.
This should generally be called via one of the other functions in
the reporting module.
+ :param excluded_handler_types:
+        List of handler types to exclude when reporting the event.
:param event_type:
The type of the event; this should be a constant from the
reporting module.
"""
- for _, handler in instantiated_handler_registry.registered_items.items():
+
+ if not excluded_handler_types:
+ excluded_handler_types = {}
+ excluded_handler_classes = {
+ hndl_cls
+ for hndl_type, hndl_cls in available_handlers.registered_items.items()
+ if hndl_type in excluded_handler_types
+ }
+
+ handlers = instantiated_handler_registry.registered_items.items()
+ for _, handler in handlers:
+ if type(handler) in excluded_handler_classes:
+ continue # skip this excluded handler
handler.publish_event(event)
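
A sketch of the new exclusion path in report_event(), with stand-in registries: any handler whose registered type name appears in excluded_handler_types is skipped when the event is published:

class PrintHandler:
    def publish_event(self, event):
        print('published:', event)

available = {'print': PrintHandler}    # type name -> handler class
instantiated = {'h1': PrintHandler()}  # registered name -> instance

def report_event(event, excluded_handler_types=None):
    excluded_classes = {
        cls for name, cls in available.items()
        if name in (excluded_handler_types or ())
    }
    for handler in instantiated.values():
        if type(handler) in excluded_classes:
            continue  # skip this excluded handler
        handler.publish_event(event)

report_event('boot-finished')                                    # published
report_event('boot-finished', excluded_handler_types={'print'})  # skipped
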
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 946df7e0..0a8c7af3 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -35,7 +35,6 @@ class ReportingHandler(metaclass=abc.ABCMeta):
def flush(self):
"""Ensure ReportingHandler has published all events"""
- pass
class LogHandler(ReportingHandler):
@@ -114,6 +113,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
"""
HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+ # The maximum value size expected in Azure
+ HV_KVP_AZURE_MAX_VALUE_SIZE = 1024
HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
@@ -139,7 +140,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
self.publish_thread = threading.Thread(
- target=self._publish_event_routine)
+ target=self._publish_event_routine
+ )
self.publish_thread.daemon = True
self.publish_thread.start()
@@ -195,17 +197,23 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _event_key(self, event):
"""
the event key format is:
- CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<time>
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
+ [|subevent_index]
"""
return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
event.event_type, event.name,
uuid.uuid4())
def _encode_kvp_item(self, key, value):
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- key.encode('utf-8'), value.encode('utf-8')))
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ key.encode("utf-8"),
+ value.encode("utf-8"),
+ )
return data
def _decode_kvp_item(self, record_data):
@@ -219,7 +227,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
v = (
record_data[
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ ].decode('utf-8').strip('\x00'))
return {'key': k, 'value': v}
@@ -244,13 +252,14 @@ class HyperVKvpReportingHandler(ReportingHandler):
data_without_desc = json.dumps(meta_data,
separators=self.JSON_SEPARATORS)
room_for_desc = (
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+ self.HV_KVP_AZURE_MAX_VALUE_SIZE -
len(data_without_desc) - 8)
value = data_without_desc.replace(
message_place_holder,
'"{key}":"{desc}"'.format(
key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
- result_array.append(self._encode_kvp_item(key, value))
+ subkey = "{}|{}".format(key, i)
+ result_array.append(self._encode_kvp_item(subkey, value))
i += 1
des_in_json = des_in_json[room_for_desc:]
if len(des_in_json) == 0:
@@ -265,11 +274,11 @@ class HyperVKvpReportingHandler(ReportingHandler):
"""
key = self._event_key(event)
meta_data = {
- "name": event.name,
- "type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
- }
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
meta_data[self.MSG_KEY] = event.description
@@ -277,7 +286,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
# if it reaches the maximum length of kvp value,
# break it down to slices.
# this should be very corner case.
- if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+ if len(value) > self.HV_KVP_AZURE_MAX_VALUE_SIZE:
return self._break_down(key, meta_data, event.description)
else:
data = self._encode_kvp_item(key, value)
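
When a KVP value would exceed the tighter Azure limit, the description is sliced and each slice written under a numbered subkey, which is what the new "{}|{}".format(key, i) line does. A sketch of that indexing with a simplified signature and an invented key:

HV_KVP_AZURE_MAX_VALUE_SIZE = 1024

def break_down(key, data_without_desc, description):
    """Sketch: slice description into numbered '<key>|<i>' KVP records."""
    room_for_desc = HV_KVP_AZURE_MAX_VALUE_SIZE - len(data_without_desc) - 8
    slices, i = [], 0
    while description:
        slices.append(('{}|{}'.format(key, i), description[:room_for_desc]))
        description = description[room_for_desc:]
        i += 1
    return slices

parts = break_down('CLOUD_INIT|0|finish|mod', '{"msg":""}', 'x' * 2500)
assert [k for k, _ in parts] == ['CLOUD_INIT|0|finish|mod|0',
                                 'CLOUD_INIT|0|finish|mod|1',
                                 'CLOUD_INIT|0|finish|mod|2']
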
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
index f9ef7acc..67486e09 100644
--- a/cloudinit/serial.py
+++ b/cloudinit/serial.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import absolute_import
-
try:
from serial import Serial
except ImportError:
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 5270fda8..ac3ecc3d 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -18,9 +18,9 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
-from cloudinit.util import ProcessExecutionError
LOG = logging.getLogger(__name__)
@@ -192,7 +192,7 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
modprobe_floppy()
- except ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, 'Failed modprobe: %s', e)
return False
@@ -201,7 +201,7 @@ class DataSourceAltCloud(sources.DataSource):
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
- except (ProcessExecutionError, OSError) as e:
+ except (subp.ProcessExecutionError, OSError) as e:
util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
@@ -261,7 +261,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
- out, _err = util.subp(CMD_PROBE_FLOPPY)
+ out, _err = subp.subp(CMD_PROBE_FLOPPY)
LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 61ec522a..f3c6452b 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,7 +8,6 @@ import base64
import contextlib
import crypt
from functools import partial
-import json
import os
import os.path
import re
@@ -19,9 +18,11 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
from cloudinit.event import EventType
+from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers import netlink
+from cloudinit import subp
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
from cloudinit.reporting import events
@@ -34,7 +35,9 @@ from cloudinit.sources.helpers.azure import (
get_system_info,
report_diagnostic_event,
EphemeralDHCPv4WithReporting,
- is_byte_swapped)
+ is_byte_swapped,
+ dhcp_log_cb,
+ push_log_to_kvp)
LOG = logging.getLogger(__name__)
@@ -139,8 +142,8 @@ def find_dev_from_busdev(camcontrol_out, busdev):
def execute_or_debug(cmd, fail_ret=None):
try:
- return util.subp(cmd)[0]
- except util.ProcessExecutionError:
+ return subp.subp(cmd)[0]
+ except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", ' '.join(cmd))
return fail_ret
@@ -164,12 +167,11 @@ def get_resource_disk_on_freebsd(port_id):
port_id = port_id - 2
g1 = "000" + str(port_id)
g0g1 = "{0}-{1}".format(g0, g1)
- """
- search 'X' from
- 'dev.storvsc.X.%pnpinfo:
- classid=32412632-86cb-44a2-9b5c-50d1417354f5
- deviceid=00000000-0001-8899-0000-000000000000'
- """
+
+ # search 'X' from
+ # 'dev.storvsc.X.%pnpinfo:
+ # classid=32412632-86cb-44a2-9b5c-50d1417354f5
+ # deviceid=00000000-0001-8899-0000-000000000000'
sysctl_out = get_dev_storvsc_sysctl()
storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1)
@@ -252,11 +254,11 @@ DEF_PASSWD_REDACTION = 'REDACTED'
def get_hostname(hostname_command='hostname'):
if not isinstance(hostname_command, (list, tuple)):
hostname_command = (hostname_command,)
- return util.subp(hostname_command, capture=True)[0].strip()
+ return subp.subp(hostname_command, capture=True)[0].strip()
def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
+ subp.subp([hostname_command, hostname])
@azure_ds_telemetry_reporter
@@ -275,7 +277,14 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
(previous_hostname == temp_hostname and policy != 'force')):
yield None
return
- set_hostname(temp_hostname, hostname_command)
+ try:
+ set_hostname(temp_hostname, hostname_command)
+ except Exception as e:
+ msg = 'Failed setting temporary hostname: %s' % e
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
+ yield None
+ return
try:
yield previous_hostname
finally:
@@ -343,7 +352,7 @@ class DataSourceAzure(sources.DataSource):
try:
invoke_agent(agent_cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
self.ds_cfg['agent_command'])
@@ -522,8 +531,9 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata
+ )
except sources.InvalidMetaDataException as e:
LOG.warning('Could not crawl Azure metadata: %s', e)
return False
@@ -596,25 +606,35 @@ class DataSourceAzure(sources.DataSource):
return_val = None
def exc_cb(msg, exception):
- if isinstance(exception, UrlError) and exception.code == 404:
- if self.imds_poll_counter == self.imds_logging_threshold:
- # Reducing the logging frequency as we are polling IMDS
- self.imds_logging_threshold *= 2
- LOG.debug("Call to IMDS with arguments %s failed "
- "with status code %s after %s retries",
- msg, exception.code, self.imds_poll_counter)
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d", self.imds_logging_threshold)
- self.imds_poll_counter += 1
- return True
-
- # If we get an exception while trying to call IMDS, we
- # call DHCP and setup the ephemeral network to acquire the new IP.
- LOG.debug("Call to IMDS with arguments %s failed with "
- "status code %s", msg, exception.code)
- report_diagnostic_event("polling IMDS failed with exception %s"
- % exception.code)
- return False
+ if isinstance(exception, UrlError):
+ if exception.code in (404, 410):
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold)
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ self.imds_poll_counter += 1
+ return True
+ else:
+ # If we get an exception while trying to call IMDS, we call
+                    # DHCP and set up the ephemeral network to acquire a new IP.
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ return False
+
+ LOG.debug("poll IMDS failed with an unexpected exception: %s",
+ exception)
+ return False
LOG.debug("Wait for vnetswitch to happen")
while True:
@@ -624,7 +644,8 @@ class DataSourceAzure(sources.DataSource):
name="obtain-dhcp-lease",
description="obtain dhcp lease",
parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ dhcp_log_func=dhcp_log_cb)
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -675,7 +696,6 @@ class DataSourceAzure(sources.DataSource):
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
- pass
finally:
if nl_sock:
nl_sock.close()
@@ -771,9 +791,12 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ try:
+ address_ephemeral_resize(is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(
+ DS_CFG_KEY_PRESERVE_NTFS, False))
+ finally:
+ push_log_to_kvp(self.sys_cfg['def_log_file'])
return
@property
@@ -882,9 +905,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
with events.ReportEventStack(
- name="mount-ntfs-and-count",
- description="mount-ntfs-and-count",
- parent=azure_ds_reporter) as evt:
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter
+ ) as evt:
try:
file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
update_env_for_mount={'LANG': 'C'})
@@ -913,9 +937,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
# wait for ephemeral disk to come up
naplen = .2
with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter):
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter
+ ):
missing = util.wait_for_files([devpath],
maxwait=maxwait,
naplen=naplen,
@@ -972,7 +997,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
if command == "builtin":
if util.is_FreeBSD():
command = BOUNCE_COMMAND_FREEBSD
- elif util.which('ifup'):
+ elif subp.which('ifup'):
command = BOUNCE_COMMAND_IFUP
else:
LOG.debug(
@@ -983,7 +1008,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
+ get_uptime=True, func=subp.subp,
kwargs={'args': command, 'shell': shell, 'capture': False,
'env': env})
return True
@@ -993,7 +1018,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
capture=True, data=data)
return out.rstrip()
@@ -1005,7 +1030,7 @@ def pubkeys_from_crt_files(flist):
for fname in flist:
try:
pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
errors.append(fname)
if errors:
@@ -1047,7 +1072,7 @@ def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
+ subp.subp(cmd, shell=(not isinstance(cmd, list)))
else:
LOG.debug("not invoking agent")
@@ -1122,7 +1147,7 @@ def read_azure_ovf(contents):
except Exception as e:
error_str = "Invalid ovf-env.xml: %s" % e
report_diagnostic_event(error_str)
- raise BrokenAzureDataSource(error_str)
+ raise BrokenAzureDataSource(error_str) from e
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -1323,9 +1348,10 @@ def parse_network_config(imds_metadata):
@return: Dictionary containing network version 2 standard configuration.
"""
with events.ReportEventStack(
- name="parse_network_config",
- description="",
- parent=azure_ds_reporter) as evt:
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter
+ ) as evt:
if imds_metadata != sources.UNSET and imds_metadata:
netconfig = {'version': 2, 'ethernets': {}}
LOG.debug('Azure: generating network configuration from IMDS')
@@ -1362,9 +1388,16 @@ def parse_network_config(imds_metadata):
ip=privateIp, prefix=netPrefix))
if dev_config:
mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update(
- {'match': {'macaddress': mac.lower()},
- 'set-name': nicname})
+ dev_config.update({
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nicname
+ })
+ # With netvsc, we can get two interfaces that
+ # share the same MAC, so we need to make sure
+ # our match condition also contains the driver
+ driver = device_driver(nicname)
+ if driver and driver == 'hv_netvsc':
+ dev_config['match']['driver'] = driver
netconfig['ethernets'][nicname] = dev_config
evt.description = "network config from imds"
else:
@@ -1422,8 +1455,14 @@ def _get_metadata_from_imds(retries):
LOG.debug(msg)
return {}
try:
+ from json.decoder import JSONDecodeError
+ json_decode_error = JSONDecodeError
+ except ImportError:
+ json_decode_error = ValueError
+
+ try:
return util.load_json(str(response))
- except json.decoder.JSONDecodeError as e:
+ except json_decode_error as e:
report_diagnostic_event('non-json imds response' % e)
LOG.warning(
'Ignoring non-json IMDS instance metadata: %s', str(response))
@@ -1468,12 +1507,12 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter) as evt:
-
- """Check platform environment to report if this datasource may run."""
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter
+ ) as evt:
asset_tag = util.read_dmi_data('chassis-asset-tag')
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
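
The netvsc match addition above matters on Azure accelerated networking, where the synthetic hv_netvsc NIC and its SR-IOV VF expose the same MAC; adding the driver to the match keeps netplan bound to the synthetic device. A sketch of the resulting v2 stanza (MAC invented):

# Sketch: netplan v2 output for a netvsc NIC after this change.
dev_config = {
    'dhcp4': True,
    'match': {
        'macaddress': '00:0d:3a:aa:bb:cc',  # shared by the VF twin
        'driver': 'hv_netvsc',              # disambiguates the pair
    },
    'set-name': 'eth0',
}
netconfig = {'version': 2, 'ethernets': {'eth0': dev_config}}
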
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 2013bed7..54810439 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -22,6 +22,7 @@ from cloudinit import log as logging
from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -46,7 +47,7 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = util.subp([
+ output, _ = subp.subp([
'wget', '--quiet', '--tries', '3', '--timeout', '20',
'--output-document', '-', '--header',
'DomU_Request: {0}'.format(domu_request),
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f77923c2..62756cf7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -10,6 +10,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.net import eni
@@ -71,11 +72,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
- try:
- if util.is_FreeBSD() and dev.startswith("/dev/cd"):
+ mtype = None
+ if util.is_BSD():
+ if dev.startswith("/dev/cd"):
mtype = "cd9660"
- else:
- mtype = None
+ try:
results = util.mount_cb(dev, read_config_drive,
mtype=mtype)
found = dev
@@ -245,7 +246,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
for device in OPTICAL_DEVICES:
try:
util.find_devs_with(path=device)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
by_fstype = []
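
The config-drive hunk widens the cd9660 mount-type hint from FreeBSD-only to any BSD. The selection it performs reduces to roughly this:

# Sketch: pick an explicit mount type only for BSD optical devices.
def choose_mtype(dev, is_bsd):
    if is_bsd and dev.startswith('/dev/cd'):
        return 'cd9660'
    return None  # let mount auto-detect elsewhere

assert choose_mtype('/dev/cd0', is_bsd=True) == 'cd9660'
assert choose_mtype('/dev/vtbd0', is_bsd=True) is None
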
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index e0ef665e..5040ce5b 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -58,7 +58,7 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = None
if self.use_ip4LL:
- ipv4LL_nic = do_helper.assign_ipv4_link_local()
+ ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
self.metadata_address, timeout=self.timeout,
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0f2bfef4..1d09c12a 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -29,7 +29,6 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
API_TOKEN_ROUTE = 'latest/api/token'
-API_TOKEN_DISABLED = '_ec2_disable_api_token'
AWS_TOKEN_TTL_SECONDS = '21600'
AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
@@ -63,7 +62,7 @@ class DataSourceEc2(sources.DataSource):
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2016-09-02']
+ extended_metadata_versions = ['2018-09-24', '2016-09-02']
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -193,6 +192,12 @@ class DataSourceEc2(sources.DataSource):
return self.metadata['instance-id']
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ """ Get an API token for EC2 Instance Metadata Service.
+        """Get an API token for EC2 Instance Metadata Service.
+
+        On EC2, IMDS will always answer with an API token, unless
+ the network topology conflicts with the configured hop-limit.
+ """
if self.cloud_name != CloudNames.AWS:
return
@@ -205,18 +210,33 @@ class DataSourceEc2(sources.DataSource):
urls.append(cur)
url2base[cur] = url
- # use the self._status_cb to check for Read errors, which means
- # we can't reach the API token URL, so we should disable IMDSv2
+ # use the self._imds_exception_cb to check for Read errors
LOG.debug('Fetching Ec2 IMDSv2 API Token')
- url, response = uhelp.wait_for_url(
- urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
- headers_cb=self._get_headers, request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+
+ response = None
+ url = None
+ url_params = self.get_url_params()
+ try:
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_cb=self._get_headers,
+ exception_cb=self._imds_exception_cb,
+ request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+ except uhelp.UrlError:
+            # We use the raised exception to interrupt the retry loop.
+ # Nothing else to do here.
+ pass
if url and response:
self._api_token = response
return url2base[url]
+        # If we get here, then wait_for_url timed out waiting for IMDS,
+        # or the IMDS HTTP endpoint is disabled.
+ return None
+
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
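The token fetch above follows the standard IMDSv2 flow: a PUT to the token route carrying a TTL header, after which the returned token accompanies every metadata read. A minimal standalone sketch of that flow, assuming the requests library and the standard 169.254.169.254 endpoint:

    import requests

    # Request a session token (IMDSv2); the TTL mirrors AWS_TOKEN_TTL_SECONDS.
    token = requests.put(
        'http://169.254.169.254/latest/api/token',
        headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'},
        timeout=2,
    ).text

    # Present the token on subsequent metadata reads.
    instance_id = requests.get(
        'http://169.254.169.254/latest/meta-data/instance-id',
        headers={'X-aws-ec2-metadata-token': token},
        timeout=2,
    ).text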
@@ -240,9 +260,11 @@ class DataSourceEc2(sources.DataSource):
# try the api token path first
metadata_address = self._maybe_fetch_api_token(mdurls)
- if not metadata_address:
- if self._api_token == API_TOKEN_DISABLED:
- LOG.warning('Retrying with IMDSv1')
+ # When running on EC2, we always access IMDS with an API token.
+ # If we could not get an API token, then we assume the IMDS
+ # endpoint was disabled and we move on without a data source.
+        # Fall back to IMDSv1 if not running on EC2.
+ if not metadata_address and self.cloud_name != CloudNames.AWS:
# if we can't get a token, use instance-id path
urls = []
url2base = {}
@@ -267,6 +289,8 @@ class DataSourceEc2(sources.DataSource):
if metadata_address:
self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
+ elif self.cloud_name == CloudNames.AWS:
+ LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
@@ -381,13 +405,16 @@ class DataSourceEc2(sources.DataSource):
logfunc=LOG.debug, msg='Re-crawl of metadata service',
func=self.get_data)
- # Limit network configuration to only the primary/fallback nic
iface = self.fallback_interface
- macs_to_nics = {net.get_interface_mac(iface): iface}
net_md = self.metadata.get('network')
if isinstance(net_md, dict):
+ # SRU_BLOCKER: xenial, bionic and eoan should default
+ # apply_full_imds_network_config to False to retain original
+ # behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+ net_md, fallback_nic=iface,
+ full_network_config=util.get_cfg_option_bool(
+ self.ds_cfg, 'apply_full_imds_network_config', True))
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -496,11 +523,29 @@ class DataSourceEc2(sources.DataSource):
self._api_token = None
return True # always retry
- def _status_cb(self, msg, exc=None):
- LOG.warning(msg)
- if 'Read timed out' in msg:
- LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
- self._api_token = API_TOKEN_DISABLED
+ def _imds_exception_cb(self, msg, exception=None):
+        """Fail quickly on AWS proper when IMDSv2 rejects the API token request
+
+        Guidance from Amazon is that if IMDSv2 rejects token requests with
+        a 403 (token requests disabled), or cloud-init sends malformed
+        requests resulting in other 40X errors, we want datasource detection
+        to fail quickly without retries, as those symptoms are unlikely to
+        be resolved by retrying.
+
+        Exceptions such as requests.ConnectionError, raised while IMDS is
+        temporarily unroutable or unavailable, will still be retried by the
+        call site, wait_for_url.
+        """
+ if isinstance(exception, uhelp.UrlError):
+ # requests.ConnectionError will have exception.code == None
+ if exception.code and exception.code >= 400:
+ if exception.code == 403:
+ LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
+ 'HTTP endpoint is disabled. Aborting.')
+ else:
+ LOG.warning('Fatal error while requesting '
+ 'Ec2 IMDSv2 API tokens')
+ raise exception
def _get_headers(self, url=''):
"""Return a dict of headers for accessing a url.
@@ -508,8 +553,7 @@ class DataSourceEc2(sources.DataSource):
If _api_token is unset on AWS, attempt to refresh the token via a PUT
and then return the updated token header.
"""
- if self.cloud_name != CloudNames.AWS or (self._api_token ==
- API_TOKEN_DISABLED):
+ if self.cloud_name != CloudNames.AWS:
return {}
# Request a 6 hour token if URL is API_TOKEN_ROUTE
request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
@@ -573,9 +617,11 @@ def parse_strict_mode(cfgval):
if sleep:
try:
sleep = int(sleep)
- except ValueError:
- raise ValueError("Invalid sleep '%s' in strict_id setting '%s': "
- "not an integer" % (sleep, cfgval))
+ except ValueError as e:
+ raise ValueError(
+ "Invalid sleep '%s' in strict_id setting '%s': not an integer"
+ % (sleep, cfgval)
+ ) from e
else:
sleep = None
@@ -678,9 +724,10 @@ def _collect_platform_data():
return data
-def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
- fallback_nic=None):
- """Convert ec2 metadata to network config version 1 data dict.
+def convert_ec2_metadata_network_config(
+ network_md, macs_to_nics=None, fallback_nic=None,
+ full_network_config=True):
+ """Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
generally formed as {"interfaces": {"macs": {}} where
@@ -690,28 +737,105 @@ def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
not provided, get_interfaces_by_mac is called to get it from the OS.
@param: fallback_nic: Optionally provide the primary nic interface name.
This nic will be guaranteed to minimally have a dhcp4 configuration.
+ @param: full_network_config: Boolean set True to configure all networking
+ presented by IMDS. This includes rendering secondary IPv4 and IPv6
+ addresses on all NICs and rendering network config on secondary NICs.
+ If False, only the primary nic will be configured and only with dhcp
+ (IPv4/IPv6).
- @return A dict of network config version 1 based on the metadata and macs.
+ @return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 1, 'config': []}
+ netcfg = {'version': 2, 'ethernets': {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
macs_metadata = network_md['interfaces']['macs']
- for mac, nic_name in macs_to_nics.items():
+
+ if not full_network_config:
+ for mac, nic_name in macs_to_nics.items():
+ if nic_name == fallback_nic:
+ break
+ dev_config = {'dhcp4': True,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ nic_metadata = macs_metadata.get(mac)
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ netcfg['ethernets'][nic_name] = dev_config
+ return netcfg
+ # Apply network config for all nics and any secondary IPv4/v6 addresses
+ for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
- nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
- nic_cfg['mac_address'] = mac
- if (nic_name == fallback_nic or nic_metadata.get('public-ipv4s') or
- nic_metadata.get('local-ipv4s')):
- nic_cfg['subnets'].append({'type': 'dhcp4'})
- if nic_metadata.get('ipv6s'):
- nic_cfg['subnets'].append({'type': 'dhcp6'})
- netcfg['config'].append(nic_cfg)
+        # device-number is zero-indexed; we want it 1-indexed for the
+ # multiplication on the following line
+ nic_idx = int(nic_metadata['device-number']) + 1
+ dhcp_override = {'route-metric': nic_idx * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config['addresses']:
+ dev_config.pop('addresses') # Since we found none configured
+ netcfg['ethernets'][nic_name] = dev_config
+ # Remove route-metric dhcp overrides if only one nic configured
+ if len(netcfg['ethernets']) == 1:
+ for nic_name in netcfg['ethernets'].keys():
+ netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
+ netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
return netcfg
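To make the shape of the new version 2 output concrete, a worked sketch with a hypothetical MAC and NIC name; with a single NIC, the cleanup loop above strips the route-metric overrides again:

    network_md = {'interfaces': {'macs': {
        '0a:07:84:3d:5e:37': {
            'device-number': '0',
            'local-ipv4s': '172.31.1.10',
        }}}}
    netcfg = convert_ec2_metadata_network_config(
        network_md, macs_to_nics={'0a:07:84:3d:5e:37': 'eth0'})
    # netcfg == {'version': 2, 'ethernets': {'eth0': {
    #     'dhcp4': True, 'dhcp6': False,
    #     'match': {'macaddress': '0a:07:84:3d:5e:37'},
    #     'set-name': 'eth0'}}}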
+def get_secondary_addresses(nic_metadata, mac):
+ """Parse interface-specific nic metadata and return any secondary IPs
+
+ :return: List of secondary IPv4 or IPv6 addresses to configure on the
+ interface
+ """
+ ipv4s = nic_metadata.get('local-ipv4s')
+ ipv6s = nic_metadata.get('ipv6s')
+ addresses = []
+    # In versions < 2018-09-24, local-ipv4s or ipv6s is a str with one IP
+    if isinstance(ipv4s, list) and len(ipv4s) > 1:
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+    if isinstance(ipv6s, list) and len(ipv6s) > 1:
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ return sorted(addresses)
+
+
+def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
+ """Return list of IP addresses as CIDRs for secondary IPs
+
+ The CIDR prefix will be default_prefix if cidr_key is absent or not
+ parseable in nic_metadata.
+ """
+ addresses = []
+ cidr = nic_metadata.get(cidr_key)
+ prefix = default_prefix
+ if not cidr or len(cidr.split('/')) != 2:
+ ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ LOG.warning(
+ 'Could not parse %s %s for mac %s. %s network'
+ ' config prefix defaults to /%s',
+ cidr_key, cidr, mac, ip_type, prefix)
+ else:
+ prefix = cidr.split('/')[1]
+    # We know we have > 1 IPs in metadata for this IP type
+ for ip in ips[1:]:
+ addresses.append(
+ '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ return addresses
+
+
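A quick sketch of the secondary-address helper with hypothetical values: the first address is the primary and is skipped, and the prefix is taken from the subnet CIDR when it parses:

    nic_md = {
        'local-ipv4s': ['172.31.1.10', '172.31.1.11', '172.31.1.12'],
        'subnet-ipv4-cidr-block': '172.31.0.0/20',
    }
    get_secondary_addresses(nic_md, '0a:07:84:3d:5e:37')
    # -> ['172.31.1.11/20', '172.31.1.12/20']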
# Used to match classes to dependencies
datasources = [
(DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 6cbfbbac..0ec5f6ec 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -116,7 +116,7 @@ def _write_host_key_to_guest_attributes(key_type, key_value):
resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
request_method='PUT', check_status=False)
if resp.ok():
-        LOG.debug('Wrote %s host key to guest attributes.',  key_type)
+ LOG.debug('Wrote %s host key to guest attributes.', key_type)
else:
LOG.debug('Unable to write %s host key to guest attributes.', key_type)
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 50298330..a86035e0 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -59,12 +59,19 @@ class DataSourceHetzner(sources.DataSource):
self.userdata_address, timeout=self.timeout,
sec_between=self.wait_retry, retries=self.retries)
- self.userdata_raw = ud
+ # Hetzner cloud does not support binary user-data. So here, do a
+ # base64 decode of the data if we can. The end result being that a
+ # user can provide base64 encoded (possibly gzipped) data as user-data.
+ #
+ # The fallout is that in the event of b64 encoded user-data,
+ # /var/lib/cloud-init/cloud-config.txt will not be identical to the
+ # user-data provided. It will be decoded.
+ self.userdata_raw = hc_helper.maybe_b64decode(ud)
self.metadata_full = md
- """hostname is name provided by user at launch. The API enforces
- it is a valid hostname, but it is not guaranteed to be resolvable
- in dns or fully qualified."""
+ # hostname is name provided by user at launch. The API enforces it is
+        # a valid hostname, but it is not guaranteed to be resolvable in DNS
+ # fully qualified.
self.metadata['instance-id'] = md['instance-id']
self.metadata['local-hostname'] = md['hostname']
self.metadata['network-config'] = md.get('network-config', None)
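hc_helper.maybe_b64decode lives in cloud-init's Hetzner helpers; an approximate sketch of the decode-if-possible behavior it provides (an illustration, not the exact upstream body):

    import base64
    import binascii

    def maybe_b64decode(data):
        # Return base64-decoded data when it decodes cleanly, else data as-is.
        try:
            return base64.b64decode(data, validate=True)
        except (binascii.Error, ValueError):
            return data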
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index e0c714e8..8d196185 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -99,6 +99,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit.sources.helpers import openstack
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -240,7 +241,7 @@ def get_ibm_platform():
fslabels = {}
try:
devs = util.blkid()
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning("Failed to run blkid: %s", e)
return (None, None)
@@ -302,7 +303,8 @@ def read_md():
except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e))
+ (platform, path, e)
+ ) from e
ret.update(results)
return ret
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 517913aa..9156925f 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -6,8 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import hashlib
import os
import time
@@ -228,7 +226,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
- "Missing required %s: %s" % (path, e))
+ "Missing required %s: %s" % (path, e)
+ ) from e
elif e.code != 404:
raise e
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index ee748b41..e408d730 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,23 +36,15 @@ class DataSourceNoCloud(sources.DataSource):
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
def _get_devices(self, label):
- if util.is_FreeBSD():
- devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
- else:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
- label_list = util.find_devs_with("LABEL=%s" % label.upper())
- label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list.extend(util.find_devs_with("LABEL_FATBOOT=%s" % label))
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
return devlist
def _get_data(self):
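The simplified lookup intersects devices found by filesystem type with devices found by label; a standalone sketch of the same query, assuming the usual NoCloud seed label 'cidata':

    from cloudinit import util

    fslist = util.find_devs_with("TYPE=vfat")
    fslist.extend(util.find_devs_with("TYPE=iso9660"))

    label_list = util.find_devs_with("LABEL=CIDATA")
    label_list.extend(util.find_devs_with("LABEL=cidata"))
    label_list.extend(util.find_devs_with("LABEL_FATBOOT=cidata"))

    devlist = sorted(set(fslist) & set(label_list), reverse=True)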
@@ -370,7 +362,7 @@ def _merge_new_seed(cur, seeded):
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
# Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7f55b5f8..6a9a331d 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config \
import Config
@@ -37,7 +38,8 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
set_customization_status,
- get_tools_config
+ get_tools_config,
+ set_gc_status
)
LOG = logging.getLogger(__name__)
@@ -140,6 +142,8 @@ class DataSourceOVF(sources.DataSource):
try:
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
+ set_gc_status(self._vmware_cust_conf, "Started")
+
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
@@ -148,14 +152,25 @@ class DataSourceOVF(sources.DataSource):
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
- custScriptConfig = get_tools_config(
- CONFGROUPNAME_GUESTCUSTOMIZATION,
- GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- "false")
- if custScriptConfig.lower() != "true":
- # Update the customization status if there is a
- # custom script is disabled
- if special_customization and customscript:
+
+ # In case there is a custom script, check whether VMware
+            # Tools configuration allows the custom script to run.
+ if special_customization and customscript:
+ defVal = "false"
+ if self._vmware_cust_conf.default_run_post_script:
+ LOG.debug(
+ "Set default value to true due to"
+ " customization configuration."
+ )
+ defVal = "true"
+
+ custScriptConfig = get_tools_config(
+ CONFGROUPNAME_GUESTCUSTOMIZATION,
+ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+ defVal)
+ if custScriptConfig.lower() != "true":
+                # Update the customization status if the custom
+                # script is disabled
msg = "Custom script is disabled by VM Administrator"
LOG.debug(msg)
set_customization_status(
@@ -171,7 +186,8 @@ class DataSourceOVF(sources.DataSource):
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
if customscript:
@@ -183,7 +199,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing pre-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
try:
LOG.debug("Preparing the Network configuration")
@@ -197,7 +214,8 @@ class DataSourceOVF(sources.DataSource):
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
LOG.debug("Applying password customization")
@@ -215,7 +233,8 @@ class DataSourceOVF(sources.DataSource):
"Error applying Password Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if customscript:
try:
@@ -228,7 +247,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing post-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if product_marker:
try:
@@ -240,7 +260,8 @@ class DataSourceOVF(sources.DataSource):
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
self._vmware_cust_found = True
found.append('vmware-tools')
@@ -252,6 +273,7 @@ class DataSourceOVF(sources.DataSource):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ set_gc_status(self._vmware_cust_conf, "Successful")
else:
np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
@@ -327,7 +349,7 @@ class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
@@ -527,15 +549,15 @@ def transport_iso9660(require_iso=True):
def transport_vmware_guestinfo():
rpctool = "vmware-rpctool"
not_found = None
- if not util.which(rpctool):
+ if not subp.which(rpctool):
return not_found
cmd = [rpctool, "info-get guestinfo.ovfEnv"]
try:
- out, _err = util.subp(cmd)
+ out, _err = subp.subp(cmd)
if out:
return out
LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
LOG.warning("%s exited with code %d", rpctool, e.exit_code)
LOG.debug(e)
@@ -647,7 +669,7 @@ def setup_marker_files(markerid, marker_dir):
open(markerfile, 'w').close()
-def _raise_error_status(prefix, error, event, config_file):
+def _raise_error_status(prefix, error, event, config_file, conf):
"""
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
@@ -656,6 +678,7 @@ def _raise_error_status(prefix, error, event, config_file):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
event)
+ set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 02c9a7b8..45481938 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -13,6 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import collections
+import functools
import os
import pwd
import re
@@ -21,6 +22,7 @@ import string
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
@@ -59,10 +61,19 @@ class DataSourceOpenNebula(sources.DataSource):
for cdev in candidates:
try:
if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
+ results = read_context_disk_dir(
+ cdev, self.distro, asuser=parseuser
+ )
elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
+ # util.mount_cb only handles passing a single argument
+ # through to the wrapped function, so we have to partially
+ # apply the function to pass in `distro`. See LP: #1884979
+ partially_applied_func = functools.partial(
+ read_context_disk_dir,
+ asuser=parseuser,
+ distro=self.distro,
+ )
+ results = util.mount_cb(cdev, partially_applied_func)
except NonContextDiskDir:
continue
except BrokenContextDiskDir as exc:
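The functools.partial pattern works around util.mount_cb forwarding only the mount point (plus at most one data argument) to its callback: the extra keyword arguments are pre-bound before the callback is handed over. A generic, self-contained sketch with hypothetical arguments:

    import functools

    def read_disk(mount_point, distro, asuser=None):
        # Hypothetical callback that needs extra arguments.
        return (mount_point, distro, asuser)

    callback = functools.partial(read_disk, distro='ubuntu', asuser='nobody')
    callback('/mnt')  # -> ('/mnt', 'ubuntu', 'nobody')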
@@ -128,10 +139,10 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- def __init__(self, context, system_nics_by_mac=None):
+ def __init__(self, context, distro, system_nics_by_mac=None):
self.context = context
if system_nics_by_mac is None:
- system_nics_by_mac = get_physical_nics_by_mac()
+ system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
[k for k in sorted(system_nics_by_mac.items(),
key=lambda k: net.natural_sort_key(k[1]))])
@@ -334,7 +345,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
cmd.extend(bash)
- (output, _error) = util.subp(cmd, data=bcmd)
+ (output, _error) = subp.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = (
@@ -366,7 +377,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
return ret
-def read_context_disk_dir(source_dir, asuser=None):
+def read_context_disk_dir(source_dir, distro, asuser=None):
"""
read_context_disk_dir(source_dir):
read source_dir and return a tuple with metadata dict and user-data
@@ -388,18 +399,23 @@ def read_context_disk_dir(source_dir, asuser=None):
if asuser is not None:
try:
pwd.getpwnam(asuser)
- except KeyError:
+ except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser))
+ user=asuser)
+ ) from e
try:
path = os.path.join(source_dir, 'context.sh')
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
+ except subp.ProcessExecutionError as e:
+ raise BrokenContextDiskDir(
+ "Error processing context.sh: %s" % (e)
+ ) from e
except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+ raise NonContextDiskDir(
+ "Error reading context.sh: %s" % (e)
+ ) from e
else:
raise NonContextDiskDir("Missing context.sh")
@@ -417,9 +433,9 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
+ results['metadata']['public-keys'] = [
+ line for line in lines if len(line) and not line.startswith("#")
+ ]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
@@ -449,15 +465,17 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
if ipaddr_keys:
- onet = OpenNebulaNetwork(context)
+ onet = OpenNebulaNetwork(context, distro)
results['network-interfaces'] = onet.gen_conf()
return results
-def get_physical_nics_by_mac():
+def get_physical_nics_by_mac(distro):
devs = net.get_interfaces_by_mac()
- return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+ return dict(
+ [(m, n) for m, n in devs.items() if distro.networking.is_physical(n)]
+ )
# Legacy: Must be present in case we load an old pkl object
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 7a5e71b6..d4b43f44 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -29,7 +29,10 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova'
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
-VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
+# -> compute.defaults.vmware.smbios_asset_tag for this value
+DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
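Platform detection for these clouds keys off SMBIOS values; a minimal sketch of the check, using util.read_dmi_data as the datasource does elsewhere:

    from cloudinit import util

    asset_tag = util.read_dmi_data('chassis-asset-tag')
    product_name = util.read_dmi_data('system-product-name')
    looks_like_openstack = (
        asset_tag in VALID_DMI_ASSET_TAGS or
        product_name in VALID_DMI_PRODUCT_NAMES
    )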
@@ -191,10 +194,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
'timeout': url_params.timeout_seconds})
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
- except (openstack.BrokenMetadata, IOError):
+ except (openstack.BrokenMetadata, IOError) as e:
msg = 'Broken metadata address {addr}'.format(
addr=self.metadata_address)
- raise sources.InvalidMetaDataException(msg)
+ raise sources.InvalidMetaDataException(msg) from e
return result
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index eec87403..20d6487d 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -1,30 +1,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
-OCI provides a OpenStack like metadata service which provides only
-'2013-10-17' and 'latest' versions..
-
Notes:
- * This datasource does not support the OCI-Classic. OCI-Classic
- provides an EC2 lookalike metadata service.
- * The uuid provided in DMI data is not the same as the meta-data provided
+ * This datasource does not support OCI Classic. OCI Classic provides an EC2
+ lookalike metadata service.
+ * The UUID provided in DMI data is not the same as the meta-data provided
instance-id, but has an equivalent lifespan.
* We do need to support upgrade from an instance that cloud-init
identified as OpenStack.
- * Both bare-metal and vms use iscsi root
- * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com
+ * Bare metal instances use iSCSI root, virtual machine instances do not.
+ * Both bare metal and virtual machine instances provide a chassis-asset-tag of
+ OracleCloud.com.
"""
-from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.net import cmdline
-from cloudinit import log as logging
+import base64
+from collections import namedtuple
+from contextlib import suppress as noop
-import json
-import re
+from cloudinit import log as logging
+from cloudinit import net, sources, util
+from cloudinit.net import (
+ cmdline,
+ dhcp,
+ get_interfaces_by_mac,
+ is_netfail_master,
+)
+from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -33,79 +34,13 @@ BUILTIN_DS_CONFIG = {
'configure_secondary_nics': False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
-METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
-VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/'
+METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
+METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
-
-def _add_network_config_from_opc_imds(network_config):
- """
- Fetch data from Oracle's IMDS, generate secondary NIC config, merge it.
-
- The primary NIC configuration should not be modified based on the IMDS
- values, as it should continue to be configured for DHCP. As such, this
- takes an existing network_config dict which is expected to have the primary
- NIC configuration already present. It will mutate the given dict to
- include the secondary VNICs.
-
- :param network_config:
- A v1 or v2 network config dict with the primary NIC already configured.
- This dict will be mutated.
-
- :raises:
- Exceptions are not handled within this function. Likely exceptions are
- those raised by url_helper.readurl (if communicating with the IMDS
- fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON),
- and KeyError/IndexError (if the IMDS returns valid JSON with unexpected
- contents).
- """
- resp = readurl(VNIC_METADATA_URL)
- vnics = json.loads(str(resp))
-
- if 'nicIndex' in vnics[0]:
- # TODO: Once configure_secondary_nics defaults to True, lower the level
- # of this log message. (Currently, if we're running this code at all,
- # someone has explicitly opted-in to secondary VNIC configuration, so
- # we should warn them that it didn't happen. Once it's default, this
- # would be emitted on every Bare Metal Machine launch, which means INFO
- # or DEBUG would be more appropriate.)
- LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; skipping'
- ' secondary VNIC configuration.'
- )
- return
-
- interfaces_by_mac = get_interfaces_by_mac()
-
- for vnic_dict in vnics[1:]:
- # We skip the first entry in the response because the primary interface
- # is already configured by iSCSI boot; applying configuration from the
- # IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
- if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping', mac_address)
- continue
- name = interfaces_by_mac[mac_address]
-
- if network_config['version'] == 1:
- subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
- }
- network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif network_config['version'] == 2:
- network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
+OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
def _ensure_netfailover_safe(network_config):
@@ -174,6 +109,7 @@ class DataSourceOracle(sources.DataSource):
def __init__(self, sys_cfg, *args, **kwargs):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
+ self._vnics_data = None
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
@@ -187,54 +123,46 @@ class DataSourceOracle(sources.DataSource):
if not self._is_platform_viable():
return False
+ self.system_uuid = _read_system_uuid()
+
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
- if _is_iscsi_root():
- data = self.crawl_metadata()
- else:
- with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
- data = self.crawl_metadata()
-
- self._crawled_metadata = data
- vdata = data['2013-10-17']
-
- self.userdata_raw = vdata.get('user_data')
- self.system_uuid = vdata['system_uuid']
-
- vd = vdata.get('vendor_data')
- if vd:
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = sources.convert_vendordata(vd)
- except ValueError as e:
- LOG.warning("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- mdcopies = ('public_keys',)
- md = dict([(k, vdata['meta_data'].get(k))
- for k in mdcopies if k in vdata['meta_data']])
-
- mdtrans = (
- # oracle meta_data.json name, cloudinit.datasource.metadata name
- ('availability_zone', 'availability-zone'),
- ('hostname', 'local-hostname'),
- ('launch_index', 'launch-index'),
- ('uuid', 'instance-id'),
+ fetch_vnics_data = self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ )
+ network_context = noop()
+ if not _is_iscsi_root():
+ network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ with network_context:
+ fetched_metadata = read_opc_metadata(
+ fetch_vnics_data=fetch_vnics_data
+ )
+
+ data = self._crawled_metadata = fetched_metadata.instance_data
+ self.metadata_address = METADATA_ROOT.format(
+ version=fetched_metadata.version
)
- for dsname, ciname in mdtrans:
- if dsname in vdata['meta_data']:
- md[ciname] = vdata['meta_data'][dsname]
+ self._vnics_data = fetched_metadata.vnics_data
+
+ self.metadata = {
+ "availability-zone": data["ociAdName"],
+ "instance-id": data["id"],
+ "launch-index": 0,
+ "local-hostname": data["hostname"],
+ "name": data["displayName"],
+ }
+
+ if "metadata" in data:
+ user_data = data["metadata"].get("user_data")
+ if user_data:
+ self.userdata_raw = base64.b64decode(user_data)
+ self.metadata["public_keys"] = data["metadata"].get(
+ "ssh_authorized_keys"
+ )
- self.metadata = md
return True
- def crawl_metadata(self):
- return read_metadata()
-
- def _get_subplatform(self):
- """Return the subplatform metadata source details."""
- return 'metadata (%s)' % METADATA_ENDPOINT
-
def check_instance_id(self, sys_cfg):
"""quickly check (local only) if self.instance_id is still valid
@@ -248,15 +176,9 @@ class DataSourceOracle(sources.DataSource):
@property
def network_config(self):
"""Network config is read from initramfs provided files
- If none is present, then we fall back to fallback configuration.
- One thing to note here is that this method is not currently
- considered at all if there is is kernel/initramfs provided
- data. In that case, stages considers that the cmdline data
- overrides datasource provided data and does not consult here.
-
- We nonetheless return cmdline provided config if present
- and fallback to generate fallback."""
+ If none is present, then we fall back to fallback configuration.
+ """
if self._network_config == sources.UNSET:
# this is v1
self._network_config = cmdline.read_initramfs_config()
@@ -265,14 +187,18 @@ class DataSourceOracle(sources.DataSource):
# this is now v2
self._network_config = self.distro.generate_fallback_config()
- if self.ds_cfg.get('configure_secondary_nics'):
+ if self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ ):
try:
- # Mutate self._network_config to include secondary VNICs
- _add_network_config_from_opc_imds(self._network_config)
+ # Mutate self._network_config to include secondary
+ # VNICs
+ self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
LOG,
- "Failed to fetch secondary network configuration!")
+ "Failed to parse secondary network configuration!")
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -281,6 +207,70 @@ class DataSourceOracle(sources.DataSource):
return self._network_config
+ def _add_network_config_from_opc_imds(self):
+ """Generate secondary NIC config from IMDS and merge it.
+
+ The primary NIC configuration should not be modified based on the IMDS
+ values, as it should continue to be configured for DHCP. As such, this
+ uses the instance's network config dict which is expected to have the
+ primary NIC configuration already present.
+ It will mutate the network config to include the secondary VNICs.
+
+ :raises:
+ Exceptions are not handled within this function. Likely
+ exceptions are KeyError/IndexError
+ (if the IMDS returns valid JSON with unexpected contents).
+ """
+ if self._vnics_data is None:
+ LOG.warning(
+ "Secondary NIC data is UNSET but should not be")
+ return
+
+ if 'nicIndex' in self._vnics_data[0]:
+ # TODO: Once configure_secondary_nics defaults to True, lower the
+ # level of this log message. (Currently, if we're running this
+ # code at all, someone has explicitly opted-in to secondary
+ # VNIC configuration, so we should warn them that it didn't
+ # happen. Once it's default, this would be emitted on every Bare
+ # Metal Machine launch, which means INFO or DEBUG would be more
+ # appropriate.)
+ LOG.warning(
+ 'VNIC metadata indicates this is a bare metal machine; '
+ 'skipping secondary VNIC configuration.'
+ )
+ return
+
+ interfaces_by_mac = get_interfaces_by_mac()
+
+ for vnic_dict in self._vnics_data[1:]:
+ # We skip the first entry in the response because the primary
+ # interface is already configured by iSCSI boot; applying
+ # configuration from the IMDS is not required.
+ mac_address = vnic_dict['macAddr'].lower()
+ if mac_address not in interfaces_by_mac:
+ LOG.debug('Interface with MAC %s not found; skipping',
+ mac_address)
+ continue
+ name = interfaces_by_mac[mac_address]
+
+ if self._network_config['version'] == 1:
+ subnet = {
+ 'type': 'static',
+ 'address': vnic_dict['privateIp'],
+ }
+ self._network_config['config'].append({
+ 'name': name,
+ 'type': 'physical',
+ 'mac_address': mac_address,
+ 'mtu': MTU,
+ 'subnets': [subnet],
+ })
+ elif self._network_config['version'] == 2:
+ self._network_config['ethernets'][name] = {
+ 'addresses': [vnic_dict['privateIp']],
+ 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
+ 'match': {'macaddress': mac_address}}
+
def _read_system_uuid():
sys_uuid = util.read_dmi_data('system-uuid')
@@ -296,72 +286,46 @@ def _is_iscsi_root():
return bool(cmdline.read_initramfs_config())
-def _load_index(content):
- """Return a list entries parsed from content.
-
- OpenStack's metadata service returns a newline delimited list
- of items. Oracle's implementation has html formatted list of links.
- The parser here just grabs targets from <a href="target">
- and throws away "../".
-
- Oracle has accepted that to be buggy and may fix in the future
- to instead return a '\n' delimited plain text list. This function
- will continue to work if that change is made."""
- if not content.lower().startswith("<html>"):
- return content.splitlines()
- items = re.findall(
- r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
- return [i for i in items if not i.startswith(".")]
-
+def read_opc_metadata(*, fetch_vnics_data: bool = False):
+ """Fetch metadata from the /opc/ routes.
-def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
- version='2013-10-17'):
- """Read metadata, return a dictionary.
+ :return:
+ A namedtuple containing:
+ The metadata version as an integer
+ The JSON-decoded value of the instance data endpoint on the IMDS
+ The JSON-decoded value of the vnics data endpoint if
+ `fetch_vnics_data` is True, else None
- Each path listed in the index will be represented in the dictionary.
- If the path ends in .json, then the content will be decoded and
- populated into the dictionary.
-
- The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
- Example: given paths = ('user_data', 'meta_data.json')
- This would return:
- {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
- 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
"""
- endpoint = combine_url(endpoint_base, version) + "/"
- if sys_uuid is None:
- sys_uuid = _read_system_uuid()
- if not sys_uuid:
- raise sources.BrokenMetadata("Failed to read system uuid.")
-
+ # Per Oracle, there are short windows (measured in milliseconds) throughout
+ # an instance's lifetime where the IMDS is being updated and may 404 as a
+ # result. To work around these windows, we retry a couple of times.
+ retries = 2
+
+ def _fetch(metadata_version: int, path: str) -> dict:
+ headers = {
+ "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
+ return readurl(
+ url=METADATA_PATTERN.format(version=metadata_version, path=path),
+ headers=headers,
+ retries=retries,
+ )._response.json()
+
+ metadata_version = 2
try:
- resp = readurl(endpoint)
- if not resp.ok():
- raise sources.BrokenMetadata(
- "Bad response from %s: %s" % (endpoint, resp.code))
- except UrlError as e:
- raise sources.BrokenMetadata(
- "Failed to read index at %s: %s" % (endpoint, e))
-
- entries = _load_index(resp.contents.decode('utf-8'))
- LOG.debug("index url %s contained: %s", endpoint, entries)
-
- # meta_data.json is required.
- mdj = 'meta_data.json'
- if mdj not in entries:
- raise sources.BrokenMetadata(
- "Required field '%s' missing in index at %s" % (mdj, endpoint))
-
- ret = {'system_uuid': sys_uuid}
- for path in entries:
- response = readurl(combine_url(endpoint, path))
- if path.endswith(".json"):
- ret[path.rpartition(".")[0]] = (
- json.loads(response.contents.decode('utf-8')))
- else:
- ret[path] = response.contents
-
- return {version: ret}
+ instance_data = _fetch(metadata_version, path="instance")
+ except UrlError:
+ metadata_version = 1
+ instance_data = _fetch(metadata_version, path="instance")
+
+ vnics_data = None
+ if fetch_vnics_data:
+ try:
+ vnics_data = _fetch(metadata_version, path="vnics")
+ except UrlError:
+ util.logexc(LOG,
+ "Failed to fetch secondary network configuration!")
+ return OpcMetadata(metadata_version, instance_data, vnics_data)
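A usage sketch: the namedtuple keeps the v2/v1 version negotiation transparent to callers (field values illustrative):

    md = read_opc_metadata(fetch_vnics_data=True)
    md.version        # 2, or 1 if the v2 route errored
    md.instance_data  # JSON-decoded body of /opc/v{version}/instance/
    md.vnics_data     # JSON-decoded body of /opc/v{version}/vnics/, or None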
# Used to match classes to dependencies
@@ -377,17 +341,21 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- import os
-
- parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=METADATA_ENDPOINT)
- args = parser.parse_args()
- sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
-
- data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
- data['is_platform_viable'] = _is_platform_viable()
- print(util.json_dumps(data))
+
+ description = """
+ Query Oracle Cloud metadata and emit a JSON object with two keys:
+ `read_opc_metadata` and `_is_platform_viable`. The values of each are
+ the return values of the corresponding functions defined in
+ DataSourceOracle.py."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(
+ util.json_dumps(
+ {
+ "read_opc_metadata": read_opc_metadata(),
+ "_is_platform_viable": _is_platform_viable(),
+ }
+ )
+ )
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index c3cd5c79..e064c8d6 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -15,6 +15,7 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -43,11 +44,11 @@ def int2ip(addr):
def _sub_arp(cmd):
"""
- Uses the prefered cloud-init subprocess def of util.subp
+    Uses the preferred cloud-init subprocess function, subp.subp,
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return util.subp(['arping'] + cmd)
+ return subp.subp(['arping'] + cmd)
def gratuitous_arp(items, distro):
@@ -55,26 +56,32 @@ def gratuitous_arp(items, distro):
if distro.name in ['fedora', 'centos', 'rhel']:
source_param = '-s'
for item in items:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ try:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+ except subp.ProcessExecutionError as error:
+                # warn only, because the system can function properly
+                # despite the failure - some ARP entries may simply be
+                # waiting to expire, and the system can continue
+ LOG.warning('Failed to arping from "%s" to "%s": %s',
+ item['source'], item['destination'], error)
def get_md():
rbx_data = None
- devices = [
- dev
- for dev, bdata in util.blkid().items()
- if bdata.get('LABEL', '').upper() == 'CLOUDMD'
- ]
+ devices = set(
+ util.find_devs_with('LABEL=CLOUDMD') +
+ util.find_devs_with('LABEL=cloudmd')
+ )
for device in devices:
try:
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat']
+ mtype=['vfat', 'fat', 'msdosfs']
)
if rbx_data:
break
@@ -182,7 +189,6 @@ def read_user_data_callback(mount_dir):
'passwd': hash,
'lock_passwd': False,
'ssh_authorized_keys': ssh_keys,
- 'shell': '/bin/bash'
}
},
'network_config': network,
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index cf676504..f1f903bc 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -33,6 +33,7 @@ import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -412,7 +413,9 @@ class JoyentMetadataClient(object):
response.append(byte)
except OSError as exc:
if exc.errno == errno.EAGAIN:
- raise JoyentMetadataTimeoutException(msg % as_ascii())
+ raise JoyentMetadataTimeoutException(
+ msg % as_ascii()
+ ) from exc
raise
def _write(self, msg):
@@ -696,9 +699,9 @@ def identify_file(content_f):
cmd = ["file", "--brief", "--mime-type", content_f]
f_type = None
try:
- (f_type, _err) = util.subp(cmd)
+ (f_type, _err) = subp.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(
LOG, ("Failed to identify script type for %s" % content_f, e))
return None if f_type is None else f_type.strip()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index dd93cfd8..c4d60fff 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -78,7 +78,6 @@ class DataSourceNotFoundException(Exception):
class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
- pass
def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@@ -89,26 +88,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@return Dict copy of processed metadata.
"""
md_copy = copy.deepcopy(metadata)
- md_copy['base64_encoded_keys'] = []
- md_copy['sensitive_keys'] = []
+ base64_encoded_keys = []
+ sens_keys = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
- md_copy['sensitive_keys'].append(sub_key_path)
+ sens_keys.append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64_encoded_keys'].append(sub_key_path)
+ base64_encoded_keys.append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
return_val = process_instance_metadata(
val, sub_key_path, sensitive_keys)
- md_copy['base64_encoded_keys'].extend(
- return_val.pop('base64_encoded_keys'))
- md_copy['sensitive_keys'].extend(
- return_val.pop('sensitive_keys'))
+ base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
+ sens_keys.extend(return_val.pop('sensitive_keys'))
md_copy[key] = return_val
+ md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
+ md_copy['sensitive_keys'] = sorted(sens_keys)
return md_copy
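The net effect of the refactor is that the key lists are accumulated locally during recursion and attached sorted at the end; a small sketch of the resulting behavior with hypothetical metadata:

    md = {'kconfig': 'ci-b64:Zm9v', 'ds': {'token': 'secret'}}
    out = process_instance_metadata(md, sensitive_keys=('ds/token',))
    # out['kconfig'] == 'Zm9v'  (prefix stripped, path recorded)
    # out['base64_encoded_keys'] == ['kconfig']
    # out['sensitive_keys'] == ['ds/token']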
@@ -193,7 +192,7 @@ class DataSource(metaclass=abc.ABCMeta):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('security-credentials',)
+ sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -218,14 +217,15 @@ class DataSource(metaclass=abc.ABCMeta):
def __str__(self):
return type_utils.obj_name(self)
- def _get_standardized_metadata(self):
+ def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
local_hostname = self.get_hostname()
instance_id = self.get_instance_id()
availability_zone = self.availability_zone
# In the event of upgrade from existing cloudinit, pickled datasource
# will not contain these new class attributes. So we need to recrawl
- # metadata to discover that content.
+ # metadata to discover that content
+ sysinfo = instance_data["sys_info"]
return {
'v1': {
'_beta_keys': ['subplatform'],
@@ -233,14 +233,22 @@ class DataSource(metaclass=abc.ABCMeta):
'availability_zone': availability_zone,
'cloud-name': self.cloud_name,
'cloud_name': self.cloud_name,
+ 'distro': sysinfo["dist"][0],
+ 'distro_version': sysinfo["dist"][1],
+ 'distro_release': sysinfo["dist"][2],
'platform': self.platform_type,
'public_ssh_keys': self.get_public_ssh_keys(),
+ 'python_version': sysinfo["python"],
'instance-id': instance_id,
'instance_id': instance_id,
+ 'kernel_release': sysinfo["uname"][2],
'local-hostname': local_hostname,
'local_hostname': local_hostname,
+ 'machine': sysinfo["uname"][4],
'region': self.region,
- 'subplatform': self.subplatform}}
+ 'subplatform': self.subplatform,
+ 'system_platform': sysinfo["platform"],
+ 'variant': sysinfo["variant"]}}
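For reference, the sys_info fields consumed above come from cloudinit.util.system_info(); roughly (shape as used here, values illustrative):

    sysinfo = util.system_info()
    sysinfo['dist']      # (name, version, release), e.g. ('ubuntu', '20.04', 'focal')
    sysinfo['uname']     # uname fields; [2] = kernel release, [4] = machine
    sysinfo['python']    # e.g. '3.8.2'
    sysinfo['platform']  # full platform string
    sysinfo['variant']   # e.g. 'ubuntu'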
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -299,9 +307,15 @@ class DataSource(metaclass=abc.ABCMeta):
ec2_metadata = getattr(self, 'ec2_metadata')
if ec2_metadata != UNSET:
instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data.update(
- self._get_standardized_metadata())
instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ # Add merged cloud.cfg and sys info for jinja templates and cli query
+ instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
+ instance_data['merged_cfg']['_doc'] = (
+ 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
+ ' /etc/cloud/cloud.cfg.d/')
+ instance_data['sys_info'] = util.system_info()
+ instance_data.update(
+ self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
@@ -315,12 +329,12 @@ class DataSource(metaclass=abc.ABCMeta):
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
return False
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
- write_json(json_file, processed_data) # World readable
json_sensitive_file = os.path.join(self.paths.run_dir,
INSTANCE_JSON_SENSITIVE_FILE)
- write_json(json_sensitive_file,
- redact_sensitive_keys(processed_data), mode=0o600)
+ write_json(json_sensitive_file, processed_data, mode=0o600)
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ # World readable
+ write_json(json_file, redact_sensitive_keys(processed_data))
return True
def _get_data(self):
@@ -496,7 +510,6 @@ class DataSource(metaclass=abc.ABCMeta):
(e.g. 'ssh-rsa') and key_value is the key itself
(e.g. 'AAAAB3NzaC1y...').
"""
- pass
def _remap_device(self, short_name):
# LP: #611137
@@ -587,7 +600,7 @@ class DataSource(metaclass=abc.ABCMeta):
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
+ if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index fc760581..b968a96f 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import base64
import json
import logging
import os
@@ -8,13 +8,16 @@ import socket
import struct
import time
import textwrap
+import zlib
+from cloudinit.settings import CFG_BUILTIN
from cloudinit.net import dhcp
from cloudinit import stages
from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -32,7 +35,14 @@ DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
BOOT_EVENT_TYPE = 'boot-telemetry'
SYSTEMINFO_EVENT_TYPE = 'system-info'
DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-
+COMPRESSED_EVENT_TYPE = 'compressed'
+# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
+# at once. This number is based on the analysis done on a large sample of
+# cloud-init.log files where the P95 of the file sizes was 537KB and the time
+# consumed to dump 500KB file was (P95:76, P99:233, P99.9:1170) in ms
+# consumed to dump a 500KB file was (P95:76, P99:233, P99.9:1170) in ms.
+# Marker file to indicate whether cloud-init.log is pushed to KVP
+LOG_PUSHED_TO_KVP_MARKER_FILE = '/var/lib/cloud/data/log_pushed_to_kvp'
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
@@ -64,13 +74,15 @@ def is_byte_swapped(previous_id, current_id):
return ''.join(dd)
parts = current_id.split('-')
- swapped_id = '-'.join([
+ swapped_id = '-'.join(
+ [
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
parts[4]
- ])
+ ]
+ )
return previous_id == swapped_id
@@ -86,11 +98,13 @@ def get_boot_telemetry():
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
- except ValueError:
- raise RuntimeError("Failed to determine kernel start timestamp")
+ except ValueError as e:
+ raise RuntimeError(
+ "Failed to determine kernel start timestamp"
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl',
+ out, _ = subp.subp(['/bin/systemctl',
'show', '-p',
'UserspaceTimestampMonotonic'],
capture=True)
@@ -103,16 +117,17 @@ def get_boot_telemetry():
"UserspaceTimestampMonotonic from systemd")
user_start = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get UserspaceTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd: %s" % e
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl', 'show',
+ out, _ = subp.subp(['/bin/systemctl', 'show',
'cloud-init-local', '-p',
'InactiveExitTimestampMonotonic'],
capture=True)
@@ -124,13 +139,15 @@ def get_boot_telemetry():
"InactiveExitTimestampMonotonic from systemd")
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get InactiveExitTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd: %s"
+ % e
+ ) from e
evt = events.ReportingEvent(
BOOT_EVENT_TYPE, 'boot-telemetry',
@@ -174,6 +191,49 @@ def report_diagnostic_event(str):
return evt
+def report_compressed_event(event_name, event_content):
+ """Report a compressed event"""
+ compressed_data = base64.encodebytes(zlib.compress(event_content))
+ event_data = {"encoding": "gz+b64",
+ "data": compressed_data.decode('ascii')}
+ evt = events.ReportingEvent(
+ COMPRESSED_EVENT_TYPE, event_name,
+ json.dumps(event_data),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt,
+ excluded_handler_types={"log", "print", "webhook"})
+
+    # return the event for unit testing purposes
+ return evt
+
+
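Aside: a minimal sketch (not part of this commit) of how a consumer could
reverse the "gz+b64" encoding produced by report_compressed_event;
decode_compressed_event is an invented helper name:

    import base64
    import json
    import zlib

    def decode_compressed_event(event_json):
        # Reverse of the encoding above: JSON -> base64 -> zlib.
        payload = json.loads(event_json)
        assert payload['encoding'] == 'gz+b64'
        return zlib.decompress(base64.b64decode(payload['data']))
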
+@azure_ds_telemetry_reporter
+def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+ """Push a portion of cloud-init.log file or the whole file to KVP
+ based on the file size.
+ If called more than once, it skips pushing the log file to KVP again."""
+
+ log_pushed_to_kvp = bool(os.path.isfile(LOG_PUSHED_TO_KVP_MARKER_FILE))
+ if log_pushed_to_kvp:
+ report_diagnostic_event("cloud-init.log is already pushed to KVP")
+ return
+
+ LOG.debug("Dumping cloud-init.log file to KVP")
+ try:
+ with open(file_name, "rb") as f:
+ f.seek(0, os.SEEK_END)
+ seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, 0)
+ report_diagnostic_event(
+ "Dumping last {} bytes of cloud-init.log file to KVP".format(
+ f.tell() - seek_index))
+ f.seek(seek_index, os.SEEK_SET)
+ report_compressed_event("cloud-init.log", f.read())
+ util.write_file(LOG_PUSHED_TO_KVP_MARKER_FILE, '')
+ except Exception as ex:
+ report_diagnostic_event("Exception when dumping log file: %s" %
+ repr(ex))
+
+
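Aside: the seek arithmetic above clamps at zero so that files smaller than
the cap are pushed whole; a quick check with invented sizes:

    MAX_LOG_TO_KVP_LENGTH = 512000
    seek_index = max(600000 - MAX_LOG_TO_KVP_LENGTH, 0)   # large file
    assert 600000 - seek_index == 512000                  # tail only
    assert max(1000 - MAX_LOG_TO_KVP_LENGTH, 0) == 0      # small: whole file
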
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
@@ -192,7 +252,7 @@ def _get_dhcp_endpoint_option_name():
return azure_endpoint
-class AzureEndpointHttpClient(object):
+class AzureEndpointHttpClient:
headers = {
'x-ms-agent-name': 'WALinuxAgent',
@@ -210,57 +270,77 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers, timeout=5,
- retries=10)
+ return url_helper.readurl(url, headers=headers,
+ timeout=5, retries=10, sec_between=5)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers,
- timeout=5, retries=10)
+ return url_helper.readurl(url, data=data, headers=headers,
+ timeout=5, retries=10, sec_between=5)
-class GoalState(object):
+class InvalidGoalStateXMLException(Exception):
+ """Raised when GoalState XML is invalid or has missing data."""
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
+class GoalState:
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
+ def __init__(self, unparsed_xml, azure_endpoint_client):
+ """Parses a GoalState XML string and returns a GoalState object.
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
+ @param unparsed_xml: string representing a GoalState XML.
+ @param azure_endpoint_client: instance of AzureEndpointHttpClient
+ @return: GoalState object representing the GoalState XML string.
+ """
+ self.azure_endpoint_client = azure_endpoint_client
- @property
- def instance_id(self):
- return self._text_from_xpath(
+ try:
+ self.root = ElementTree.fromstring(unparsed_xml)
+ except ElementTree.ParseError as e:
+ msg = 'Failed to parse GoalState XML: %s'
+ LOG.warning(msg, e)
+ report_diagnostic_event(msg % (e,))
+ raise
+
+ self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.instance_id = self._text_from_xpath(
'./Container/RoleInstanceList/RoleInstance/InstanceId')
+ self.incarnation = self._text_from_xpath('./Incarnation')
+
+ for attr in ("container_id", "instance_id", "incarnation"):
+ if getattr(self, attr) is None:
+ msg = 'Missing %s in GoalState XML'
+ LOG.warning(msg, attr)
+ report_diagnostic_event(msg % (attr,))
+                raise InvalidGoalStateXMLException(msg % (attr,))
+
+ self.certificates_xml = None
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ with events.ReportEventStack(
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter):
+ self.certificates_xml = \
+ self.azure_endpoint_client.get(
+ url, secure=True).contents
+ if self.certificates_xml is None:
+ raise InvalidGoalStateXMLException(
+ 'Azure endpoint returned empty certificates xml.')
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
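
Aside: a self-contained sketch (XML invented) of what the constructor
extracts. With no Certificates element the endpoint client is never
contacted, so None suffices for azure_endpoint_client here:

    MINIMAL_GOAL_STATE = '''<?xml version="1.0" encoding="utf-8"?>
    <GoalState>
      <Incarnation>1</Incarnation>
      <Container>
        <ContainerId>c-abc123</ContainerId>
        <RoleInstanceList>
          <RoleInstance><InstanceId>i-def456</InstanceId></RoleInstance>
        </RoleInstanceList>
      </Container>
    </GoalState>'''

    gs = GoalState(MINIMAL_GOAL_STATE, azure_endpoint_client=None)
    assert (gs.container_id, gs.instance_id, gs.incarnation) == (
        'c-abc123', 'i-def456', '1')
    assert gs.certificates_xml is None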
-class OpenSSLManager(object):
+class OpenSSLManager:
certificate_names = {
'private_key': 'TransportPrivate.pem',
@@ -282,7 +362,7 @@ class OpenSSLManager(object):
LOG.debug('Certificate already generated.')
return
with cd(self.tmpdir):
- util.subp([
+ subp.subp([
'openssl', 'req', '-x509', '-nodes', '-subj',
'/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
'-keyout', self.certificate_names['private_key'],
@@ -299,14 +379,14 @@ class OpenSSLManager(object):
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
cmd = ['openssl', 'x509', '-noout', action]
- result, _ = util.subp(cmd, data=cert)
+ result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
pub_key = self._run_x509_action('-pubkey', certificate)
keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
- ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@azure_ds_telemetry_reporter
@@ -339,7 +419,7 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- out, _ = util.subp(
+ out, _ = subp.subp(
'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
@@ -367,25 +447,122 @@ class OpenSSLManager(object):
return keys
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
+class GoalStateHealthReporter:
+
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ <?xml version="1.0" encoding="utf-8"?>
+ <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+ </Health>
+ ''')
+
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ ''')
+
+ PROVISIONING_SUCCESS_STATUS = 'Ready'
+
+ def __init__(self, goal_state, azure_endpoint_client, endpoint):
+ """Creates instance that will report provisioning status to an endpoint
+
+ @param goal_state: An instance of class GoalState that contains
+ goal state info such as incarnation, container id, and instance id.
+ These 3 values are needed when reporting the provisioning status
+ to Azure
+ @param azure_endpoint_client: Instance of class AzureEndpointHttpClient
+ @param endpoint: Endpoint (string) where the provisioning status report
+ will be sent to
+ @return: Instance of class GoalStateHealthReporter
+ """
+ self._goal_state = goal_state
+ self._azure_endpoint_client = azure_endpoint_client
+ self._endpoint = endpoint
+
+ @azure_ds_telemetry_reporter
+ def send_ready_signal(self):
+ document = self.build_report(
+ incarnation=self._goal_state.incarnation,
+ container_id=self._goal_state.container_id,
+ instance_id=self._goal_state.instance_id,
+ status=self.PROVISIONING_SUCCESS_STATUS)
+ LOG.debug('Reporting ready to Azure fabric.')
+ try:
+ self._post_health_report(document=document)
+ except Exception as e:
+ msg = "exception while reporting ready: %s" % e
+ LOG.error(msg)
+ report_diagnostic_event(msg)
+ raise
+
+ LOG.info('Reported ready to Azure fabric.')
+
+ def build_report(
+ self, incarnation, container_id, instance_id,
+ status, substatus=None, description=None):
+ health_detail = ''
+ if substatus is not None:
+ health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=substatus, health_description=description)
+
+ health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ health_status=status,
+ health_detail_subsection=health_detail)
+
+ return health_report
+
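Aside: an illustrative call (values invented) showing that the Details
subsection only appears when a substatus is supplied; __init__ merely
stores its arguments, so dummies are enough to exercise build_report:

    reporter = GoalStateHealthReporter(
        goal_state=None, azure_endpoint_client=None,
        endpoint='168.63.129.16')
    doc = reporter.build_report(
        incarnation='1', container_id='c-abc123', instance_id='i-def456',
        status=GoalStateHealthReporter.PROVISIONING_SUCCESS_STATUS)
    assert '<State>Ready</State>' in doc
    assert '<Details>' not in doc   # no substatus, no Details block
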
+ @azure_ds_telemetry_reporter
+ def _post_health_report(self, document):
+ push_log_to_kvp()
+
+ # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code,
+ # the diagnostic messages are written to special files
+ # (/var/opt/hyperv/.kvp_pool_*) as Hyper-V KVP messages.
+ # Hyper-V KVP message communication is done through these files,
+ # and KVP functionality is used to communicate and share diagnostic
+ # info with the Azure Host.
+ # The Azure Host will collect the VM's Hyper-V KVP diagnostic messages
+ # when cloud-init reports to fabric.
+ # When the Azure Host receives the health report signal, it will only
+ # collect and process whatever KVP diagnostic messages have been
+ # written to the KVP files.
+        # KVP messages published after the Azure Host receives the signal
+        # are ignored and left unprocessed, so yield to the Hyper-V KVP
+        # reporting thread to make sure pending messages are written first.
+        # time.sleep(0) is a low-cost and proven way to yield to the
+        # scheduler and ensure that events are flushed.
+ # See HyperVKvpReportingHandler class, which is a multi-threaded
+ # reporting handler that writes to the special KVP files.
+ time.sleep(0)
+
+ LOG.debug('Sending health report to Azure fabric.')
+ url = "http://{}/machine?comp=health".format(self._endpoint)
+ self._azure_endpoint_client.post(
+ url,
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
+ LOG.debug('Successfully sent health report to Azure fabric')
+
+
+class WALinuxAgentShim:
def __init__(self, fallback_lease_file=None, dhcp_options=None):
LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
@@ -393,6 +570,7 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
+ self.azure_endpoint_client = None
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -469,9 +647,10 @@ class WALinuxAgentShim(object):
try:
name = os.path.basename(hook_file).replace('.json', '')
dhcp_options[name] = json.loads(util.load_file((hook_file)))
- except ValueError:
+ except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file))
+ '{_file} is not valid JSON data'.format(_file=hook_file)
+ ) from e
return dhcp_options
@staticmethod
@@ -491,7 +670,22 @@ class WALinuxAgentShim(object):
@staticmethod
@azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
+ """Finds and returns the Azure endpoint using various methods.
+
+        The Azure endpoint is searched for in the following order:
+ 1. Endpoint from dhcp options (dhcp option 245).
+ 2. Endpoint from networkd.
+ 3. Endpoint from dhclient hook json.
+ 4. Endpoint from fallback lease file.
+ 5. The default Azure endpoint.
+
+ @param fallback_lease_file: Fallback lease file that will be used
+ during endpoint search.
+ @param dhcp245: dhcp options that will be used during endpoint search.
+ @return: Azure endpoint IP address.
+ """
value = None
+
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
@@ -533,42 +727,128 @@ class WALinuxAgentShim(object):
@azure_ds_telemetry_reporter
def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ """Gets the VM's GoalState from Azure, uses the GoalState information
+ to report ready/send the ready signal/provisioning complete signal to
+ Azure, and then uses pubkey_info to filter and obtain the user's
+ pubkeys from the GoalState.
+
+ @param pubkey_info: List of pubkey values and fingerprints which are
+ used to filter and obtain the user's pubkey values from the
+ GoalState.
+ @return: The list of user's authorized pubkey values.
+ """
if self.openssl_manager is None:
self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ if self.azure_endpoint_client is None:
+ self.azure_endpoint_client = AzureEndpointHttpClient(
+ self.openssl_manager.certificate)
+ goal_state = self._fetch_goal_state_from_azure()
+ ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
+ health_reporter = GoalStateHealthReporter(
+ goal_state, self.azure_endpoint_client, self.endpoint)
+ health_reporter.send_ready_signal()
+ return {'public-keys': ssh_keys}
+
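Aside: the surrounding call pattern, mirroring get_metadata_from_fabric
further below (the lease-file path is only an example):

    shim = WALinuxAgentShim(
        fallback_lease_file='/var/lib/dhcp/dhclient.eth0.leases')
    try:
        data = shim.register_with_azure_and_fetch_data(pubkey_info=None)
        ssh_keys = data['public-keys']
    finally:
        shim.clean_up()
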
+ @azure_ds_telemetry_reporter
+ def _fetch_goal_state_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint, parses the XML,
+ and returns a GoalState object.
+
+ @return: GoalState object representing the GoalState XML
+ """
+ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
+ return self._parse_raw_goal_state_xml(unparsed_goal_state_xml)
+
+ @azure_ds_telemetry_reporter
+ def _get_raw_goal_state_xml_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint and returns
+ the XML as a string.
+
+ @return: GoalState XML string
+ """
+
LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception as e:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- report_diagnostic_event(
- "failed to register with Azure: %s" % e)
- raise
- else:
- break
- attempts += 1
+ url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ try:
+ response = self.azure_endpoint_client.get(url)
+ except Exception as e:
+ msg = 'failed to register with Azure: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- report_diagnostic_event("container_id %s" % goal_state.container_id)
+ return response.contents
+
+ @azure_ds_telemetry_reporter
+ def _parse_raw_goal_state_xml(self, unparsed_goal_state_xml):
+ """Parses a GoalState XML string and returns a GoalState object.
+
+ @param unparsed_goal_state_xml: GoalState XML string
+ @return: GoalState object representing the GoalState XML
+ """
+ try:
+ goal_state = GoalState(
+ unparsed_goal_state_xml, self.azure_endpoint_client)
+ except Exception as e:
+ msg = 'Error processing GoalState XML: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
+ msg = ', '.join([
+ 'GoalState XML container id: %s' % goal_state.container_id,
+ 'GoalState XML instance id: %s' % goal_state.instance_id,
+ 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ LOG.debug(msg)
+ report_diagnostic_event(msg)
+ return goal_state
+
+ @azure_ds_telemetry_reporter
+ def _get_user_pubkeys(self, goal_state, pubkey_info):
+ """Gets and filters the VM admin user's authorized pubkeys.
+
+ The admin user in this case is the username specified as "admin"
+ when deploying VMs on Azure.
+ See https://docs.microsoft.com/en-us/cli/azure/vm#az-vm-create.
+ cloud-init expects a straightforward array of keys to be dropped
+ into the admin user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ admin user's key(s) and return them, ignoring any other certs.
+
+ @param goal_state: GoalState object. The GoalState object contains
+ a certificate XML, which contains both the VM user's authorized
+ pubkeys and other non-user pubkeys, which are used for
+ MSI and protected extension handling.
+ @param pubkey_info: List of VM user pubkey dicts that were previously
+ obtained from provisioning data.
+ Each pubkey dict in this list can either have the format
+ pubkey['value'] or pubkey['fingerprint'].
+ Each pubkey['fingerprint'] in the list is used to filter
+ and obtain the actual pubkey value from the GoalState
+ certificates XML.
+ Each pubkey['value'] requires no further processing and is
+ immediately added to the return list.
+ @return: A list of the VM user's authorized pubkey values.
+ """
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
- self._report_ready(goal_state, http_client)
- return {'public-keys': ssh_keys}
+ return ssh_keys
- def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
- """cloud-init expects a straightforward array of keys to be dropped
- into the user's authorized_keys file. Azure control plane exposes
- multiple public keys to the VM via wireserver. Select just the
- user's key(s) and return them, ignoring any other certs.
+ @staticmethod
+ def _filter_pubkeys(keys_by_fingerprint, pubkey_info):
+ """ Filter and return only the user's actual pubkeys.
+
+ @param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
+ that was obtained from GoalState Certificates XML. May contain
+ non-user pubkeys.
+ @param pubkey_info: List of VM user pubkeys. Pubkey values are added
+ to the return list without further processing. Pubkey fingerprints
+ are used to filter and obtain the actual pubkey values from
+ keys_by_fingerprint.
+ @return: A list of the VM user's authorized pubkey values.
"""
keys = []
for pubkey in pubkey_info:
@@ -587,30 +867,6 @@ class WALinuxAgentShim(object):
return keys
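
Aside: an illustration of the filtering contract documented above (values
invented; the loop body is elided by the diff, so this follows the
docstring only): 'value' entries pass through unchanged, 'fingerprint'
entries are resolved via keys_by_fingerprint.

    keys_by_fingerprint = {'AA:BB:CC': 'ssh-rsa AAAAB3... admin@host'}
    pubkey_info = [
        {'fingerprint': 'AA:BB:CC'},
        {'value': 'ssh-ed25519 AAAAC3... admin@host'},
    ]
    # WALinuxAgentShim._filter_pubkeys(keys_by_fingerprint, pubkey_info)
    # returns both pubkey values for this input.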
- @azure_ds_telemetry_reporter
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- # Host will collect kvps when cloud-init reports ready.
- # some kvps might still be in the queue. We yield the scheduler
- # to make sure we process all kvps up till this point.
- time.sleep(0)
- try:
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- except Exception as e:
- report_diagnostic_event("exception while reporting ready: %s" % e)
- raise
-
- LOG.info('Reported ready to Azure fabric.')
-
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
@@ -623,10 +879,16 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
shim.clean_up()
-class EphemeralDHCPv4WithReporting(object):
+def dhcp_log_cb(out, err):
+ report_diagnostic_event("dhclient output stream: %s" % out)
+ report_diagnostic_event("dhclient error stream: %s" % err)
+
+
+class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(
+ iface=nic, dhcp_log_func=dhcp_log_cb)
def __enter__(self):
with events.ReportEventStack(
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 0e7cccac..b545c4d6 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,6 +8,7 @@ import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
+from cloudinit import subp
from cloudinit import util
NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
@@ -15,7 +16,7 @@ NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
LOG = logging.getLogger(__name__)
-def assign_ipv4_link_local(nic=None):
+def assign_ipv4_link_local(distro, nic=None):
"""Bring up NIC using an address using link-local (ip4LL) IPs. On
DigitalOcean, the link-local domain is per-droplet routed, so there
is no risk of collisions. However, to be more safe, the ip4LL
@@ -23,7 +24,7 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- nic = get_link_local_nic()
+ nic = get_link_local_nic(distro)
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
@@ -36,14 +37,14 @@ def assign_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
- if not util.which('ip'):
+ if not subp.which('ip'):
raise RuntimeError("No 'ip' command available to configure ip4LL "
"address")
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
- util.subp(ip_link_cmd)
+ subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -53,8 +54,12 @@ def assign_ipv4_link_local(nic=None):
return nic
-def get_link_local_nic():
- nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+def get_link_local_nic(distro):
+ nics = [
+ f
+ for f in cloudnet.get_devicelist()
+ if distro.networking.is_physical(f)
+ ]
if not nics:
return None
return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
@@ -74,7 +79,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 2554530d..72edb023 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -7,6 +7,9 @@ from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
+import base64
+import binascii
+
LOG = logging.getLogger(__name__)
@@ -24,3 +27,19 @@ def read_userdata(url, timeout=2, sec_between=2, retries=30):
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
+
+
+def maybe_b64decode(data: bytes) -> bytes:
+ """base64 decode data
+
+ If data is base64 encoded bytes, return b64decode(data).
+ If not, return data unmodified.
+
+ @param data: data as bytes. TypeError is raised if not bytes.
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("data is '%s', expected bytes" % type(data))
+ try:
+ return base64.b64decode(data, validate=True)
+ except binascii.Error:
+ return data
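
Aside: expected behaviour of maybe_b64decode, following directly from the
function body above:

    assert maybe_b64decode(b'aGVsbG8=') == b'hello'           # valid base64
    assert maybe_b64decode(b'not base64!') == b'not base64!'  # passthrough
    try:
        maybe_b64decode('text')   # str, not bytes
    except TypeError:
        pass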
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index d377ae3d..c2ad587b 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -55,7 +55,6 @@ NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
class NetlinkCreateSocketError(RuntimeError):
'''Raised if netlink socket fails during create or bind.'''
- pass
def create_bound_netlink_socket():
@@ -75,7 +74,7 @@ def create_bound_netlink_socket():
netlink_socket.setblocking(0)
except socket.error as e:
msg = "Exception during netlink socket create: %s" % e
- raise NetlinkCreateSocketError(msg)
+ raise NetlinkCreateSocketError(msg) from e
LOG.debug("Created netlink socket")
return netlink_socket
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 441db506..65e020c5 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -16,6 +16,7 @@ from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources import BrokenMetadata
@@ -68,6 +69,7 @@ KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
+ 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
'dvs',
'ethernet',
'hw_veb',
@@ -109,7 +111,7 @@ class SourceMixin(object):
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return device
@@ -278,8 +280,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
data = translator(data)
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
if found:
results[name] = data
@@ -289,8 +292,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
metadata['random_seed'] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
+ raise BrokenMetadata(
+ "Badly formatted metadata random_seed entry: %s" % e
+ ) from e
# load any files that were provided
files = {}
@@ -302,8 +306,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
files[path] = self._read_content_path(item)
except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to read provided file %s: %s" % (path, e)
+ ) from e
results['files'] = files
# The 'network_config' item in metadata is a content pointer
@@ -315,8 +320,9 @@ class BaseReader(metaclass=abc.ABCMeta):
content = self._read_content_path(net_item, decode=True)
results['network_config'] = content
except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
+ raise BrokenMetadata(
+ "Failed to read network configuration: %s" % (e)
+ ) from e
# To openstack, user can specify meta ('nova boot --meta=key=value')
# and those will appear under metadata['meta'].
@@ -368,8 +374,9 @@ class ConfigDriveReader(BaseReader):
try:
return util.load_json(self._path_read(path))
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
def read_v1(self):
"""Reads a version 1 formatted location.
@@ -393,13 +400,17 @@ class ConfigDriveReader(BaseReader):
path = found[name]
try:
contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
+ except IOError as e:
+ raise BrokenMetadata("Failed to read: %s" % path) from e
try:
- md[key] = translator(contents)
+ # Disable not-callable pylint check; pylint isn't able to
+ # determine that every member of FILES_V1 has a callable in
+ # the appropriate position
+ md[key] = translator(contents) # pylint: disable=E1102
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
else:
md[key] = copy.deepcopy(default)
@@ -410,8 +421,11 @@ class ConfigDriveReader(BaseReader):
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ md['public-keys'] = [
+ line
+ for line in lines
+ if len(line) and not line.startswith("#")
+ ]
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
@@ -673,11 +687,13 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
- for cfg, key, fmt, target in link_updates:
- if isinstance(target, (list, tuple)):
- cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ for cfg, key, fmt, targets in link_updates:
+ if isinstance(targets, (list, tuple)):
+ cfg[key] = [
+ fmt % link_id_info[target]['name'] for target in targets
+ ]
else:
- cfg[key] = fmt % link_id_info[target]['name']
+ cfg[key] = fmt % link_id_info[targets]['name']
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index c2898a16..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -87,7 +87,7 @@ class TestParseNetlinkMessage(CiTestCase):
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertTrue('data is none', str(context.exception))
+ self.assertEqual('data is none', str(context.exception))
def test_read_invalid_rta_operstate_none(self):
'''read_rta_oper_state returns none if operstate is none'''
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
new file mode 100644
index 00000000..2bde1e3f
--- /dev/null
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.tests import helpers as test_helpers
+
+
+class TestConvertNetJson(test_helpers.CiTestCase):
+
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
+ "mtu": None, "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ ],
+ "networks": [
+ {"id": "network0", "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp"}
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}]
+ }
+ macs = {mac0: 'eth0'}
+
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': 'fa:16:3e:9c:bf:3d',
+ 'mtu': None, 'name': 'eth0',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical'},
+ {'address': '8.8.8.8', 'type': 'nameserver'}]}
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(network_json=net_json,
+ known_macs=macs))
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 2eaeff34..7109aef3 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -25,6 +25,8 @@ class Config(object):
SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
+ POST_GC_STATUS = 'MISC|POST-GC-STATUS'
+ DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
def __init__(self, configFile):
self._configFile = configFile
@@ -104,4 +106,28 @@ class Config(object):
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
+
+ @property
+ def post_gc_status(self):
+ """Return whether to post guestinfo.gc.status VMX property."""
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = postGcStatus.lower()
+ if postGcStatus not in ('yes', 'no'):
+ raise ValueError('PostGcStatus value should be yes/no')
+ return postGcStatus == 'yes'
+
+ @property
+ def default_run_post_script(self):
+ """
+        Return the default value for enable-custom-scripts when
+        enable-custom-scripts is absent from the VM Tools configuration.
+ """
+ defaultRunPostScript = self._configFile.get(
+ Config.DEFAULT_RUN_POST_SCRIPT,
+ 'no')
+ defaultRunPostScript = defaultRunPostScript.lower()
+ if defaultRunPostScript not in ('yes', 'no'):
+ raise ValueError('defaultRunPostScript value should be yes/no')
+ return defaultRunPostScript == 'yes'
+
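Aside: these properties read flattened 'CATEGORY|KEY' entries from the
parsed customization config. Assuming the usual INI-style cust.cfg layout
(an assumption; values illustrative), the corresponding input would be:

    [MISC]
    POST-GC-STATUS = yes
    DEFAULT-RUN-POST-CUST-SCRIPT = no
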
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 9f14770e..2ab22de9 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,6 +9,7 @@ import logging
import os
import stat
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,7 +62,7 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
+ subp.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 602af078..fc034c95 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -22,7 +22,6 @@ class ConfigFile(ConfigSource, dict):
def __init__(self, filename):
self._loadConfigFile(filename)
- pass
def _insertKey(self, key, val):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 2f29edd4..5899d8f7 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -10,6 +10,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 77cbf3b6..3745a262 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -10,6 +10,7 @@ import os
import re
from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
logger = logging.getLogger(__name__)
@@ -73,7 +74,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- output, _err = util.subp(cmd)
+ output, _err = subp.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -248,8 +249,8 @@ class NicConfigurator(object):
logger.info('Clearing DHCP leases')
# Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+ subp.subp(["pkill", "dhclient"], rcs=[0, 1])
+ subp.subp(["rm", "-f", "/var/lib/dhcp/*"])
def configure(self, osfamily=None):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 8c91fa41..d16a7690 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,6 +9,7 @@
import logging
import os
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -56,10 +57,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- util.subp(['passwd', '--expire', user])
- except util.ProcessExecutionError as e:
+ subp.subp(['passwd', '--expire', user])
+ except subp.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- util.subp(['chage', '-d', '0', user])
+ subp.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 2f8ea546..7ec06a9c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -8,6 +8,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 3d369d04..d919f693 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -10,7 +10,7 @@ import os
import re
import time
-from cloudinit import util
+from cloudinit import subp
from .guestcust_event import GuestCustEventEnum
from .guestcust_state import GuestCustStateEnum
@@ -34,7 +34,7 @@ def send_rpc(rpc):
try:
logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ (out, err) = subp.subp(["vmware-rpctool", rpc], rcs=[0])
# Remove the trailing newline in the output.
if out:
out = out.rstrip()
@@ -128,30 +128,46 @@ def get_tools_config(section, key, defaultVal):
not installed.
"""
- if not util.which('vmware-toolbox-cmd'):
+ if not subp.which('vmware-toolbox-cmd'):
logger.debug(
'vmware-toolbox-cmd not installed, returning default value')
return defaultVal
- retValue = defaultVal
cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
try:
- (outText, _) = util.subp(cmd)
- m = re.match(r'([^=]+)=(.*)', outText)
- if m:
- retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
- else:
+ (outText, _) = subp.subp(cmd)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 69:
logger.debug(
- "Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
- except util.ProcessExecutionError as e:
- logger.error("Failed running %s[%s]", cmd, e.exit_code)
- logger.exception(e)
+ "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
+ " Return default value: %s", " ".join(cmd), defaultVal)
+ else:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+ return defaultVal
+
+ retValue = defaultVal
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
return retValue
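
Aside: vmware-toolbox-cmd prints 'key = value'; the regex above splits at
the first '=' and keeps everything after it. A quick check with an
invented output line:

    import re

    m = re.match(r'([^=]+)=(.*)', 'enable-custom-scripts = true\n')
    assert m is not None and m.group(2).strip() == 'true'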
+# Set a customization status message on the underlying VMware
+# Virtualization Platform via the VMX guestinfo.gc.status property.
+def set_gc_status(config, gcMsg):
+ if config and config.post_gc_status:
+ rpc = "info-set guestinfo.gc.status %s" % gcMsg
+ return send_rpc(rpc)
+ return None
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index f73b37ed..1420a988 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -55,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource):
class TestDataSource(CiTestCase):
with_logs = True
+ maxDiff = None
def setUp(self):
super(TestDataSource, self).setUp()
@@ -288,27 +289,47 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': [],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': ['merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
@@ -318,8 +339,8 @@ class TestDataSource(CiTestCase):
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
- def test_get_data_writes_json_instance_data_sensitive(self):
- """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
@@ -329,33 +350,49 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': {
'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertEqual(
- ('security-credentials',), datasource.sensitive_metadata_keys)
- datasource.get_data()
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
redacted = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'cred1': 'sekret', 'cred2': 'othersekret'},
- redacted['ds']['meta_data']['some']['security-credentials'])
- content = util.load_file(sensitive_json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {
@@ -364,8 +401,83 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
}
- self.maxDiff = None
- self.assertEqual(expected, util.load_json(content))
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'merged_cfg': {
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ ),
+ 'datasource': {'_undef': {'key1': False}}},
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
+ 'v1': {
+ '_beta_keys': ['subplatform'],
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'kernel_release': '5.4.0-24-generic',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'machine': 'x86_64',
+ 'platform': 'mytestsubclass',
+ 'public_ssh_keys': [],
+ 'python_version': '3.7',
+ 'region': 'myregion',
+ 'subplatform': 'unknown',
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'variant': 'ubuntu'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {
+ 'security-credentials':
+ {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ }
+ self.assertCountEqual(expected, util.load_json(content))
file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
@@ -431,7 +543,7 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds/meta_data/key2/key2.1'],
instance_json['base64_encoded_keys'])
self.assertEqual(
@@ -440,9 +552,7 @@ class TestDataSource(CiTestCase):
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
- # Use inspect.getfullargspec when we drop py2.6 and py2.7
- get_args = inspect.getargspec # pylint: disable=W1505
- base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
for _loc, name in modules.items():
@@ -454,13 +564,13 @@ class TestDataSource(CiTestCase):
continue
self.assertEqual(
base_args,
- get_args(child.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(child.get_hostname),
'%s does not implement DataSource.get_hostname params'
% child)
for grandchild in child.__subclasses__():
self.assertEqual(
base_args,
- get_args(grandchild.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(grandchild.get_hostname),
'%s does not implement DataSource.get_hostname params'
% grandchild)
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index abf3d359..7bd23813 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,22 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata, NetworkConfigSource
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as test_helpers
-
-from textwrap import dedent
-import argparse
+import base64
import copy
-import httpretty
import json
-import os
-import uuid
+from contextlib import ExitStack
from unittest import mock
+import pytest
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.sources.DataSourceOracle import OpcMetadata
+from cloudinit.tests import helpers as test_helpers
+from cloudinit.url_helper import UrlError
+
DS_PATH = "cloudinit.sources.DataSourceOracle"
-MD_VER = "2013-10-17"
# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine
# with a secondary VNIC attached (vnicId truncated for Python line length)
@@ -59,328 +57,99 @@ OPC_VM_SECONDARY_VNIC_RESPONSE = """\
} ]"""
-class TestDataSourceOracle(test_helpers.CiTestCase):
- """Test datasource DataSourceOracle."""
-
- with_logs = True
-
- ds_class = oracle.DataSourceOracle
-
- my_uuid = str(uuid.uuid4())
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def _patch_instance(self, inst, patches):
- """Patch an instance of a class 'inst'.
- for each name, kwargs in patches:
- inst.name = mock.Mock(**kwargs)
- returns a namespace object that has
- namespace.name = mock.Mock(**kwargs)
- Do not bother with cleanup as instance is assumed transient."""
- mocks = argparse.Namespace()
- for name, kwargs in patches.items():
- imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
- setattr(mocks, name, imock)
- setattr(inst, name, imock)
- return mocks
-
- def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
- patches=None):
- if sys_cfg is None:
- sys_cfg = {}
- if patches is None:
- patches = {}
- if paths is None:
- tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
- for d in dirs.values():
- os.mkdir(d)
- paths = helpers.Paths(dirs)
-
- ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
- paths=paths, ud_proc=ud_proc)
-
- return ds, self._patch_instance(ds, patches)
-
- def test_platform_not_viable_returns_false(self):
- ds, mocks = self._get_ds(
- patches={'_is_platform_viable': {'return_value': False}})
- self.assertFalse(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
-
- def test_platform_info(self):
- """Return platform-related information for Oracle Datasource."""
- ds, _mocks = self._get_ds()
- self.assertEqual('oracle', ds.cloud_name)
- self.assertEqual('oracle', ds.platform_type)
- self.assertEqual(
- 'metadata (http://169.254.169.254/openstack/)', ds.subplatform)
-
- def test_sys_cfg_can_enable_configure_secondary_nics(self):
- # Confirm that behaviour is toggled by sys_cfg
- ds, _mocks = self._get_ds()
- self.assertFalse(ds.ds_cfg['configure_secondary_nics'])
-
- sys_cfg = {
- 'datasource': {'Oracle': {'configure_secondary_nics': True}}}
- ds, _mocks = self._get_ds(sys_cfg=sys_cfg)
- self.assertTrue(ds.ds_cfg['configure_secondary_nics'])
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_without_userdata(self, m_is_iscsi_root):
- """If no user-data is provided, it should not be in return dict."""
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertIsNone(ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_vendordata(self, m_is_iscsi_root):
- """Test with vendor data."""
- vd = {'cloud-init': '#cloud-config\nkey: value'}
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'vendor_data': vd}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(vd, ds.vendordata_pure)
- self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_userdata(self, m_is_iscsi_root):
- """Ensure user-data is populated if present and is binary."""
- my_userdata = b'abcdefg'
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'user_data': my_userdata}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertEqual(my_userdata, ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """network_config should read kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = ncfg
- self.assertTrue(ds._get_data())
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- self.assertFalse(distro.generate_fallback_config.called)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """test that fallback network is generated if no kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = None
- self.assertTrue(ds._get_data())
- ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
- distro.generate_fallback_config.return_value = ncfg
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- distro.generate_fallback_config.assert_called_once_with()
-
- # test that the result got cached, and the methods not re-called.
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual(1, m_initramfs_config.call_count)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_added_to_network_config_if_enabled(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- needle = object()
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = needle
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(needle, ds.network_config['secondary_added'])
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_not_added_to_network_config_by_default(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = True
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertNotIn('secondary_added', ds.network_config)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nic_failure_isnt_blocking(
- self, _m_is_iscsi_root, m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- m_add_network_config_from_opc_imds.side_effect = Exception()
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(ds.network_config, m_initramfs_config.return_value)
- self.assertIn('Failed to fetch secondary network configuration',
- self.logs.getvalue())
-
- def test_ds_network_cfg_preferred_over_initramfs(self):
- """Ensure that DS net config is preferred over initramfs config"""
- network_config_sources = oracle.DataSourceOracle.network_config_sources
- self.assertLess(
- network_config_sources.index(NetworkConfigSource.ds),
- network_config_sources.index(NetworkConfigSource.initramfs)
- )
-
-
-@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
-class TestReadMetaData(test_helpers.HttprettyTestCase):
- """Test the read_metadata which interacts with http metadata service."""
-
- mdurl = oracle.METADATA_ENDPOINT
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def populate_md(self, data):
- """call httppretty.register_url for each item dict 'data',
- including valid indexes. Text values converted to bytes."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(data.keys()).encode('utf-8'))
- for k, v in data.items():
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, str) else v.encode('utf-8'))
-
- def test_broken_no_sys_uuid(self, m_read_system_uuid):
- """Datasource requires ability to read system_uuid and true return."""
- m_read_system_uuid.return_value = None
- self.assertRaises(BrokenMetadata, oracle.read_metadata)
-
- def test_broken_no_metadata_json(self, m_read_system_uuid):
- """Datasource requires meta_data.json."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(['user_data']).encode('utf-8'))
- with self.assertRaises(BrokenMetadata) as cm:
- oracle.read_metadata()
- self.assertIn("Required field 'meta_data.json' missing",
- str(cm.exception))
-
- def test_with_userdata(self, m_read_system_uuid):
- data = {'user_data': b'#!/bin/sh\necho hi world\n',
- 'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertEqual(data['user_data'], result['user_data'])
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_without_userdata(self, m_read_system_uuid):
- data = {'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_unknown_fields_included(self, m_read_system_uuid):
- """Unknown fields listed in index should be included.
- And those ending in .json should be decoded."""
- some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
- some_vendor_data = {'cloud-init': 'foo'}
- data = {'meta_data.json': json.dumps(self.my_md),
- 'some_data.json': json.dumps(some_data),
- 'vendor_data.json': json.dumps(some_vendor_data),
- 'other_blob': b'this is blob'}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
- self.assertEqual(some_data, result['some_data'])
- self.assertEqual(some_vendor_data, result['vendor_data'])
- self.assertEqual(data['other_blob'], result['other_blob'])
+# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
+# truncated for line length)
+OPC_V2_METADATA = """\
+{
+ "availabilityDomain" : "qIZq:PHX-AD-1",
+ "faultDomain" : "FAULT-DOMAIN-2",
+ "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
+ "displayName" : "instance-20200320-1400",
+ "hostname" : "instance-20200320-1400",
+ "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
+ "metadata" : {
+ "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
+ },
+ "region" : "phx",
+ "canonicalRegionName" : "us-phoenix-1",
+ "ociAdName" : "phx-ad-3",
+ "shape" : "VM.Standard2.1",
+ "state" : "Running",
+ "timeCreated" : 1584727285318,
+ "agentConfig" : {
+ "monitoringDisabled" : true,
+ "managementDisabled" : true
+ }
+}"""
+
+# Just a small, meaningless change to differentiate the two metadata versions
+OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+
+
+@pytest.fixture
+def metadata_version():
+ return 2
+
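+# (The fixture above only supplies a default: pytest lets
+# ``@pytest.mark.parametrize`` override an argname that shadows a fixture of
+# the same name, which is how ``test_v1_platform_info_after_fetch`` below
+# selects version 1.)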
+
+@pytest.yield_fixture
+def oracle_ds(request, fixture_utils, paths, metadata_version):
+ """
+ Return an instantiated DataSourceOracle.
+
+ This also performs the mocking required for the default test case:
+ * ``_read_system_uuid`` returns something,
+ * ``_is_platform_viable`` returns True,
+ * ``_is_iscsi_root`` returns True (the simpler code path),
+ * ``read_opc_metadata`` returns ``OPC_V2_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object, and the
+ fixture_utils fixture for fetching markers.)
+ """
+ sys_cfg = fixture_utils.closest_marker_first_arg_or(
+ request, "ds_sys_cfg", mock.MagicMock()
+ )
+ metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
+ with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
+ with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ return_value=metadata,
+ ):
+ yield oracle.DataSourceOracle(
+ sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
+ )
+
+
+class TestDataSourceOracle:
+ def test_platform_info(self, oracle_ds):
+ assert "oracle" == oracle_ds.cloud_name
+ assert "oracle" == oracle_ds.platform_type
+
+ def test_subplatform_before_fetch(self, oracle_ds):
+ assert 'unknown' == oracle_ds.subplatform
+
+ def test_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v2/)' == \
+ oracle_ds.subplatform
+
+ @pytest.mark.parametrize('metadata_version', [1])
+ def test_v1_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v1/)' == \
+ oracle_ds.subplatform
+
+ def test_secondary_nics_disabled_by_default(self, oracle_ds):
+ assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+ @pytest.mark.ds_sys_cfg(
+ {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+ )
+ def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+ assert oracle_ds.ds_cfg["configure_secondary_nics"]
class TestIsPlatformViable(test_helpers.CiTestCase):
@@ -404,192 +173,99 @@ class TestIsPlatformViable(test_helpers.CiTestCase):
m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-class TestLoadIndex(test_helpers.CiTestCase):
- """_load_index handles parsing of an index into a proper list.
- The tests here guarantee correct parsing of html version or
- a fixed version. See the function docstring for more doc."""
-
- _known_html_api_versions = dedent("""\
- <html>
- <head><title>Index of /openstack/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
- <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
- <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
- </pre><hr></body>
- </html>""")
-
- _known_html_contents = dedent("""\
- <html>
- <head><title>Index of /openstack/2013-10-17/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
- <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
- <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
- </pre><hr></body>
- </html>""")
-
- def test_parse_html(self):
- """Test parsing of lower case html."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(self._known_html_api_versions))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(self._known_html_contents))
-
- def test_parse_html_upper(self):
- """Test parsing of upper case html, although known content is lower."""
- def _toupper(data):
- return data.replace("<a", "<A").replace("html>", "HTML>")
-
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(_toupper(self._known_html_api_versions)))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(_toupper(self._known_html_contents)))
-
- def test_parse_newline_list_with_endl(self):
- """Test parsing of newline separated list with ending newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
-
- def test_parse_newline_list_without_endl(self):
- """Test parsing of newline separated list with no ending newline.
-
- Actual openstack implementation does not include trailing newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data"])))
-
-
-class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestNetworkConfigFromOpcImds, self).setUp()
- self.add_patch(DS_PATH + '.readurl', 'm_readurl')
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
-
- def test_failure_to_readurl(self):
- # readurl failures should just bubble out to the caller
- self.m_readurl.side_effect = Exception('oh no')
- with self.assertRaises(Exception) as excinfo:
- oracle._add_network_config_from_opc_imds({})
- self.assertEqual(str(excinfo.exception), 'oh no')
-
- def test_empty_response(self):
- # empty response error should just bubble out to the caller
- self.m_readurl.return_value = ''
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_invalid_json(self):
- # invalid JSON error should just bubble out to the caller
- self.m_readurl.return_value = '{'
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_no_secondary_nics_does_not_mutate_input(self):
- self.m_readurl.return_value = json.dumps([{}])
- # We test this by passing in a non-dict to ensure that no dict
+class TestNetworkConfigFromOpcImds:
+ def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+ oracle_ds._vnics_data = [{}]
+ # We test this by using a non-dict to ensure that no dict
# operations are used; failure would be seen as exceptions
- oracle._add_network_config_from_opc_imds(object())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
- def test_bare_metal_machine_skipped(self):
+ def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
# nicIndex in the first entry indicates a bare metal machine
- self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE
- # We test this by passing in a non-dict to ensure that no dict
+ oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+ # We test this by using a non-dict to ensure that no dict
# operations are used
- self.assertFalse(oracle._add_network_config_from_opc_imds(object()))
- self.assertIn('bare metal machine', self.logs.getvalue())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+ assert 'bare metal machine' in caplog.text
- def test_missing_mac_skipped(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['config']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['config'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_missing_mac_skipped_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['ethernets']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['ethernets'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_secondary_nic(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
}
-
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['config']))
+ assert 2 == len(oracle_ds.network_config['config'])
- secondary_nic_cfg = network_config['config'][1]
- self.assertEqual(nic_name, secondary_nic_cfg['name'])
- self.assertEqual('physical', secondary_nic_cfg['type'])
- self.assertEqual(mac_addr, secondary_nic_cfg['mac_address'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['config'][1]
+ assert nic_name == secondary_nic_cfg['name']
+ assert 'physical' == secondary_nic_cfg['type']
+ assert mac_addr == secondary_nic_cfg['mac_address']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['subnets']))
+ assert 1 == len(secondary_nic_cfg['subnets'])
subnet_cfg = secondary_nic_cfg['subnets'][0]
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', subnet_cfg['address'])
+ assert '10.0.0.231' == subnet_cfg['address']
- def test_secondary_nic_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic_v2(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
}
-
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['ethernets']))
+ assert 2 == len(oracle_ds.network_config['ethernets'])
- secondary_nic_cfg = network_config['ethernets']['ens3']
- self.assertFalse(secondary_nic_cfg['dhcp4'])
- self.assertFalse(secondary_nic_cfg['dhcp6'])
- self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
+ assert secondary_nic_cfg['dhcp4'] is False
+ assert secondary_nic_cfg['dhcp6'] is False
+ assert mac_addr == secondary_nic_cfg['match']['macaddress']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['addresses']))
+ assert 1 == len(secondary_nic_cfg['addresses'])
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
+ assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
self.add_patch(DS_PATH + '.get_interfaces_by_mac',
@@ -732,4 +408,378 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
self.assertEqual(expected_cfg, netcfg)
+def _mock_v2_urls(httpretty):
+ def instance_callback(request, uri, response_headers):
+ print(response_headers)
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_V2_METADATA]
+
+ def vnics_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ body=instance_callback
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/vnics/",
+ body=vnics_callback
+ )
+
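+# (Per the callbacks above, the v2 endpoints require the
+# "Authorization: Bearer Oracle" header; the v1 registrations below make no
+# assertion about request headers.)
+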
+
+def _mock_no_v2_urls(httpretty):
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ status=404,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ body=OPC_V1_METADATA
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/vnics/",
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE
+ )
+
+
+class TestReadOpcMetadata:
+ # See https://docs.pytest.org/en/stable/example/parametrize.html#parametrizing-conditional-raising
+ does_not_raise = ExitStack
+
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+ ]
+ )
+ def test_metadata_returned(
+ self, version, setup_urls, instance_data,
+ fetch_vnics, vnics_data, httpretty
+ ):
+ setup_urls(httpretty)
+ metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+ assert version == metadata.version
+ assert instance_data == metadata.instance_data
+ assert vnics_data == metadata.vnics_data
+
+ # No need to actually wait between retries in the tests
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "v2_failure_count,v1_failure_count,expected_body,expectation",
+ [
+ (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 3, None, pytest.raises(UrlError)),
+ ]
+ )
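+ # Reading the table above: read_opc_metadata() retries each endpoint, so
+ # up to two v2 failures still return the v2 body; a third v2 failure
+ # falls back to v1, which likewise tolerates up to two failures before
+ # UrlError is raised.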
+ def test_retries(self, v2_failure_count, v1_failure_count,
+ expected_body, expectation, httpretty):
+ v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
+ v2_responses.append(httpretty.Response(OPC_V2_METADATA))
+ v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
+ v1_responses.append(httpretty.Response(OPC_V1_METADATA))
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ responses=v1_responses,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ responses=v2_responses,
+ )
+ with expectation:
+ assert expected_body == oracle.read_opc_metadata().instance_data
+
+
+class TestCommon_GetDataBehaviour:
+ """This test class tests behaviour common to iSCSI and non-iSCSI root.
+
+ It defines a fixture, parameterized_oracle_ds, which is used in all the
+ tests herein to test that the commonly expected behaviour is the same with
+ iSCSI root and without.
+
+ (As non-iSCSI root behaviour is a superset of iSCSI root behaviour, this
+ class implicitly also tests all iSCSI root behaviour, so there is no
+ separate class for that case.)
+ """
+
+ @pytest.yield_fixture(params=[True, False])
+ def parameterized_oracle_ds(self, request, oracle_ds):
+ """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
+ is_iscsi_root = request.param
+ with ExitStack() as stack:
+ stack.enter_context(
+ mock.patch(
+ DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
+ )
+ )
+ if not is_iscsi_root:
+ stack.enter_context(
+ mock.patch(DS_PATH + ".net.find_fallback_nic")
+ )
+ stack.enter_context(
+ mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ )
+ yield oracle_ds
+
+ @mock.patch(
+ DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ )
+ def test_false_if_platform_not_viable(
+ self, parameterized_oracle_ds,
+ ):
+ assert not parameterized_oracle_ds._get_data()
+
+ @pytest.mark.parametrize(
+ "keyname,expected_value",
+ (
+ ("availability-zone", "phx-ad-3"),
+ ("launch-index", 0),
+ ("local-hostname", "instance-20200320-1400"),
+ (
+ "instance-id",
+ "ocid1.instance.oc1.phx"
+ ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ ),
+ ("name", "instance-20200320-1400"),
+ (
+ "public_keys",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ ),
+ ),
+ )
+ def test_metadata_keys_set_correctly(
+ self, keyname, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == parameterized_oracle_ds.metadata[keyname]
+
+ @pytest.mark.parametrize(
+ "attribute_name,expected_value",
+ [
+ ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
+ (
+ "userdata_raw",
+ base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
+ ),
+ ("system_uuid", "my-test-uuid"),
+ ],
+ )
+ @mock.patch(
+ DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
+ )
+ def test_attributes_set_correctly(
+ self, attribute_name, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == getattr(
+ parameterized_oracle_ds, attribute_name
+ )
+
+ @pytest.mark.parametrize(
+ "ssh_keys,expected_value",
+ [
+ # No SSH keys in metadata => no keys detected
+ (None, []),
+ # Empty SSH keys in metadata => no keys detected
+ ("", []),
+ # Single SSH key in metadata => single key detected
+ ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
+ # Multiple SSH keys in metadata => multiple keys detected
+ (
+ "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
+ ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
+ ),
+ ],
+ )
+ def test_public_keys_handled_correctly(
+ self, ssh_keys, expected_value, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ if ssh_keys is None:
+ del instance_data["metadata"]["ssh_authorized_keys"]
+ else:
+ instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert (
+ expected_value == parameterized_oracle_ds.get_public_ssh_keys()
+ )
+
+ def test_missing_user_data_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]["user_data"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+
+ def test_missing_metadata_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+ assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+
+
+@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+class TestNonIscsiRoot_GetDataBehaviour:
+ @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
+ in_context_manager = False
+
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
+
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
+
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
+
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
+
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(m_find_fallback_nic.return_value)
+ ] == m_EphemeralDHCPv4.call_args_list
+
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
+@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+
+ def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+ m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+
+ assert ncfg == oracle_ds.network_config
+ assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+
+ def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+ m_read_initramfs_config.return_value = None
+ oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+ ncfg
+ )
+
+ assert ncfg == oracle_ds.network_config
+
+ @pytest.mark.parametrize(
+ "configure_secondary_nics,expect_secondary_nics",
+ [(True, True), (False, False), (None, False)],
+ )
+ def test_secondary_nic_addition(
+ self,
+ m_read_initramfs_config,
+ configure_secondary_nics,
+ expect_secondary_nics,
+ oracle_ds,
+ ):
+ """Test that _add_network_config_from_opc_imds is called as expected
+
+ (configure_secondary_nics=None is used to test the default behaviour.)
+ """
+ m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+ if configure_secondary_nics is not None:
+ oracle_ds.ds_cfg[
+ "configure_secondary_nics"
+ ] = configure_secondary_nics
+
+ def side_effect(self):
+ self._network_config["secondary_added"] = mock.sentinel.needle
+
+ oracle_ds._vnics_data = 'DummyData'
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ new=side_effect,
+ ):
+ was_secondary_added = "secondary_added" in oracle_ds.network_config
+ assert expect_secondary_nics == was_secondary_added
+
+ def test_secondary_nic_failure_isnt_blocking(
+ self,
+ m_read_initramfs_config,
+ caplog,
+ oracle_ds,
+ ):
+ oracle_ds.ds_cfg["configure_secondary_nics"] = True
+ oracle_ds._vnics_data = "DummyData"
+
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ side_effect=Exception()
+ ):
+ network_config = oracle_ds.network_config
+ assert network_config == m_read_initramfs_config.return_value
+ assert "Failed to parse secondary network configuration" in caplog.text
+
+ def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+ """Ensure that DS net config is preferred over initramfs config"""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ ds_idx = config_sources.index(NetworkConfigSource.ds)
+ initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+ assert ds_idx < initramfs_idx
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index c3a9b5b7..c08042d6 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -17,34 +17,52 @@ LOG = logging.getLogger(__name__)
# See: man sshd_config
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-# taken from OpenSSH source openssh-7.3p1/sshkey.c:
-# static const struct keytype keytypes[] = { ... }
+# This list has been filtered from the keytypes table in the OpenSSH
+# source, openssh-8.3p1/sshkey.c:
+#   static const struct keytype keytypes[] = {
+# Key types carrying the sigonly flag are excluded, e.g.:
+#   { "rsa-sha2-256", "RSA", NULL, KEY_RSA, 0, 0, 1 },
+# Refer to the keytype struct in the same file to check whether the
+# position of the sigonly flag has moved.
+#
+# dsa, rsa, ecdsa and ed25519 are kept for legacy reasons, as they are
+# valid public key types in some older distros. They can likely be
+# removed once support for those distros is dropped.
+#
+# When updating this list, also update the _is_printable_key list in
+# cloudinit/config/cc_ssh_authkey_fingerprints.py
VALID_KEY_TYPES = (
"dsa",
+ "rsa",
"ecdsa",
- "ecdsa-sha2-nistp256",
+ "ed25519",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
- "ecdsa-sha2-nistp384",
+ "ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
- "ecdsa-sha2-nistp521",
+ "ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
- "ed25519",
- "rsa",
- "rsa-sha2-256",
- "rsa-sha2-512",
- "ssh-dss",
+ "ecdsa-sha2-nistp521",
+ "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com",
+ "sk-ecdsa-sha2-nistp256@openssh.com",
+ "sk-ssh-ed25519-cert-v01@openssh.com",
+ "sk-ssh-ed25519@openssh.com",
"ssh-dss-cert-v01@openssh.com",
- "ssh-ed25519",
+ "ssh-dss",
"ssh-ed25519-cert-v01@openssh.com",
- "ssh-rsa",
+ "ssh-ed25519",
"ssh-rsa-cert-v01@openssh.com",
+ "ssh-rsa",
+ "ssh-xmss-cert-v01@openssh.com",
+ "ssh-xmss@openssh.com",
)
+_DISABLE_USER_SSH_EXIT = 142
DISABLE_USER_OPTS = (
"no-port-forwarding,no-agent-forwarding,"
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"")
+ " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10;"
+ "exit " + str(_DISABLE_USER_SSH_EXIT) + "\"")
class AuthKeyLine(object):
@@ -344,7 +362,9 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
changed = update_ssh_config_lines(lines=lines, updates=updates)
if changed:
util.write_file(
- fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True)
+ fname, "\n".join(
+ [str(line) for line in lines]
+ ) + "\n", preserve_mode=True)
return len(changed) != 0
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index db8ba64c..765f4aab 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -696,7 +696,7 @@ class Init(object):
netcfg, src = self._find_networking_config()
# ensure all physical devices in config are present
- net.wait_for_physdevs(netcfg)
+ self.distro.networking.wait_for_physdevs(netcfg)
# apply renames from config
self._apply_netcfg_names(netcfg)
@@ -947,7 +947,6 @@ def _pkl_load(fname):
except Exception as e:
if os.path.isfile(fname):
LOG.warning("failed loading pickle in %s: %s", fname, e)
- pass
# This is allowed so just return nothing successfully loaded...
if not pickle_contents:
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 0ad09306..3e4efa42 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -1,9 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Common utility functions for interacting with subprocess."""
-# TODO move subp shellify and runparts related functions out of util.py
-
import logging
+import os
+import subprocess
+
+from errno import ENOEXEC
LOG = logging.getLogger(__name__)
@@ -54,4 +56,332 @@ def prepend_base_command(base_command, commands):
return fixed_commands
+class ProcessExecutionError(IOError):
+
+ MESSAGE_TMPL = ('%(description)s\n'
+ 'Command: %(cmd)s\n'
+ 'Exit code: %(exit_code)s\n'
+ 'Reason: %(reason)s\n'
+ 'Stdout: %(stdout)s\n'
+ 'Stderr: %(stderr)s')
+ empty_attr = '-'
+
+ def __init__(self, stdout=None, stderr=None,
+ exit_code=None, cmd=None,
+ description=None, reason=None,
+ errno=None):
+ if not cmd:
+ self.cmd = self.empty_attr
+ else:
+ self.cmd = cmd
+
+ if not description:
+ if not exit_code and errno == ENOEXEC:
+ self.description = 'Exec format error. Missing #! in script?'
+ else:
+ self.description = 'Unexpected error while running command.'
+ else:
+ self.description = description
+
+ if not isinstance(exit_code, int):
+ self.exit_code = self.empty_attr
+ else:
+ self.exit_code = exit_code
+
+ if not stderr:
+ if stderr is None:
+ self.stderr = self.empty_attr
+ else:
+ self.stderr = stderr
+ else:
+ self.stderr = self._indent_text(stderr)
+
+ if not stdout:
+ if stdout is None:
+ self.stdout = self.empty_attr
+ else:
+ self.stdout = stdout
+ else:
+ self.stdout = self._indent_text(stdout)
+
+ if reason:
+ self.reason = reason
+ else:
+ self.reason = self.empty_attr
+
+ self.errno = errno
+ message = self.MESSAGE_TMPL % {
+ 'description': self._ensure_string(self.description),
+ 'cmd': self._ensure_string(self.cmd),
+ 'exit_code': self._ensure_string(self.exit_code),
+ 'stdout': self._ensure_string(self.stdout),
+ 'stderr': self._ensure_string(self.stderr),
+ 'reason': self._ensure_string(self.reason),
+ }
+ IOError.__init__(self, message)
+
+ def _ensure_string(self, text):
+ """
+ If text is a bytes object, decode it; otherwise return it unchanged.
+ """
+ return text.decode() if isinstance(text, bytes) else text
+
+ def _indent_text(self, text, indent_level=8):
+ """
+ Indent text on all but the first line, for easy-to-read output.
+ """
+ cr = '\n'
+ indent = ' ' * indent_level
+ # if input is bytes, return bytes
+ if isinstance(text, bytes):
+ cr = cr.encode()
+ indent = indent.encode()
+ # remove any newlines at end of text first to prevent unneeded blank
+ # line in output
+ return text.rstrip(cr).replace(cr, cr + indent)
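+ # For example (illustrative):
+ #   _indent_text("line1\nline2") -> "line1\n        line2"
+ #   _indent_text(b"a\nb") -> b"a\n        b" (bytes in, bytes out)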
+
+
+def subp(args, data=None, rcs=None, env=None, capture=True,
+ combine_capture=False, shell=False,
+ logstring=False, decode="replace", target=None, update_env=None,
+ status_cb=None):
+ """Run a subprocess.
+
+ :param args: command to run in a list. [cmd, arg1, arg2...]
+ :param data: input to the command, made available on its stdin.
+ :param rcs:
+ a list of allowed return codes. If subprocess exits with a value not
+ in this list, a ProcessExecutionError will be raised. By default,
+ data is returned as a string. See 'decode' parameter.
+ :param env: a dictionary for the command's environment.
+ :param capture:
+ boolean indicating if output should be captured. If True, then stderr
+ and stdout will be returned. If False, they will not be redirected.
+ :param combine_capture:
+ boolean indicating if stderr should be redirected to stdout. When True,
+ interleaved stderr and stdout will be returned as the first element of
+ a tuple, the second will be empty string or bytes (per decode).
+ If combine_capture is True, then output is captured independently of
+ the value of capture.
+ :param shell: boolean indicating if this should be run with a shell.
+ :param logstring:
+ the command will be logged to DEBUG. If it contains info that should
+ not be logged, then logstring will be logged instead.
+ :param decode:
+ if False, no decoding will be done and returned stdout and stderr will
+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+ These values are passed through to bytes().decode() as the 'errors'
+ parameter. There is no support for decoding to other than utf-8.
+ :param target:
+ not supported, kwarg present only to make function signature similar
+ to curtin's subp.
+ :param update_env:
+ update the environment for this command with this dictionary.
+ This will not affect the current process's os.environ.
+ :param status_cb:
+ call this function with a single string argument before starting
+ and after finishing.
+
+ :return
+ if not capturing, return is (None, None)
+ if capturing, stdout and stderr are returned.
+ if decode:
+ entries in tuple will be str
+ if not decode:
+ entries in tuple will be bytes
+ """
+
+ # not supported in cloud-init (yet), for now kept in the call signature
+ # to ease maintaining code shared between cloud-init and curtin
+ if target is not None:
+ raise ValueError("target arg not supported by cloud-init")
+
+ if rcs is None:
+ rcs = [0]
+
+ devnull_fp = None
+
+ if update_env:
+ if env is None:
+ env = os.environ
+ env = env.copy()
+ env.update(update_env)
+
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
+
+ if status_cb:
+ command = ' '.join(args) if isinstance(args, list) else args
+ status_cb('Begin run command: {command}\n'.format(command=command))
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"),
+ args, rcs, shell, 'combine' if combine_capture else capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ if combine_capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT
+ if data is None:
+ # using devnull ensures any reads get null, rather
+ # than possibly waiting on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+
+ # Popen converts entries in the arguments array from non-bytes to bytes.
+ # When locale is unset it may use ascii for that encoding which can
+ # cause UnicodeDecodeErrors. (LP: #1751051)
+ if isinstance(args, bytes):
+ bytes_args = args
+ elif isinstance(args, str):
+ bytes_args = args.encode("utf-8")
+ else:
+ bytes_args = [
+ x if isinstance(x, bytes) else x.encode("utf-8")
+ for x in args]
+ try:
+ sp = subprocess.Popen(bytes_args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
+ (out, err) = sp.communicate(data)
+ except OSError as e:
+ if status_cb:
+ status_cb('ERROR: End run command: invalid command provided\n')
+ raise ProcessExecutionError(
+ cmd=args, reason=e, errno=e.errno,
+ stdout="-" if decode else b"-",
+ stderr="-" if decode else b"-"
+ ) from e
+ finally:
+ if devnull_fp:
+ devnull_fp.close()
+
+ # Just ensure blank instead of none.
+ if capture or combine_capture:
+ if not out:
+ out = b''
+ if not err:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
+
+ rc = sp.returncode
+ if rc not in rcs:
+ if status_cb:
+ status_cb(
+ 'ERROR: End run command: exit({code})\n'.format(code=rc))
+ raise ProcessExecutionError(stdout=out, stderr=err,
+ exit_code=rc,
+ cmd=args)
+ if status_cb:
+ status_cb('End run command: exit({code})\n'.format(code=rc))
+ return (out, err)
+
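+# A minimal usage sketch (illustrative; assumes an 'ls' binary that exits 0):
+#
+#     out, _err = subp(['ls', '/'])   # raises ProcessExecutionError on any
+#                                     # exit code not in rcs (default [0])
+#     out, err = subp(['ls', '/missing'], rcs=[0, 2])     # tolerate exit 2
+#     both, _ = subp(['ls', '/'], combine_capture=True)   # stderr -> stdout
+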
+
+def target_path(target, path=None):
+ # return 'path' inside target, accepting target as None
+ if target in (None, ""):
+ target = "/"
+ elif not isinstance(target, str):
+ raise ValueError("Unexpected input for target: %s" % target)
+ else:
+ target = os.path.abspath(target)
+ # abspath("//") returns "//" specifically for 2 slashes.
+ if target.startswith("//"):
+ target = target[1:]
+
+ if not path:
+ return target
+
+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+ while len(path) and path[0] == "/":
+ path = path[1:]
+
+ return os.path.join(target, path)
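+# Examples, following the rules above:
+#   target_path(None, "/etc/hosts")   -> "/etc/hosts"
+#   target_path("/mnt", "/etc/hosts") -> "/mnt/etc/hosts"
+#   target_path("//mnt")              -> "/mnt"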
+
+
+def which(program, search=None, target=None):
+ target = target_path(target)
+
+ if os.path.sep in program:
+ # if program had a '/' in it, then do not search PATH
+ # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
+ # so effectively we set cwd to / (or target)
+ if is_exe(target_path(target, program)):
+ return program
+
+ if search is None:
+ paths = [p.strip('"') for p in
+ os.environ.get("PATH", "").split(os.pathsep)]
+ if target == "/":
+ search = paths
+ else:
+ search = [p for p in paths if p.startswith("/")]
+
+ # normalize path input
+ search = [os.path.abspath(p) for p in search]
+
+ for path in search:
+ ppath = os.path.sep.join((path, program))
+ if is_exe(target_path(target, ppath)):
+ return ppath
+
+ return None
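+# e.g. which("sh") walks the search path and returns a path like "/bin/sh"
+# (host-dependent); which("bin/sh") skips the PATH search because the name
+# contains a separator, and is resolved relative to the target root.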
+
+
+def is_exe(fpath):
+ # return boolean indicating if fpath exists and is executable.
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
+ if skip_no_exist and not os.path.isdir(dirp):
+ return
+
+ failed = []
+ attempted = []
+
+ if exe_prefix is None:
+ prefix = []
+ elif isinstance(exe_prefix, str):
+ prefix = [str(exe_prefix)]
+ elif isinstance(exe_prefix, list):
+ prefix = exe_prefix
+ else:
+ raise TypeError("exe_prefix must be None, str, or list")
+
+ for exe_name in sorted(os.listdir(dirp)):
+ exe_path = os.path.join(dirp, exe_name)
+ if is_exe(exe_path):
+ attempted.append(exe_path)
+ try:
+ subp(prefix + [exe_path], capture=False)
+ except ProcessExecutionError as e:
+ LOG.debug(e)
+ failed.append(exe_name)
+
+ if failed and attempted:
+ raise RuntimeError(
+ 'Runparts: %s failures (%s) in %s attempted commands' %
+ (len(failed), ",".join(failed), len(attempted)))
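+# A minimal sketch of runparts usage (illustrative; directory contents are
+# host-dependent, and failures are aggregated into a single RuntimeError):
+#
+#     runparts("/etc/cron.daily")                     # run each executable
+#     runparts("/nonexistent")                        # skipped, skip_no_exist
+#     runparts("/etc/cron.daily", exe_prefix="sudo")  # prefix each command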
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index e47cdeda..a00ade20 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -21,13 +21,10 @@ except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- from jinja2.runtime import implements_to_string
from jinja2 import Template as JTemplate
from jinja2 import DebugUndefined as JUndefined
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
- from cloudinit.helpers import identity
- implements_to_string = identity
JINJA_AVAILABLE = False
JUndefined = object
@@ -42,7 +39,6 @@ BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
-@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 70f6bad7..58f63b69 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import functools
import httpretty
import io
@@ -13,20 +11,10 @@ import string
import sys
import tempfile
import time
+import unittest
+from contextlib import ExitStack, contextmanager
from unittest import mock
-
-import unittest2
-from unittest2.util import strclass
-
-try:
- from contextlib import ExitStack, contextmanager
-except ImportError:
- from contextlib2 import ExitStack, contextmanager
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
+from unittest.util import strclass
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
@@ -35,13 +23,14 @@ from cloudinit import distros
from cloudinit import helpers as ch
from cloudinit.sources import DataSourceNone
from cloudinit.templater import JINJA_AVAILABLE
+from cloudinit import subp
from cloudinit import util
-_real_subp = util.subp
+_real_subp = subp.subp
# Used for skipping tests
-SkipTest = unittest2.SkipTest
-skipIf = unittest2.skipIf
+SkipTest = unittest.SkipTest
+skipIf = unittest.skipIf
# Makes the old path start
@@ -78,7 +67,7 @@ def retarget_many_wrapper(new_base, am, old_func):
return wrapper
-class TestCase(unittest2.TestCase):
+class TestCase(unittest.TestCase):
def reset_global_state(self):
"""Reset any global state to its original settings.
@@ -114,16 +103,6 @@ class TestCase(unittest2.TestCase):
self.addCleanup(m.stop)
setattr(self, attr, p)
- # prefer python3 read_file over readfp but allow fallback
- def parse_and_read(self, contents):
- parser = ConfigParser()
- if hasattr(parser, 'read_file'):
- parser.read_file(contents)
- elif hasattr(parser, 'readfp'):
- # pylint: disable=W1505
- parser.readfp(contents)
- return parser
-
class CiTestCase(TestCase):
"""This is the preferred test case base class unless user
@@ -156,14 +135,17 @@ class CiTestCase(TestCase):
self.old_handlers = self.logger.handlers
self.logger.handlers = [handler]
if self.allowed_subp is True:
- util.subp = _real_subp
+ subp.subp = _real_subp
else:
- util.subp = self._fake_subp
+ subp.subp = self._fake_subp
def _fake_subp(self, *args, **kwargs):
if 'args' in kwargs:
cmd = kwargs['args']
else:
+ if not args:
+ raise TypeError(
+ "subp() missing 1 required positional argument: 'args'")
cmd = args[0]
if not isinstance(cmd, str):
@@ -190,7 +172,7 @@ class CiTestCase(TestCase):
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
logging.getLogger().level = None
- util.subp = _real_subp
+ subp.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
@@ -212,16 +194,6 @@ class CiTestCase(TestCase):
dir = self.tmp_dir()
return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
- def sys_exit(self, code):
- """Provide a wrapper around sys.exit for python 2.6
-
- In 2.6, this code would produce 'cm.exception' with value int(2)
- rather than the SystemExit that was raised by sys.exit(2).
- with assertRaises(SystemExit) as cm:
- sys.exit(2)
- """
- raise SystemExit(code)
-
def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
"""Create a cloud with tmp working directory paths.
@@ -309,13 +281,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
mock.patch.object(mod, f, trap_func))
# Handle subprocess calls
- func = getattr(util, 'subp')
+ func = getattr(subp, 'subp')
def nsubp(*_args, **_kwargs):
return ('', '')
self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', nsubp))
+ mock.patch.object(subp, 'subp', nsubp))
def null_func(*_args, **_kwargs):
return None
@@ -363,6 +335,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
root = self.tmp_dir()
self.patchUtils(root)
self.patchOS(root)
+ self.patchOpen(root)
return root
@contextmanager
@@ -396,7 +369,7 @@ class HttprettyTestCase(CiTestCase):
super(HttprettyTestCase, self).tearDown()
-class SchemaTestCaseMixin(unittest2.TestCase):
+class SchemaTestCaseMixin(unittest.TestCase):
def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
"""Assert the config is valid per self.schema.
@@ -528,13 +501,4 @@ if not hasattr(mock.Mock, 'assert_not_called'):
raise AssertionError(msg)
mock.Mock.assert_not_called = __mock_assert_not_called
-
-# older unittest2.TestCase (centos6) have only the now-deprecated
-# assertRaisesRegexp. Simple assignment makes pylint complain, about
-# users of assertRaisesRegex so we use getattr to trick it.
-# https://github.com/PyCQA/pylint/issues/1946
-if not hasattr(unittest2.TestCase, 'assertRaisesRegex'):
- unittest2.TestCase.assertRaisesRegex = (
- getattr(unittest2.TestCase, 'assertRaisesRegexp'))
-
# vi: ts=4 expandtab
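
With subp split out of util, CiTestCase.allowed_subp now swaps subp.subp between the real implementation and _fake_subp during setUp/tearDown. A short usage sketch under that contract (the test class and command are illustrative):

    # Illustrative: opt a test into real subprocess calls.
    from cloudinit import subp
    from cloudinit.tests.helpers import CiTestCase

    class TestWhoami(CiTestCase):
        allowed_subp = ['whoami']  # only this command may really execute

        def test_whoami_runs(self):
            out, _err = subp.subp(['whoami'])
            self.assertTrue(out.strip())
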
diff --git a/cloudinit/tests/test_conftest.py b/cloudinit/tests/test_conftest.py
new file mode 100644
index 00000000..6f1263a5
--- /dev/null
+++ b/cloudinit/tests/test_conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+from cloudinit import subp
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+ """Test that the disable_subp_usage fixture behaves as expected."""
+
+ def test_using_subp_raises_assertion_error(self):
+ with pytest.raises(AssertionError):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ # We are intentionally passing no value for a parameter, so:
+ # pylint: disable=no-value-for-parameter
+ subp.subp()
+
+ @pytest.mark.allow_all_subp
+ def test_subp_usage_can_be_reenabled(self):
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_subp_for("whoami")
+ def test_subp_usage_can_be_conditionally_reenabled(self):
+ # Exercise both the disallowed invocation (which must raise) and the
+ # allowed one
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami" in str(excinfo.value)
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_subp_for("whoami", "bash")
+ def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami,bash" in str(excinfo.value)
+ subp.subp(['bash', '-c', 'true'])
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_all_subp
+ @pytest.mark.allow_subp_for("bash")
+ def test_both_marks_raise_an_error(self):
+ with pytest.raises(AssertionError, match="marked both"):
+ subp.subp(["bash"])
+
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+ """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+ def test_using_subp_raises_exception(self):
+ with pytest.raises(Exception):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ subp.subp()
+
+ def test_subp_usage_can_be_reenabled(self):
+ _old_allowed_subp = self.allowed_subp
+ self.allowed_subp = True
+ try:
+ subp.subp(['bash', '-c', 'true'])
+ finally:
+ self.allowed_subp = _old_allowed_subp
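
These tests exercise a disable_subp_usage autouse fixture defined in conftest.py, which is not part of this hunk. A rough sketch of the mechanism they imply, assuming pytest and mock; the real fixture differs in detail:

    import pytest
    from unittest import mock

    from cloudinit import subp

    _real_subp = subp.subp

    @pytest.fixture(autouse=True)
    def disable_subp_usage(request):
        allow_all = request.node.get_closest_marker('allow_all_subp')
        allow_for = request.node.get_closest_marker('allow_subp_for')
        if allow_all and not allow_for:
            yield  # leave the real subp.subp in place
            return

        def side_effect(args, *other_args, **kwargs):
            if allow_all and allow_for:
                raise AssertionError('Test marked both allow_all_subp'
                                     ' and allow_subp_for')
            if allow_for:
                if args[0] in allow_for.args:
                    return _real_subp(args, *other_args, **kwargs)
                raise AssertionError('Unexpectedly used subp.subp,'
                                     ' allowed: %s' % ','.join(allow_for.args))
            raise AssertionError('Unexpectedly used subp.subp')

        # autospec makes subp.subp() with no args raise TypeError, as tested.
        with mock.patch('cloudinit.subp.subp', autospec=True,
                        side_effect=side_effect):
            yield
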
diff --git a/cloudinit/tests/test_features.py b/cloudinit/tests/test_features.py
new file mode 100644
index 00000000..d7a7226d
--- /dev/null
+++ b/cloudinit/tests/test_features.py
@@ -0,0 +1,60 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=no-member,no-name-in-module
+"""
+This file is for testing the feature flag functionality itself,
+NOT for testing any individual feature flag
+"""
+import pytest
+import sys
+from pathlib import Path
+
+import cloudinit
+
+
+@pytest.yield_fixture()
+def create_override(request):
+ """
+ Create a feature overrides file and do some module wizardry to make
+ it seem like we're importing the features file for the first time.
+
+ After creating the override file with the values passed by the test,
+ we need to reload cloudinit.features
+ to get all of the current features (including the overridden ones).
+ Once the test is complete, we remove the file we created and set
+ features and feature_overrides modules to how they were before
+ the test started
+ """
+ override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py'
+ if override_path.exists():
+ raise Exception("feature_overrides.py unexpectedly exists! "
+ "Remove it to run this test.")
+ with override_path.open('w') as f:
+ for key, value in request.param.items():
+ f.write('{} = {}\n'.format(key, value))
+
+ sys.modules.pop('cloudinit.features', None)
+
+ yield
+
+ override_path.unlink()
+ sys.modules.pop('cloudinit.feature_overrides', None)
+
+
+class TestFeatures:
+ def test_feature_without_override(self):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is True
+
+ @pytest.mark.parametrize('create_override',
+ [{'ERROR_ON_USER_DATA_FAILURE': False}],
+ indirect=True)
+ def test_feature_with_override(self, create_override):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is False
+
+ @pytest.mark.parametrize('create_override',
+ [{'SPAM': True}],
+ indirect=True)
+ def test_feature_only_in_override(self, create_override):
+ from cloudinit.features import SPAM
+ assert SPAM is True
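
The machinery under test is small: cloudinit/features.py defines the flag defaults and then speculatively imports an optional override module. A hedged sketch of that pattern (the import-override idiom is the point; the real module may differ in detail):

    # cloudinit/features.py -- shape of the pattern exercised above.
    ERROR_ON_USER_DATA_FAILURE = True  # default asserted by the tests

    try:
        # A locally dropped feature_overrides.py wins over the defaults.
        from cloudinit.feature_overrides import *  # noqa: F401,F403
    except ImportError:
        pass
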
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
index 8dd57137..f96f5372 100644
--- a/cloudinit/tests/test_gpg.py
+++ b/cloudinit/tests/test_gpg.py
@@ -4,19 +4,19 @@
from unittest import mock
from cloudinit import gpg
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase
@mock.patch("cloudinit.gpg.time.sleep")
-@mock.patch("cloudinit.gpg.util.subp")
+@mock.patch("cloudinit.gpg.subp.subp")
class TestReceiveKeys(CiTestCase):
"""Test the recv_key method."""
def test_retries_on_subp_exc(self, m_subp, m_sleep):
"""retry should be done on gpg receive keys failure."""
retries = (1, 2, 4)
- my_exc = util.ProcessExecutionError(
+ my_exc = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
m_subp.side_effect = (my_exc, my_exc, ('', ''))
gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
@@ -26,7 +26,7 @@ class TestReceiveKeys(CiTestCase):
"""If the final run fails, error should be raised."""
naplen = 1
keyid, keyserver = ("ABCD", "keyserver.example.com")
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError) as rcm:
gpg.recv_key(keyid, keyserver, retries=(naplen,))
@@ -36,7 +36,7 @@ class TestReceiveKeys(CiTestCase):
def test_no_retries_on_none(self, m_subp, m_sleep):
"""retry should not be done if retries is None."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError):
gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 1c8a791e..e44b16d8 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -27,8 +27,8 @@ class TestNetInfo(CiTestCase):
maxDiff = None
with_logs = True
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_old_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering old nettools info."""
m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
@@ -36,8 +36,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_new_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
@@ -45,8 +45,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '')
@@ -57,8 +57,8 @@ class TestNetInfo(CiTestCase):
print()
self.assertEqual(FREEBSD_NETDEV_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_iproute_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering ip route info."""
m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
@@ -72,8 +72,8 @@ class TestNetInfo(CiTestCase):
'255.0.0.0 | . |', '255.0.0.0 | host |')
self.assertEqual(new_output, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
"""netdev_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Neither ip nor netstat found
@@ -85,8 +85,8 @@ class TestNetInfo(CiTestCase):
self.logs.getvalue())
m_subp.assert_not_called()
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_nettools_down(self, m_subp, m_which):
"""test netdev_info using nettools and down interfaces."""
m_subp.return_value = (
@@ -100,8 +100,8 @@ class TestNetInfo(CiTestCase):
'hwaddr': '.', 'up': True}},
netdev_info("."))
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_iproute_down(self, m_subp, m_which):
"""Test netdev_info with ip and down interfaces."""
m_subp.return_value = (
@@ -130,8 +130,8 @@ class TestNetInfo(CiTestCase):
readResource("netinfo/netdev-formatted-output-down"),
netdev_pformat())
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_nettools_pformat(self, m_subp, m_which):
"""route_pformat properly rendering nettools route info."""
@@ -147,8 +147,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_iproute_pformat(self, m_subp, m_which):
"""route_pformat properly rendering ip route info."""
@@ -165,8 +165,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_warn_on_missing_commands(self, m_subp, m_which):
"""route_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Neither ip nor netstat found
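
The only change in this file is the patch target: once netinfo imports subp directly, mocks must patch the name the module actually looks up at call time, not the old definition site on util. The general rule, illustrated:

    from unittest import mock

    # Patch where it is used: netinfo resolves subp.subp through its own
    # imported reference, so this is the name that must be replaced.
    with mock.patch('cloudinit.netinfo.subp.subp') as m_subp:
        m_subp.return_value = ('', '')
        # ... call netdev_pformat()/route_pformat() under the mock ...
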
diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
index 448097d3..911c1f3d 100644
--- a/cloudinit/tests/test_subp.py
+++ b/cloudinit/tests/test_subp.py
@@ -2,10 +2,21 @@
"""Tests for cloudinit.subp utility functions"""
-from cloudinit import subp
+import json
+import os
+import sys
+import stat
+
+from unittest import mock
+
+from cloudinit import subp, util
from cloudinit.tests.helpers import CiTestCase
+BASH = subp.which('bash')
+BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
+
+
class TestPrependBaseCommands(CiTestCase):
with_logs = True
@@ -58,4 +69,218 @@ class TestPrependBaseCommands(CiTestCase):
self.assertEqual('', self.logs.getvalue())
self.assertEqual(expected, fixed_commands)
+
+class TestSubp(CiTestCase):
+ allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND, sys.executable]
+
+ stdin2err = [BASH, '-c', 'cat >&2']
+ stdin2out = ['cat']
+ utf8_invalid = b'ab\xaadef'
+ utf8_valid = b'start \xc3\xa9 end'
+ utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
+ printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa. So does /usr/bin/printf
+ # but by using bash, we remove dependency on another program.
+ return [BASH, '-c', 'printf "$@"', 'printf'] + list(args)
+
+ def test_subp_handles_bytestrings(self):
+ """subp can run a bytestring command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_strings(self):
+ """subp can run a string command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd, shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = subp.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = subp.subp(self.stdin2out, capture=True, decode=False,
+ data=self.utf8_valid)
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'),
+ capture=True, decode='ignore')
+ self.assertEqual(out, 'abcdef')
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ decode='strict', data=self.utf8_valid)
+ self.assertEqual(out, self.utf8_valid.decode('utf-8'))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ data=self.utf8_invalid)
+ expected = self.utf8_invalid.decode('utf-8', 'replace')
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {'args': self.stdin2out, 'capture': True,
+ 'decode': 'strict', 'data': self.utf8_invalid}
+ self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
+
+ def test_subp_capture_stderr(self):
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ decode=False, data=data,
+ update_env={'LC_ALL': 'C'})
+ self.assertEqual(err, data)
+ self.assertEqual(out, b'')
+
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
+ out, _err = subp.subp(self.printenv + ['FOO'], capture=True)
+ self.assertEqual('FOO=BAR', out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ env={'FOO': 'BAR'},
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
+
+ def test_subp_update_env(self):
+ extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
+ with mock.patch.dict("os.environ", values=extra):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
+
+ def test_subp_warn_missing_shebang(self):
+ """Warn on no #! in script"""
+ noshebang = self.tmp_path('noshebang')
+ util.write_file(noshebang, 'true\n')
+
+ print("os is %s" % os)
+ os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(subp.ProcessExecutionError,
+ r'Missing #! in script\?',
+ subp.subp, (noshebang,))
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ combine_capture=True, decode=False, data=data)
+ self.assertEqual(b'', err)
+ self.assertEqual(data, out)
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = subp.subp(self.stdin2out, data=b'', capture=False)
+ self.assertIsNone(err)
+ self.assertIsNone(out)
+
+ def test_exception_has_out_err_are_bytes_if_decode_false(self):
+ """Raised exc should have stderr, stdout as bytes if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=False)
+ self.assertTrue(isinstance(cm.exception.stdout, bytes))
+ self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+ def test_exception_has_out_err_are_str_if_decode_true(self):
+ """Raised exc should have stderr, stdout as str when decode is True."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=True)
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "//my/path/"))
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "///my/path/"))
+
+ def test_c_lang_can_take_utf8_args(self):
+ """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+ When python starts up, its default encoding gets set based on
+ the value of LC_CTYPE. If no system locale is set, the default
+ encoding for both python2 and python3 in some paths will end up
+ being ascii.
+
+ Attempts to use setlocale or patching (or changing) os.environ
+ in the current environment seem to not be effective.
+
+ This test starts up a python with LC_CTYPE set to C so that
+ the default encoding will be set to ascii. In such an environment
+ Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """
+ python_prog = '\n'.join([
+ 'import json, sys',
+ 'from cloudinit.subp import subp',
+ 'data = sys.stdin.read()',
+ 'cmd = json.loads(data)',
+ 'subp(cmd, capture=False)',
+ ''])
+ cmd = [BASH, '-c', 'echo -n "$@"', '--',
+ self.utf8_valid.decode("utf-8")]
+ python_subp = [sys.executable, '-c', python_prog]
+
+ out, _err = subp.subp(
+ python_subp, update_env={'LC_CTYPE': 'C'},
+ data=json.dumps(cmd).encode("utf-8"),
+ decode=False)
+ self.assertEqual(self.utf8_valid, out)
+
+ def test_bogus_command_logs_status_messages(self):
+ """status_cb gets status messages logs on bogus commands provided."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BOGUS_COMMAND], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
+ 'ERROR: End run command: invalid command provided\n']
+ self.assertEqual(expected, logs)
+
+ def test_command_logs_exit_codes_to_status_cb(self):
+ """status_cb gets status messages containing command exit code."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
+ subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: %s -c exit 2\n' % BASH,
+ 'ERROR: End run command: exit(2)\n',
+ 'Begin run command: %s -c exit 0\n' % BASH,
+ 'End run command: exit(0)\n']
+ self.assertEqual(expected, logs)
+
+
# vi: ts=4 expandtab
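
Read together, the new tests double as documentation of subp.subp's surface. A quick usage sketch of the main knobs they cover:

    # Usage sketch based on the behaviours asserted above.
    from cloudinit import subp

    out, err = subp.subp(['cat'], data=b'hello', capture=True, decode=False)
    assert out == b'hello'  # bytes come back when decode=False

    try:
        subp.subp(['bash', '-c', 'exit 2'])
    except subp.ProcessExecutionError as e:
        print(e.exit_code)  # -> 2
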
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 1674120f..364ec822 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.url_helper import (
- NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc)
+ NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
+ retry_on_url_exc)
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
from cloudinit import util
from cloudinit import version
@@ -50,6 +51,9 @@ class TestOAuthHeaders(CiTestCase):
class TestReadFileOrUrl(CiTestCase):
+
+ with_logs = True
+
def test_read_file_or_url_str_from_file(self):
"""Test that str(result.contents) on file is text version of contents.
It should not be "b'data'", but just "'data'" """
@@ -71,6 +75,34 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode('utf-8'))
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
+ """Headers are redacted from logs but unredacted in requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+ logs = self.logs.getvalue()
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ self.assertIn(REDACTED, logs)
+ self.assertNotIn('sekret', logs)
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacts_noheaders(self):
+ """When no headers_redact, header values are in logs and requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers)
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ logs = self.logs.getvalue()
+ self.assertNotIn(REDACTED, logs)
+ self.assertIn('sekret', logs)
+
@mock.patch(M_PATH + 'readurl')
def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
"""read_file_or_url passes all params through to readurl."""
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 11f37000..096a3037 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -6,8 +6,10 @@ import base64
import logging
import json
import platform
+import pytest
import cloudinit.util as util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase, mock
from textwrap import dedent
@@ -331,7 +333,7 @@ class TestBlkid(CiTestCase):
"PARTUUID": self.ids["id09"]},
})
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_functional_blkid(self, m_subp):
m_subp.return_value = (
self.blkid_out.format(**self.ids), "")
@@ -339,7 +341,7 @@ class TestBlkid(CiTestCase):
m_subp.assert_called_with(["blkid", "-o", "full"], capture=True,
decode="replace")
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_blkid_no_cache_uses_no_cache(self, m_subp):
"""blkid should turn off cache if disable_cache is true."""
m_subp.return_value = (
@@ -350,7 +352,7 @@ class TestBlkid(CiTestCase):
capture=True, decode="replace")
-@mock.patch('cloudinit.util.subp')
+@mock.patch('cloudinit.subp.subp')
class TestUdevadmSettle(CiTestCase):
def test_with_no_params(self, m_subp):
"""called with no parameters."""
@@ -395,8 +397,8 @@ class TestUdevadmSettle(CiTestCase):
'--timeout=%s' % timeout])
def test_subp_exception_raises_to_caller(self, m_subp):
- m_subp.side_effect = util.ProcessExecutionError("BOOM")
- self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
+ m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+ self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
@mock.patch('os.path.exists')
@@ -419,12 +421,6 @@ class TestGetLinuxDistro(CiTestCase):
if path == '/etc/redhat-release':
return 1
- @classmethod
- def freebsd_version_exists(self, path):
- """Side effect function """
- if path == '/bin/freebsd-version':
- return 1
-
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -443,11 +439,18 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
- @mock.patch('cloudinit.util.subp')
- def test_get_linux_freebsd(self, m_subp, m_path_exists):
+ @mock.patch('platform.system')
+ @mock.patch('platform.release')
+ @mock.patch('cloudinit.util._parse_redhat_release')
+ def test_get_linux_freebsd(self, m_parse_redhat_release,
+ m_platform_release,
+ m_platform_system, m_path_exists):
"""Verify we get the correct name and release name on FreeBSD."""
- m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists
- m_subp.return_value = ("12.0-RELEASE-p10\n", '')
+ m_path_exists.return_value = False
+ m_platform_release.return_value = '12.0-RELEASE-p10'
+ m_platform_system.return_value = 'FreeBSD'
+ m_parse_redhat_release.return_value = {}
+ util.is_BSD.cache_clear()
dist = util.get_linux_distro()
self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
@@ -538,27 +541,36 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get no information if os-release does not exist"""
m_platform_dist.return_value = ('', '', '')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_impl(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get an empty tuple when no information exists and
Exceptions are not propagated"""
m_platform_dist.side_effect = Exception()
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_plat_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get the correct platform information"""
m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('foo', '1.1', 'aarch64'), dist)
@@ -597,4 +609,166 @@ class TestIsLXD(CiTestCase):
self.assertFalse(util.is_lxd())
m_exists.assert_called_once_with('/dev/lxd/sock')
+
+class TestReadCcFromCmdline:
+
+ @pytest.mark.parametrize(
+ "cmdline,expected_cfg",
+ [
+ # Return None if cmdline has no cc:<YAML>end_cc content.
+ (CiTestCase.random_string(), None),
+ # Return None if YAML content is empty string.
+ ('foo cc: end_cc bar', None),
+ # Return expected dictionary without trailing end_cc marker.
+ ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
+ # Return expected dictionary w escaped newline and no end_cc.
+ ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
+ # Return expected dictionary of yaml between cc: and end_cc.
+ ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
+ # Return dict with list value w escaped newline, no end_cc.
+ (
+ 'cc: ssh_import_id: [smoser, kirkland]\\n',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse urlencoded brackets in yaml content.
+ (
+ 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse complete urlencoded yaml content.
+ (
+ 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc',
+ {'ssh_import_id': ['user1', 'user2']}
+ ),
+ # Parse nested dictionary in yaml content.
+ (
+ 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc',
+ {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}
+ ),
+ # Parse single mapping value in yaml content.
+ ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}),
+ # Parse multiline content with multiple mapping and nested lists.
+ (
+ ('cc: ssh_import_id: [smoser, bob]\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse multiline encoded content w/ mappings and nested lists.
+ (
+ ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded escaped newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ('cc: ' +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ("cc: " +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse and merge multiple yaml content sections.
+ (
+ ('cc:ssh_import_id: [smoser, bob] end_cc '
+ 'cc: runcmd: [ [ ls, -l ] ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l']]}
+ ),
+ # Parse and merge multiple encoded yaml content sections.
+ (
+ ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc '
+ 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'),
+ {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}
+ ),
+ ]
+ )
+ def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+ assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
+class TestMountCb:
+ """Tests for ``util.mount_cb``.
+
+ These tests consider the "unit" under test to be ``util.mount_cb`` and
+ ``util.unmounter``, which is only used by ``mount_cb``.
+
+ TODO: Test default mtype determination
+ TODO: Test the if/else branch that actually performs the mounting operation
+ """
+
+ @pytest.yield_fixture
+ def already_mounted_device_and_mountdict(self):
+ """Mock an already-mounted device, and yield (device, mount dict)"""
+ device = "/dev/fake0"
+ mountpoint = "/mnt/fake"
+ with mock.patch("cloudinit.util.subp.subp"):
+ with mock.patch("cloudinit.util.mounts") as m_mounts:
+ mounts = {device: {"mountpoint": mountpoint}}
+ m_mounts.return_value = mounts
+ yield device, mounts[device]
+
+ @pytest.fixture
+ def already_mounted_device(self, already_mounted_device_and_mountdict):
+ """already_mounted_device_and_mountdict, but return only the device"""
+ return already_mounted_device_and_mountdict[0]
+
+ @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+ def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+ with pytest.raises(TypeError):
+ util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+ @mock.patch("cloudinit.util.subp.subp")
+ def test_already_mounted_does_not_mount_or_umount_anything(
+ self, m_subp, already_mounted_device
+ ):
+ util.mount_cb(already_mounted_device, mock.Mock())
+
+ assert 0 == m_subp.call_count
+
+ @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+ def test_already_mounted_calls_callback(
+ self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+ ):
+ device, mount_dict = already_mounted_device_and_mountdict
+ mountpoint = mount_dict["mountpoint"]
+ mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+ callback = mock.Mock()
+ util.mount_cb(device, callback)
+
+ # The mountpoint passed to callback should always have a trailing
+ # slash, regardless of the input
+ assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+ def test_already_mounted_calls_callback_with_data(
+ self, already_mounted_device
+ ):
+ callback = mock.Mock()
+ util.mount_cb(
+ already_mounted_device, callback, data=mock.sentinel.data
+ )
+
+ assert [
+ mock.call(mock.ANY, mock.sentinel.data)
+ ] == callback.call_args_list
+
+
# vi: ts=4 expandtab
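
The already-mounted fast path that TestMountCb exercises makes mount_cb cheap to call repeatedly. A hedged usage sketch (device path and file name are illustrative):

    from cloudinit import util

    def read_meta(mountpoint):
        # mount_cb always hands the callback a trailing-slash mountpoint.
        return util.load_file(mountpoint + 'meta-data')

    # Returns the callback's return value; mounts only if not yet mounted.
    contents = util.mount_cb('/dev/fake0', read_meta, mtype='iso9660')
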
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index eeb27aa8..caa88435 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -95,7 +95,7 @@ def read_file_or_url(url, **kwargs):
code = e.errno
if e.errno == ENOENT:
code = NOT_FOUND
- raise UrlError(cause=e, code=code, headers=None, url=url)
+ raise UrlError(cause=e, code=code, headers=None, url=url) from e
return FileResponse(file_path, contents=contents)
else:
return readurl(url, **kwargs)
@@ -281,13 +281,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
for (k, v) in req_args.items():
if k == 'data':
continue
- filtered_req_args[k] = v
- if k == 'headers':
- for hkey, _hval in v.items():
- if hkey in headers_redact:
- filtered_req_args[k][hkey] = (
- copy.deepcopy(req_args[k][hkey]))
- filtered_req_args[k][hkey] = REDACTED
+ if k == 'headers' and headers_redact:
+ matched_headers = [k for k in headers_redact if v.get(k)]
+ if matched_headers:
+ filtered_req_args[k] = copy.deepcopy(v)
+ for key in matched_headers:
+ filtered_req_args[k][key] = REDACTED
+ else:
+ filtered_req_args[k] = v
try:
if log_req_resp:
@@ -574,8 +575,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
timestamp=None):
try:
import oauthlib.oauth1 as oauth1
- except ImportError:
- raise NotImplementedError('oauth support is not available')
+ except ImportError as e:
+ raise NotImplementedError('oauth support is not available') from e
if timestamp:
timestamp = str(timestamp)
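
The rewritten redaction block above only deep-copies the headers when a redacted key is actually present. The same logic restated as a standalone sketch:

    import copy

    REDACTED = 'REDACTED'

    def filter_headers(headers, headers_redact):
        matched = [k for k in headers_redact if headers.get(k)]
        if not matched:
            return headers  # nothing sensitive: pass through, no copy
        filtered = copy.deepcopy(headers)
        for key in matched:
            filtered[key] = REDACTED
        return filtered

    assert (filter_headers({'tok': 's3cr3t', 'x': '1'}, ['tok'])
            == {'tok': 'REDACTED', 'x': '1'})
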
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 6f41b03a..f234b962 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -16,6 +16,7 @@ from email.mime.text import MIMEText
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import features
from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
@@ -25,6 +26,7 @@ LOG = logging.getLogger(__name__)
NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
+INCLUDE_MAP = handlers.INCLUSION_TYPES_MAP
# Saves typing errors
CONTENT_TYPE = 'Content-Type'
@@ -68,6 +70,13 @@ def _set_filename(msg, filename):
'attachment', filename=str(filename))
+def _handle_error(error_message, source_exception=None):
+ if features.ERROR_ON_USER_DATA_FAILURE:
+ raise Exception(error_message) from source_exception
+ else:
+ LOG.warning(error_message)
+
+
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
@@ -107,15 +116,22 @@ class UserDataProcessor(object):
ctype_orig = None
was_compressed = True
except util.DecompressionError as e:
- LOG.warning("Failed decompressing payload from %s of"
- " length %s due to: %s",
- ctype_orig, len(payload), e)
+ error_message = (
+ "Failed decompressing payload from {} of"
+ " length {} due to: {}".format(
+ ctype_orig, len(payload), e))
+ _handle_error(error_message, e)
continue
# Attempt to figure out the payloads content-type
if not ctype_orig:
ctype_orig = UNDEF_TYPE
- if ctype_orig in TYPE_NEEDED:
+ # There are known cases where mime-type text/x-shellscript included
+ # non shell-script content that was user-data instead. It is safe
+ # to check the true MIME type for x-shellscript type since all
+ # shellscript payloads must have a #! header. The other MIME types
+ # that cloud-init supports do not have the same guarantee.
+ if ctype_orig in TYPE_NEEDED + ['text/x-shellscript']:
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -229,19 +245,22 @@ class UserDataProcessor(object):
if resp.ok():
content = resp.contents
else:
- LOG.warning(("Fetching from %s resulted in"
- " a invalid http code of %s"),
- include_url, resp.code)
+ error_message = (
+ "Fetching from {} resulted in"
+ " a invalid http code of {}".format(
+ include_url, resp.code))
+ _handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
message += ' for url: {0}'.format(include_url)
- LOG.warning(message)
+ _handle_error(message, urle)
except IOError as ioe:
- LOG.warning("Fetching from %s resulted in %s",
- include_url, ioe)
+ error_message = "Fetching from {} resulted in {}".format(
+ include_url, ioe)
+ _handle_error(error_message, ioe)
if content is not None:
new_msg = convert_string(content)
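
_handle_error is the whole feature-flag story in miniature: fatal when ERROR_ON_USER_DATA_FAILURE is set, a logged warning otherwise. An illustrative call site (fetch_part is a hypothetical helper, not upstream code):

    try:
        payload = fetch_part()  # hypothetical fetch that may raise
    except IOError as ioe:
        _handle_error('Fetching part failed: {}'.format(ioe), ioe)
        # Control only reaches past here when the feature flag is off.
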
diff --git a/cloudinit/util.py b/cloudinit/util.py
index c02b3d9a..cf9e349f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -15,6 +15,7 @@ import glob
import grp
import gzip
import hashlib
+import io
import json
import os
import os.path
@@ -30,34 +31,23 @@ import string
import subprocess
import sys
import time
-
-from errno import ENOENT, ENOEXEC
-
from base64 import b64decode, b64encode
-from six.moves.urllib import parse as urlparse
-
-import six
+from errno import ENOENT
+from functools import lru_cache
+from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import safeyaml
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import url_helper
-from cloudinit import version
-
-from cloudinit.settings import (CFG_BUILTIN)
-
-try:
- from functools import lru_cache
-except ImportError:
- def lru_cache():
- """pass-thru replace for Python3's lru_cache()"""
- def wrapper(f):
- return f
- return wrapper
-
+from cloudinit import subp
+from cloudinit import (
+ mergers,
+ safeyaml,
+ temp_utils,
+ type_utils,
+ url_helper,
+ version,
+)
+from cloudinit.settings import CFG_BUILTIN
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
@@ -78,6 +68,10 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
['lxc-is-container'])
+def kernel_version():
+ return tuple(map(int, os.uname().release.split('.')[:2]))
+
+
@lru_cache()
def get_dpkg_architecture(target=None):
"""Return the sanitized string output by `dpkg --print-architecture`.
@@ -85,8 +79,8 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp(['dpkg', '--print-architecture'], capture=True,
- target=target)
+ out, _ = subp.subp(['dpkg', '--print-architecture'], capture=True,
+ target=target)
return out.strip()
@@ -97,7 +91,8 @@ def lsb_release(target=None):
data = {}
try:
- out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+ out, _ = subp.subp(['lsb_release', '--all'], capture=True,
+ target=target)
for line in out.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
@@ -107,45 +102,23 @@ def lsb_release(target=None):
LOG.warning("Missing fields in lsb_release --all output: %s",
','.join(missing))
- except ProcessExecutionError as err:
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get lsb_release --all: %s", err)
data = dict((v, "UNAVAILABLE") for v in fmap.values())
return data
-def target_path(target, path=None):
- # return 'path' inside target, accepting target as None
- if target in (None, ""):
- target = "/"
- elif not isinstance(target, six.string_types):
- raise ValueError("Unexpected input for target: %s" % target)
- else:
- target = os.path.abspath(target)
- # abspath("//") returns "//" specifically for 2 slashes.
- if target.startswith("//"):
- target = target[1:]
-
- if not path:
- return target
-
- # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
- while len(path) and path[0] == "/":
- path = path[1:]
-
- return os.path.join(target, path)
-
-
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.string_types):
+ if isinstance(blob, str):
return blob
return blob.decode(encoding)
def encode_text(text, encoding='utf-8'):
# Converts a text string into a binary type using given encoding.
- if isinstance(text, six.binary_type):
+ if isinstance(text, bytes):
return text
return text.encode(encoding)
@@ -175,8 +148,7 @@ def fully_decoded_payload(part):
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
- if (six.PY3 and
- part.get_content_maintype() == 'text' and
+ if (part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
charset = part.get_charset()
if charset and charset.input_codec:
@@ -213,91 +185,6 @@ DMIDECODE_TO_DMI_SYS_MAPPING = {
}
-class ProcessExecutionError(IOError):
-
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)s\n'
- 'Stderr: %(stderr)s')
- empty_attr = '-'
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
- if not cmd:
- self.cmd = self.empty_attr
- else:
- self.cmd = cmd
-
- if not description:
- if not exit_code and errno == ENOEXEC:
- self.description = 'Exec format error. Missing #! in script?'
- else:
- self.description = 'Unexpected error while running command.'
- else:
- self.description = description
-
- if not isinstance(exit_code, six.integer_types):
- self.exit_code = self.empty_attr
- else:
- self.exit_code = exit_code
-
- if not stderr:
- if stderr is None:
- self.stderr = self.empty_attr
- else:
- self.stderr = stderr
- else:
- self.stderr = self._indent_text(stderr)
-
- if not stdout:
- if stdout is None:
- self.stdout = self.empty_attr
- else:
- self.stdout = stdout
- else:
- self.stdout = self._indent_text(stdout)
-
- if reason:
- self.reason = reason
- else:
- self.reason = self.empty_attr
-
- self.errno = errno
- message = self.MESSAGE_TMPL % {
- 'description': self._ensure_string(self.description),
- 'cmd': self._ensure_string(self.cmd),
- 'exit_code': self._ensure_string(self.exit_code),
- 'stdout': self._ensure_string(self.stdout),
- 'stderr': self._ensure_string(self.stderr),
- 'reason': self._ensure_string(self.reason),
- }
- IOError.__init__(self, message)
-
- def _ensure_string(self, text):
- """
- if data is bytes object, decode
- """
- return text.decode() if isinstance(text, six.binary_type) else text
-
- def _indent_text(self, text, indent_level=8):
- """
- indent text on all but the first line, allowing for easy to read output
- """
- cr = '\n'
- indent = ' ' * indent_level
- # if input is bytes, return bytes
- if isinstance(text, six.binary_type):
- cr = cr.encode()
- indent = indent.encode()
- # remove any newlines at end of text first to prevent unneeded blank
- # line in output
- return text.rstrip(cr).replace(cr, cr + indent)
-
-
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
@@ -322,9 +209,6 @@ class SeLinuxGuard(object):
return
path = os.path.realpath(self.path)
- # path should be a string, not unicode
- if six.PY2:
- path = str(path)
try:
stats = os.lstat(path)
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -369,7 +253,7 @@ def is_true(val, addons=None):
check_set = TRUE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -380,7 +264,7 @@ def is_false(val, addons=None):
check_set = FALSE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -441,7 +325,7 @@ def uniq_merge_sorted(*lists):
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
- if isinstance(a_list, six.string_types):
+ if isinstance(a_list, str):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
@@ -464,7 +348,7 @@ def clean_filename(fn):
def decomp_gzip(data, quiet=True, decode=True):
try:
- buf = six.BytesIO(encode_text(data))
+ buf = io.BytesIO(encode_text(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
# E1101 is https://github.com/PyCQA/pylint/issues/1444
if decode:
@@ -475,7 +359,7 @@ def decomp_gzip(data, quiet=True, decode=True):
if quiet:
return data
else:
- raise DecompressionError(six.text_type(e))
+ raise DecompressionError(str(e)) from e
def extract_usergroup(ug_pair):
@@ -533,18 +417,9 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text)
-def is_ipv4(instr):
- """determine if input string is a ipv4 address. return boolean."""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if 0 <= int(x) < 256]
- except Exception:
- return False
-
- return len(toks) == 4
+@lru_cache()
+def is_BSD():
+ return 'BSD' in platform.system()
@lru_cache()
@@ -552,6 +427,16 @@ def is_FreeBSD():
return system_info()['variant'] == "freebsd"
+@lru_cache()
+def is_NetBSD():
+ return system_info()['variant'] == "netbsd"
+
+
+@lru_cache()
+def is_OpenBSD():
+ return system_info()['variant'] == "openbsd"
+
+
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
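
Like is_FreeBSD, the new BSD predicates are memoised with lru_cache, which is why the reworked FreeBSD test earlier calls util.is_BSD.cache_clear() before mocking platform.system. Illustration:

    from cloudinit import util

    util.is_BSD()              # computed once, then served from the cache
    util.is_BSD.cache_clear()  # required between tests that re-mock platform
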
@@ -562,7 +447,7 @@ def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return val
@@ -625,10 +510,9 @@ def get_linux_distro():
flavor = match.groupdict()['codename']
if distro_name == 'rhel':
distro_name = 'redhat'
- elif os.path.exists('/bin/freebsd-version'):
- distro_name = 'freebsd'
- distro_version, _ = subp(['uname', '-r'])
- distro_version = distro_version.strip()
+ elif is_BSD():
+ distro_name = platform.system().lower()
+ distro_version = platform.release()
else:
dist = ('', '', '')
try:
@@ -656,7 +540,7 @@ def system_info():
'system': platform.system(),
'release': platform.release(),
'python': platform.python_version(),
- 'uname': platform.uname(),
+ 'uname': list(platform.uname()),
'dist': get_linux_distro()
}
system = info['system'].lower()
@@ -664,18 +548,20 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'):
+ 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel',
+ 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
elif linux_dist in (
- 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'):
+ 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap',
+ 'sles', 'sle_hpc'):
var = 'suse'
else:
var = 'linux'
- elif system in ('windows', 'darwin', "freebsd"):
+ elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
var = system
info['variant'] = var
@@ -703,7 +589,7 @@ def get_cfg_option_list(yobj, key, default=None):
if isinstance(val, (list)):
cval = [v for v in val]
return cval
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return [val]
@@ -724,7 +610,7 @@ def get_cfg_by_path(yobj, keyp, default=None):
@return: The value of the item at keyp."
is not found."""
- if isinstance(keyp, six.string_types):
+ if isinstance(keyp, str):
keyp = keyp.split("/")
cur = yobj
for tok in keyp:
@@ -822,7 +708,7 @@ def make_url(scheme, host, port=None,
pieces.append(query or '')
pieces.append(fragment or '')
- return urlparse.urlunparse(pieces)
+ return parse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
@@ -869,37 +755,6 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True, exe_prefix=None):
- if skip_no_exist and not os.path.isdir(dirp):
- return
-
- failed = []
- attempted = []
-
- if exe_prefix is None:
- prefix = []
- elif isinstance(exe_prefix, str):
- prefix = [str(exe_prefix)]
- elif isinstance(exe_prefix, list):
- prefix = exe_prefix
- else:
- raise TypeError("exe_prefix must be None, str, or list")
-
- for exe_name in sorted(os.listdir(dirp)):
- exe_path = os.path.join(dirp, exe_name)
- if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- attempted.append(exe_path)
- try:
- subp(prefix + [exe_path], capture=False)
- except ProcessExecutionError as e:
- logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
- failed.append(e)
-
- if failed and attempted:
- raise RuntimeError('Runparts: %s failures in %s attempted commands'
- % (len(failed), len(attempted)))
-
-
# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
@@ -1031,7 +886,7 @@ def read_conf_with_confd(cfgfile):
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
- if not isinstance(confd, six.string_types):
+ if not isinstance(confd, str):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
@@ -1049,7 +904,7 @@ def read_conf_with_confd(cfgfile):
def read_conf_from_cmdline(cmdline=None):
- # return a dictionary or config on the cmdline or None
+ # return a dictionary of config on the cmdline or None
return load_yaml(read_cc_from_cmdline(cmdline=cmdline))
@@ -1057,11 +912,12 @@ def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
- # cc: <yaml content here> [end_cc]
+ # cc: <yaml content here|urlencoded yaml content> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
+ # cc:ssh_import_id: %5Bsmoser%5D end_cc
if cmdline is None:
cmdline = get_cmdline()
@@ -1076,9 +932,9 @@ def read_cc_from_cmdline(cmdline=None):
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
- tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
- "\n"))
-
+ tokens.append(
+ parse.unquote(
+ cmdline[begin + begin_l:end].lstrip()).replace("\\n", "\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
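
The functional change in this hunk is the parse.unquote() applied to each extracted token; that is what makes the urlencoded variants in the comment (and the new TestReadCcFromCmdline cases) work. What unquoting buys, in isolation:

    from urllib.parse import unquote

    token = 'ssh_import_id%3A%20%5Bsmoser%5D'
    print(unquote(token))  # -> 'ssh_import_id: [smoser]'
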
@@ -1223,7 +1079,7 @@ def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
func=is_resolvable,
- args=(urlparse.urlparse(url).hostname,))
+ args=(parse.urlparse(url).hostname,))
def search_for_mirror(candidates):
@@ -1231,9 +1087,14 @@ def search_for_mirror(candidates):
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
+ if candidates is None:
+ return None
+
+ LOG.debug("search for mirror in candidates: '%s'", candidates)
for cand in candidates:
try:
if is_resolvable_url(cand):
+ LOG.debug("found working mirror: '%s'", cand)
return cand
except Exception:
pass
@@ -1254,6 +1115,68 @@ def close_stdin():
os.dup2(fp.fileno(), sys.stdin.fileno())
+def find_devs_with_freebsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ if not criteria:
+ return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*")
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ devlist = [
+ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
+ if os.path.exists(p)]
+ elif criteria == "TYPE=vfat":
+ devlist = glob.glob("/dev/msdosfs/*")
+ elif criteria == "TYPE=iso9660":
+ devlist = glob.glob("/dev/iso9660/*")
+ return devlist
+
+
+def find_devs_with_netbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ label = None
+ _type = None
+ if criteria:
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ if criteria.startswith("TYPE="):
+ _type = criteria.lstrip("TYPE=")
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ for dev in out.split():
+ if label or _type:
+ mscdlabel_out, _ = subp.subp(['mscdlabel', dev], rcs=[0, 1])
+ if label and ('label "%s"' % label) not in mscdlabel_out:
+ continue
+ if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
+ continue
+ if _type == "vfat" and "ISO filesystem" in mscdlabel_out:
+ continue
+ devlist.append('/dev/' + dev)
+ return devlist
+
+
+def find_devs_with_openbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ devlist = []
+ for entry in out.split(','):
+ if not entry.endswith(':'):
+ # ffs partition with a serial, not a config-drive
+ continue
+ if entry == 'fd0:':
+ continue
+ part_id = 'a' if entry.startswith('cd') else 'i'
+ devlist.append(entry[:-1] + part_id)
+ if criteria == "TYPE=iso9660":
+ devlist = [i for i in devlist if i.startswith('cd')]
+ elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
+ devlist = [i for i in devlist if not i.startswith('cd')]
+ elif criteria:
+ LOG.debug("Unexpected criteria: %s", criteria)
+ return ['/dev/' + i for i in devlist]
+
+
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
@@ -1263,6 +1186,16 @@ def find_devs_with(criteria=None, oformat='device',
LABEL=<label>
UUID=<uuid>
"""
+ if is_FreeBSD():
+ return find_devs_with_freebsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_NetBSD():
+ return find_devs_with_netbsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_OpenBSD():
+ return find_devs_with_openbsd(criteria, oformat,
+ tag, no_cache, path)
+
blk_id_cmd = ['blkid']
options = []
if criteria:
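
On the BSDs, find_devs_with now dispatches to the per-OS helpers above instead of shelling out to blkid. Illustrative calls with the criteria strings those helpers understand (actual results depend on the host's devices):

    from cloudinit import util

    util.find_devs_with('TYPE=iso9660')    # config-drive / seed ISOs
    util.find_devs_with('LABEL=CONFIG-2')  # OpenStack config-drive label
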
@@ -1291,8 +1224,8 @@ def find_devs_with(criteria=None, oformat='device',
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
try:
- (out, _err) = subp(cmd, rcs=[0, 2])
- except ProcessExecutionError as e:
+ (out, _err) = subp.subp(cmd, rcs=[0, 2])
+ except subp.ProcessExecutionError as e:
if e.errno == ENOENT:
# blkid not found...
out = ""
@@ -1327,7 +1260,7 @@ def blkid(devs=None, disable_cache=False):
# we have to decode with 'replace' as shelx.split (called by
# load_shell_content) can't take bytes. So this is potentially
# lossy of non-utf-8 chars in blkid output.
- out, _ = subp(cmd, capture=True, decode="replace")
+ out, _ = subp.subp(cmd, capture=True, decode="replace")
ret = {}
for line in out.splitlines():
dev, _, data = line.partition(":")
@@ -1355,7 +1288,7 @@ def uniq_list(in_list):
def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = six.BytesIO()
+ ofh = io.BytesIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1430,7 +1363,7 @@ def chownbyname(fname, user=None, group=None):
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
- raise OSError("Unknown user or group: %s" % (e))
+ raise OSError("Unknown user or group: %s" % (e)) from e
chownbyid(fname, uid, gid)
@@ -1647,7 +1580,7 @@ def unmounter(umount):
finally:
if umount:
umount_cmd = ["umount", umount]
- subp(umount_cmd)
+ subp.subp(umount_cmd)
def mounts():
@@ -1658,7 +1591,7 @@ def mounts():
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
- (mountoutput, _err) = subp("mount")
+ (mountoutput, _err) = subp.subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
@@ -1742,7 +1675,7 @@ def mount_cb(device, callback, data=None, mtype=None,
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
- subp(mountcmd, update_env=update_env_for_mount)
+ subp.subp(mountcmd, update_env=update_env_for_mount)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
@@ -1804,6 +1737,7 @@ def time_rfc2822():
return ts
+@lru_cache()
def boottime():
"""Use sysctlbyname(3) via ctypes to find kern.boottime
@@ -1813,6 +1747,7 @@ def boottime():
@return boottime: float to be compatible with linux
"""
import ctypes
+ import ctypes.util
NULL_BYTES = b"\x00"
@@ -1821,7 +1756,7 @@ def boottime():
("tv_sec", ctypes.c_int64),
("tv_usec", ctypes.c_int64)
]
- libc = ctypes.CDLL('/lib/libc.so.7')
+ libc = ctypes.CDLL(ctypes.util.find_library('c'))
size = ctypes.c_size_t()
size.value = ctypes.sizeof(timeval)
buf = timeval()
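Two changes here: boottime() is now memoised with @lru_cache(), so the sysctl is issued at most once per process, and the hard-coded /lib/libc.so.7 is replaced with ctypes.util.find_library('c'), making the libc lookup portable across the BSDs. A standalone sketch of the lookup pattern:

    import ctypes
    import ctypes.util

    # find_library('c') returns a loadable name such as 'libc.so.7',
    # or None if no C library can be located.
    libc = ctypes.CDLL(ctypes.util.find_library('c'))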
@@ -1872,7 +1807,15 @@ def chmod(path, mode):
os.chmod(path, real_mode)
-def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
+def write_file(
+ filename,
+ content,
+ mode=0o644,
+ omode="wb",
+ preserve_mode=False,
+ *,
+ ensure_dir_exists=True
+):
"""
Writes a file with the given content and sets the file mode as specified.
Restores the SELinux context if possible.
@@ -1881,16 +1824,22 @@ def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (w, wb, a, etc.)
+ @param preserve_mode: If True and `filename` exists, preserve `filename`'s
+ current mode instead of applying `mode`.
+ @param ensure_dir_exists: If True (the default), ensure that the directory
+ containing `filename` exists before writing to
+ the file.
"""
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
except OSError:
pass
- ensure_dir(os.path.dirname(filename))
+ if ensure_dir_exists:
+ ensure_dir(os.path.dirname(filename))
if 'b' in omode.lower():
content = encode_text(content)
write_type = 'bytes'
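A short usage sketch for the reworked keyword arguments (the paths below are hypothetical):

    from cloudinit import util

    # Keep the mode bits of an existing file instead of applying 0o644.
    util.write_file('/etc/hypothetical.conf', 'data\n', preserve_mode=True)

    # Skip creating missing parent directories; open() will then raise
    # FileNotFoundError if the directory is absent.
    util.write_file('/run/hypothetical/state', 'x', ensure_dir_exists=False)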
@@ -1924,185 +1873,6 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp_blob_in_tempfile(blob, *args, **kwargs):
- """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
-
- 'basename' as a kwarg allows providing the basename for the file.
- The 'args' argument to subp will be updated with the full path to the
- filename as the first argument.
- """
- basename = kwargs.pop('basename', "subp_blob")
-
- if len(args) == 0 and 'args' not in kwargs:
- args = [tuple()]
-
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- with temp_utils.tempdir(needs_exe=True) as tmpd:
- tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
- else:
- args = list(args)
- args[0] = [tmpf] + args[0]
-
- write_file(tmpf, blob, mode=0o700)
- return subp(*args, **kwargs)
-
-
-def subp(args, data=None, rcs=None, env=None, capture=True,
- combine_capture=False, shell=False,
- logstring=False, decode="replace", target=None, update_env=None,
- status_cb=None):
- """Run a subprocess.
-
- :param args: command to run in a list. [cmd, arg1, arg2...]
- :param data: input to the command, made available on its stdin.
- :param rcs:
- a list of allowed return codes. If subprocess exits with a value not
- in this list, a ProcessExecutionError will be raised. By default,
- data is returned as a string. See 'decode' parameter.
- :param env: a dictionary for the command's environment.
- :param capture:
- boolean indicating if output should be captured. If True, then stderr
- and stdout will be returned. If False, they will not be redirected.
- :param combine_capture:
- boolean indicating if stderr should be redirected to stdout. When True,
- interleaved stderr and stdout will be returned as the first element of
- a tuple, the second will be empty string or bytes (per decode).
- if combine_capture is True, then output is captured independent of
- the value of capture.
- :param shell: boolean indicating if this should be run with a shell.
- :param logstring:
- the command will be logged to DEBUG. If it contains info that should
- not be logged, then logstring will be logged instead.
- :param decode:
- if False, no decoding will be done and returned stdout and stderr will
- be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
- These values are passed through to bytes().decode() as the 'errors'
- parameter. There is no support for decoding to other than utf-8.
- :param target:
- not supported, kwarg present only to make function signature similar
- to curtin's subp.
- :param update_env:
- update the enviornment for this command with this dictionary.
- this will not affect the current processes os.environ.
- :param status_cb:
- call this fuction with a single string argument before starting
- and after finishing.
-
- :return
- if not capturing, return is (None, None)
- if capturing, stdout and stderr are returned.
- if decode:
- entries in tuple will be python2 unicode or python3 string
- if not decode:
- entries in tuple will be python2 string or python3 bytes
- """
-
- # not supported in cloud-init (yet), for now kept in the call signature
- # to ease maintaining code shared between cloud-init and curtin
- if target is not None:
- raise ValueError("target arg not supported by cloud-init")
-
- if rcs is None:
- rcs = [0]
-
- devnull_fp = None
-
- if update_env:
- if env is None:
- env = os.environ
- env = env.copy()
- env.update(update_env)
-
- if target_path(target) != "/":
- args = ['chroot', target] + list(args)
-
- if status_cb:
- command = ' '.join(args) if isinstance(args, list) else args
- status_cb('Begin run command: {command}\n'.format(command=command))
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"),
- args, rcs, shell, 'combine' if combine_capture else capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- stdin = None
- stdout = None
- stderr = None
- if capture:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if combine_capture:
- stdout = subprocess.PIPE
- stderr = subprocess.STDOUT
- if data is None:
- # using devnull assures any reads get null, rather
- # than possibly waiting on input.
- devnull_fp = open(os.devnull)
- stdin = devnull_fp
- else:
- stdin = subprocess.PIPE
- if not isinstance(data, bytes):
- data = data.encode()
-
- # Popen converts entries in the arguments array from non-bytes to bytes.
- # When locale is unset it may use ascii for that encoding which can
- # cause UnicodeDecodeErrors. (LP: #1751051)
- if isinstance(args, six.binary_type):
- bytes_args = args
- elif isinstance(args, six.string_types):
- bytes_args = args.encode("utf-8")
- else:
- bytes_args = [
- x if isinstance(x, six.binary_type) else x.encode("utf-8")
- for x in args]
- try:
- sp = subprocess.Popen(bytes_args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- (out, err) = sp.communicate(data)
- except OSError as e:
- if status_cb:
- status_cb('ERROR: End run command: invalid command provided\n')
- raise ProcessExecutionError(
- cmd=args, reason=e, errno=e.errno,
- stdout="-" if decode else b"-",
- stderr="-" if decode else b"-")
- finally:
- if devnull_fp:
- devnull_fp.close()
-
- # Just ensure blank instead of none.
- if capture or combine_capture:
- if not out:
- out = b''
- if not err:
- err = b''
- if decode:
- def ldecode(data, m='utf-8'):
- if not isinstance(data, bytes):
- return data
- return data.decode(m, decode)
-
- out = ldecode(out)
- err = ldecode(err)
-
- rc = sp.returncode
- if rc not in rcs:
- if status_cb:
- status_cb(
- 'ERROR: End run command: exit({code})\n'.format(code=rc))
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
- if status_cb:
- status_cb('End run command: exit({code})\n'.format(code=rc))
- return (out, err)
-
-
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
@@ -2111,8 +1881,8 @@ def make_header(comment_char="#", base='created'):
return header
-def abs_join(*paths):
- return os.path.abspath(os.path.join(*paths))
+def abs_join(base, *paths):
+ return os.path.abspath(os.path.join(base, *paths))
# shellify, takes a list of commands
@@ -2136,10 +1906,10 @@ def shellify(cmdlist, add_header=True):
if isinstance(args, (list, tuple)):
fixed = []
for f in args:
- fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
+ fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
- elif isinstance(args, six.string_types):
+ elif isinstance(args, str):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
@@ -2168,7 +1938,7 @@ def is_container():
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
- subp(helper)
+ subp.subp(helper)
return True
except (IOError, OSError):
pass
@@ -2265,7 +2035,7 @@ def expand_package_list(version_fmt, pkgs):
pkglist = []
for pkg in pkgs:
- if isinstance(pkg, six.string_types):
+ if isinstance(pkg, str):
pkglist.append(pkg)
continue
@@ -2374,7 +2144,7 @@ def find_freebsd_part(fs):
return splitted[2]
elif splitted[2] in ['label', 'gpt', 'ufs']:
target_label = fs[5:]
- (part, _err) = subp(['glabel', 'status', '-s'])
+ (part, _err) = subp.subp(['glabel', 'status', '-s'])
for labels in part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0] == target_label:
@@ -2396,10 +2166,10 @@ def get_path_dev_freebsd(path, mnt_list):
def get_mount_info_freebsd(path):
- (result, err) = subp(['mount', '-p', path], rcs=[0, 1])
+ (result, err) = subp.subp(['mount', '-p', path], rcs=[0, 1])
if len(err):
# find a path if the input is not a mounting point
- (mnt_list, err) = subp(['mount', '-p'])
+ (mnt_list, err) = subp.subp(['mount', '-p'])
path_found = get_path_dev_freebsd(path, mnt_list)
if (path_found is None):
return None
@@ -2415,8 +2185,8 @@ def get_device_info_from_zpool(zpool):
LOG.debug('Cannot get zpool info, no /dev/zfs')
return None
try:
- (zpoolstatus, err) = subp(['zpool', 'status', zpool])
- except ProcessExecutionError as err:
+ (zpoolstatus, err) = subp.subp(['zpool', 'status', zpool])
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get zpool status of %s: %s", zpool, err)
return None
if len(err):
@@ -2430,7 +2200,7 @@ def get_device_info_from_zpool(zpool):
def parse_mount(path):
- (mountoutput, _err) = subp(['mount'])
+ (mountoutput, _err) = subp.subp(['mount'])
mount_locs = mountoutput.splitlines()
# there are 2 types of mount outputs we have to parse therefore
# the regex is a bit complex. to better understand this regex see:
@@ -2503,40 +2273,6 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
return parse_mount(path)
-def is_exe(fpath):
- # return boolean indicating if fpath exists and is executable.
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
-
-def which(program, search=None, target=None):
- target = target_path(target)
-
- if os.path.sep in program:
- # if program had a '/' in it, then do not search PATH
- # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
- # so effectively we set cwd to / (or target)
- if is_exe(target_path(target, program)):
- return program
-
- if search is None:
- paths = [p.strip('"') for p in
- os.environ.get("PATH", "").split(os.pathsep)]
- if target == "/":
- search = paths
- else:
- search = [p for p in paths if p.startswith("/")]
-
- # normalize path input
- search = [os.path.abspath(p) for p in search]
-
- for path in search:
- ppath = os.path.sep.join((path, program))
- if is_exe(target_path(target, ppath)):
- return ppath
-
- return None
-
-
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
@@ -2651,8 +2387,8 @@ def human2bytes(size):
try:
num = float(num)
- except ValueError:
- raise ValueError("'%s' is not valid input." % size_in)
+ except ValueError as e:
+ raise ValueError("'%s' is not valid input." % size_in) from e
if num < 0:
raise ValueError("'%s': cannot be negative" % size_in)
@@ -2700,7 +2436,7 @@ def _call_dmidecode(key, dmidecode_path):
"""
try:
cmd = [dmidecode_path, "--string", key]
- (result, _err) = subp(cmd)
+ (result, _err) = subp.subp(cmd)
result = result.strip()
LOG.debug("dmidecode returned '%s' for '%s'", result, key)
if result.replace(".", "") == "":
@@ -2754,7 +2490,8 @@ def read_dmi_data(key):
LOG.debug("dmidata is not supported on %s", uname_arch)
return None
- dmidecode_path = which('dmidecode')
+ print("hi, now its: %s\n", subp)
+ dmidecode_path = subp.which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
@@ -2765,12 +2502,12 @@ def read_dmi_data(key):
def message_from_string(string):
if sys.version_info[:2] < (2, 7):
- return email.message_from_file(six.StringIO(string))
+ return email.message_from_file(io.StringIO(string))
return email.message_from_string(string)
def get_installed_packages(target=None):
- (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+ (out, _) = subp.subp(['dpkg-query', '--list'], target=target, capture=True)
pkgs_inst = set()
for line in out.splitlines():
@@ -2906,7 +2643,7 @@ def udevadm_settle(exists=None, timeout=None):
if timeout:
settle_cmd.extend(['--timeout=%s' % timeout])
- return subp(settle_cmd)
+ return subp.subp(settle_cmd)
def get_proc_ppid(pid):
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 1bc1899c..8560d087 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "20.1"
+__VERSION__ = "20.3"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg
index 937b07f8..bf917a95 100644
--- a/config/cloud.cfg.d/05_logging.cfg
+++ b/config/cloud.cfg.d/05_logging.cfg
@@ -44,7 +44,7 @@ _log:
class=FileHandler
level=DEBUG
formatter=arg0Formatter
- args=('/var/log/cloud-init.log',)
+ args=('/var/log/cloud-init.log', 'a', 'UTF-8')
- &log_syslog |
[handler_cloudLogHandler]
class=handlers.SysLogHandler
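The added tuple elements pass the open mode and the encoding through to logging.FileHandler, whose positional signature is (filename, mode, encoding); the equivalent direct construction:

    import logging

    # Matches args=('/var/log/cloud-init.log', 'a', 'UTF-8') above.
    handler = logging.FileHandler('/var/log/cloud-init.log', 'a', 'UTF-8')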
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 99f96ea1..2beb9b0c 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -2,7 +2,7 @@
# The top level settings are used as module
# and system configuration.
-{% if variant in ["freebsd"] %}
+{% if variant.endswith("bsd") %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
syslog_fix_perms: root:root
@@ -21,7 +21,7 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["amazon", "centos", "fedora", "rhel"] %}
+{% if variant in ["alpine", "amazon", "centos", "fedora", "rhel"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -33,7 +33,7 @@ ssh_pwauth: 0
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
-{% if variant in ["freebsd"] %}
+{% if variant.endswith("bsd") %}
# This should not be required, but leave it in place until the real cause of
# not finding -any- datasources is resolved.
datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
@@ -55,19 +55,26 @@ network:
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
+{% if variant not in ["netbsd"] %}
- seed_random
+{% endif %}
- bootcmd
- write-files
+{% if variant not in ["netbsd"] %}
- growpart
- resizefs
-{% if variant not in ["freebsd"] %}
+{% endif %}
+{% if variant not in ["freebsd", "netbsd"] %}
- disk_setup
- mounts
{% endif %}
- set_hostname
- update_hostname
-{% if variant not in ["freebsd"] %}
- update_etc_hosts
+{% if variant in ["alpine"] %}
+ - resolv_conf
+{% endif %}
+{% if not variant.endswith("bsd") %}
- ca-certs
- rsyslog
{% endif %}
@@ -100,7 +107,10 @@ cloud_config_modules:
{% if variant in ["suse"] %}
- zypper-add-repo
{% endif %}
-{% if variant not in ["freebsd"] %}
+{% if variant in ["alpine"] %}
+ - apk-configure
+{% endif %}
+{% if variant not in ["freebsd", "netbsd"] %}
- ntp
{% endif %}
- timezone
@@ -121,11 +131,9 @@ cloud_final_modules:
{% if variant in ["ubuntu", "unknown"] %}
- ubuntu-drivers
{% endif %}
-{% if variant not in ["freebsd"] %}
- puppet
- chef
- mcollective
-{% endif %}
- salt-minion
- rightscale_userdata
- scripts-vendor
@@ -143,7 +151,9 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
+{% if variant in ["alpine", "amazon", "arch", "centos", "debian",
+ "fedora", "freebsd", "netbsd", "openbsd", "rhel",
+ "suse", "ubuntu"] %}
distro: {{ variant }}
{% else %}
# Unknown/fallback distro.
@@ -158,6 +168,9 @@ system_info:
groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash
+{# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #}
+ network:
+ renderers: ['netplan', 'eni', 'sysconfig']
# Automatically discover the best ntp_client
ntp_client: auto
# Other config here will be given to the distro class and/or path classes
@@ -191,7 +204,8 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["amazon", "arch", "centos", "fedora", "rhel", "suse"] %}
+{% elif variant in ["alpine", "amazon", "arch", "centos", "fedora",
+ "rhel", "suse"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -205,13 +219,19 @@ system_info:
{% endif %}
{% if variant == "suse" %}
groups: [cdrom, users]
+{% elif variant == "alpine" %}
+ groups: [adm, sudo]
{% elif variant == "arch" %}
groups: [wheel, users]
{% else %}
groups: [wheel, adm, systemd-journal]
{% endif %}
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+{% if variant == "alpine" %}
+ shell: /bin/ash
+{% else %}
shell: /bin/bash
+{% endif %}
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
@@ -226,4 +246,24 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/tcsh
+{% elif variant in ["netbsd"] %}
+ default_user:
+ name: netbsd
+ lock_passwd: True
+ gecos: NetBSD
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/sh
+{% elif variant in ["openbsd"] %}
+ default_user:
+ name: openbsd
+ lock_passwd: True
+ gecos: OpenBSD
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/ksh
+{% endif %}
+{% if variant in ["freebsd", "netbsd", "openbsd"] %}
+ network:
+ renderers: ['{{ variant }}']
{% endif %}
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 00000000..76e9000a
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,183 @@
+import os
+from unittest import mock
+
+import pytest
+import httpretty as _httpretty
+
+from cloudinit import helpers, subp
+
+
+class _FixtureUtils:
+ """A namespace for fixture helper functions, used by fixture_utils.
+
+ These helper functions are all defined as staticmethods so they are
+ effectively functions; they are defined in a class only to give us a
+ namespace so calling them can look like
+ ``fixture_utils.fixture_util_function()`` in test code.
+ """
+
+ @staticmethod
+ def closest_marker_args_or(request, marker_name: str, default):
+ """Get the args for closest ``marker_name`` or return ``default``
+
+ :param request:
+ A pytest request, as passed to a fixture.
+ :param marker_name:
+ The name of the marker to look for
+ :param default:
+ The value to return if ``marker_name`` is not found.
+
+ :return:
+ The args for the closest ``marker_name`` marker, or ``default``
+ if no such marker is found.
+ """
+ try:
+ marker = request.node.get_closest_marker(marker_name)
+ except AttributeError:
+ # Older versions of pytest don't have the new API
+ marker = request.node.get_marker(marker_name)
+ if marker is not None:
+ return marker.args
+ return default
+
+ @staticmethod
+ def closest_marker_first_arg_or(request, marker_name: str, default):
+ """Get the first arg for closest ``marker_name`` or return ``default``
+
+ This is a convenience wrapper around closest_marker_args_or, see there
+ for full details.
+ """
+ result = _FixtureUtils.closest_marker_args_or(
+ request, marker_name, [default]
+ )
+ if not result:
+ raise TypeError(
+ "Missing expected argument to {} marker".format(marker_name)
+ )
+ return result[0]
+
+
+@pytest.yield_fixture(autouse=True)
+def disable_subp_usage(request, fixture_utils):
+ """
+ Across all (pytest) tests, ensure that subp.subp is not invoked.
+
+ Note that this can only catch invocations where the subp module is imported
+ and ``subp.subp(...)`` is called. ``from cloudinit.subp import subp``
+ imports happen before the patching here (or the CiTestCase monkey-patching)
+ happens, so are left untouched.
+
+ To allow a particular test method or class to use subp.subp you can mark it
+ as such::
+
+ @pytest.mark.allow_all_subp
+ def test_whoami(self):
+ subp.subp(["whoami"])
+
+ To instead allow subp.subp usage for a specific command, you can use the
+ ``allow_subp_for`` mark::
+
+ @pytest.mark.allow_subp_for("bash")
+ def test_bash(self):
+ subp.subp(["bash"])
+
+ You can pass multiple commands as values; they will all be permitted::
+
+ @pytest.mark.allow_subp_for("bash", "whoami")
+ def test_several_things(self):
+ subp.subp(["bash"])
+ subp.subp(["whoami"])
+
+ This fixture (roughly) mirrors the functionality of
+ CiTestCase.allowed_subp. N.B. While autouse fixtures do affect non-pytest
+ tests, CiTestCase's allowed_subp does take precedence (and we have
+ TestDisableSubpUsageInTestSubclass to confirm that).
+ """
+ allow_subp_for = fixture_utils.closest_marker_args_or(
+ request, "allow_subp_for", None
+ )
+ # Because the mark doesn't take arguments, `allow_all_subp` will be set to
+ # [] if the marker is present, so explicit None checks are required
+ allow_all_subp = fixture_utils.closest_marker_args_or(
+ request, "allow_all_subp", None
+ )
+
+ if allow_all_subp is not None and allow_subp_for is None:
+ # Only allow_all_subp specified, don't mock subp.subp
+ yield
+ return
+
+ if allow_all_subp is None and allow_subp_for is None:
+ # No marks, default behaviour; disallow all subp.subp usage
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError("Unexpectedly used subp.subp")
+
+ elif allow_all_subp is not None and allow_subp_for is not None:
+ # Both marks, ambiguous request; raise an exception on all subp usage
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError(
+ "Test marked both allow_all_subp and allow_subp_for: resolve"
+ " this either by modifying your test code, or by modifying"
+ " disable_subp_usage to handle precedence."
+ )
+ else:
+ # Look this up before our patch is in place, so we have access to
+ # the real implementation in side_effect
+ real_subp = subp.subp
+
+ def side_effect(args, *other_args, **kwargs):
+ cmd = args[0]
+ if cmd not in allow_subp_for:
+ raise AssertionError(
+ "Unexpectedly used subp.subp to call {} (allowed:"
+ " {})".format(cmd, ",".join(allow_subp_for))
+ )
+ return real_subp(args, *other_args, **kwargs)
+
+ with mock.patch("cloudinit.subp.subp", autospec=True) as m_subp:
+ m_subp.side_effect = side_effect
+ yield
+
+
+@pytest.fixture(scope="session")
+def fixture_utils():
+ """Return a namespace containing fixture utility functions.
+
+ See :py:class:`_FixtureUtils` for further details."""
+ return _FixtureUtils
+
+
+@pytest.yield_fixture
+def httpretty():
+ """
+ Enable HTTPretty for the duration of the testcase, resetting before and after.
+
+ This will also ensure allow_net_connect is set to False, and temporarily
+ unset http_proxy in os.environ if present (to work around
+ https://github.com/gabrielfalcao/HTTPretty/issues/122).
+ """
+ restore_proxy = os.environ.pop("http_proxy", None)
+ _httpretty.HTTPretty.allow_net_connect = False
+ _httpretty.reset()
+ _httpretty.enable()
+
+ yield _httpretty
+
+ _httpretty.disable()
+ _httpretty.reset()
+ if restore_proxy is not None:
+ os.environ["http_proxy"] = restore_proxy
+
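A hypothetical test using the fixture, stubbing a response before the code under test issues a request:

    def test_stubbed_get(httpretty):
        httpretty.register_uri(
            httpretty.GET, 'http://169.254.169.254/', body='ok'
        )
        # ... exercise code that fetches the URL ...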
+
+@pytest.fixture
+def paths(tmpdir):
+ """
+ Return a helpers.Paths object configured to use a tmpdir.
+
+ (This uses the builtin tmpdir fixture.)
+ """
+ dirs = {
+ "cloud_dir": tmpdir.mkdir("cloud_dir").strpath,
+ "run_dir": tmpdir.mkdir("run_dir").strpath,
+ }
+ return helpers.Paths(dirs)
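And a hypothetical test exercising the paths fixture:

    def test_dirs_live_under_tmpdir(paths, tmpdir):
        assert paths.cloud_dir.startswith(tmpdir.strpath)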
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index ff8206f6..004894b7 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -1,3 +1,4 @@
+#cloud-config
# apt_pipelining (configure Acquire::http::Pipeline-Depth)
# Default: disables HTTP pipelining. Certain web servers, such
# as S3 do not pipeline properly (LP: #948461).
@@ -141,7 +142,7 @@ apt:
# as above, allowing to have one config for different per arch mirrors
# security is optional, if not defined it is set to the same value as primary
security:
- uri: http://security.ubuntu.com/ubuntu
+ - uri: http://security.ubuntu.com/ubuntu
# If search_dns is set for security the searched pattern is:
# <distro>-security-mirror
@@ -222,19 +223,19 @@ apt:
# This allows merging between multiple input files than a list like:
# cloud-config1
# sources:
- # s1: {'key': 'key1', 'source': 'source1'}
+ # s1: {'key': 'key1', 'source': 'source1'}
# cloud-config2
# sources:
- # s2: {'key': 'key2'}
- # s1: {'keyserver': 'foo'}
+ # s2: {'key': 'key2'}
+ # s1: {'keyserver': 'foo'}
# This would be merged to
# sources:
- # s1:
- # keyserver: foo
- # key: key1
- # source: source1
- # s2:
- # key: key2
+ # s1:
+ # keyserver: foo
+ # key: key1
+ # source: source1
+ # s2:
+ # key: key2
#
# The following examples number the subfeatures per sources entry to ease
# identification in discussions.
@@ -314,15 +315,15 @@ apt:
# As with keyid's this can be specified with or without some actual source
# content.
key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.0.10
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
+ mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+ qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+ 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+ IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+ 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+ t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+ uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+ =Y2oI
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/doc/examples/cloud-config-boot-cmds.txt b/doc/examples/cloud-config-boot-cmds.txt
index 84e487a5..f9357b52 100644
--- a/doc/examples/cloud-config-boot-cmds.txt
+++ b/doc/examples/cloud-config-boot-cmds.txt
@@ -11,5 +11,5 @@
# - the INSTANCE_ID variable will be set to the current instance id.
# - you can use 'cloud-init-per' command to help only run once
bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+ - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt
index 75c9aeed..241fbf9b 100644
--- a/doc/examples/cloud-config-chef-oneiric.txt
+++ b/doc/examples/cloud-config-chef-oneiric.txt
@@ -13,73 +13,74 @@
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
apt:
sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
+ source1:
+ source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.9 (GNU/Linux)
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
+ lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
+ DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
+ wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
+ EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
+ w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
+ AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
+ QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
+ Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
+ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
+ Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
+ zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
+ DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
+ 0GLl8EkfA8uhluM=
+ =zKAm
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
- # 11.10 will fail if install_type is "gems" (LP: #960576)
- install_type: "packages"
+ # 11.10 will fail if install_type is "gems" (LP: #960576)
+ install_type: "packages"
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
+ # Chef settings
+ server_url: "https://chef.yourorg.com:4000"
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
- # value of validation_cert is not used if validation_key defined,
- # but variable needs to be defined (LP: #960547)
- validation_cert: "unused"
- validation_key: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
+ # value of validation_cert is not used if validation_key defined,
+ # but variable needs to be defined (LP: #960547)
+ validation_cert: "unused"
+ validation_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # A run list for a first boot json
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 2320e01a..bb4b058c 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -52,55 +52,58 @@ apt:
chef:
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json, an example (not required)
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # Valid values are 'accept' and 'accept-no-persist'
+ chef_license: "accept"
+
+ # Valid values are 'gems' and 'packages' and 'omnibus'
+ install_type: "packages"
+
+ # Boolean: run 'install_type' code even if chef-client
+ # appears already installed.
+ force_install: false
+
+ # Chef settings
+ server_url: "https://chef.yourorg.com"
+
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
+
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
+
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
+ # if validation_cert's value is "system" then it is expected
+ # that the file already exists on the system.
+ validation_cert: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
+
+ # A run list for a first boot json, an example (not required)
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
keepalive: "off"
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.chef.io/chef/install.sh"
+ # if install_type is 'omnibus', change the url to download
+ omnibus_url: "https://www.chef.io/chef/install.sh"
- # if install_type is 'omnibus', pass pinned version string
- # to the install script
- omnibus_version: "12.3.0"
+ # if install_type is 'omnibus', pass pinned version string
+ # to the install script
+ omnibus_version: "12.3.0"
- # If encrypted data bags are used, the client needs to have a secrets file
- # configured to decrypt them
- encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
+ # If encrypted data bags are used, the client needs to have a secrets file
+ # configured to decrypt them
+ encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
# Capture all subprocess output into a logfile
# Useful for troubleshooting cloud-init issues
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 52a2476b..13bb687c 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -1,3 +1,5 @@
+#cloud-config
+
# Documentation on data sources configuration options
datasource:
# Ec2
@@ -38,10 +40,10 @@ datasource:
# these are optional, but allow you to basically provide a datasource
# right here
user-data: |
- # This is the user-data verbatim
+ # This is the user-data verbatim
meta-data:
- instance-id: i-87018aed
- local-hostname: myhost.internal
+ instance-id: i-87018aed
+ local-hostname: myhost.internal
Azure:
agent_command: [service, walinuxagent, start]
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index cac44d8c..5c6de77e 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Cloud-init supports the creation of simple partition tables and file systems
# on devices.
@@ -6,47 +7,47 @@
# (Not implemented yet, but provided for future documentation)
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: True
- overwrite: False
+ ephemeral0:
+ table_type: 'mbr'
+ layout: True
+ overwrite: False
fs_setup:
- - label: None,
- filesystem: ext3
- device: ephemeral0
- partition: auto
+ - label: None
+ filesystem: ext3
+ device: ephemeral0
+ partition: auto
# Default disk definitions for Microsoft Azure
# ------------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: True
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: True
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.1
- replace_fs: ntfs
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.1
+ replace_fs: ntfs
# Data disks definitions for Microsoft Azure
# ------------------------------------------
disk_setup:
- /dev/disk/azure/scsi1/lun0:
- table_type: gpt
- layout: True
- overwrite: True
+ /dev/disk/azure/scsi1/lun0:
+ table_type: gpt
+ layout: True
+ overwrite: True
fs_setup:
- - device: /dev/disk/azure/scsi1/lun0
- partition: 1
- filesystem: ext4
+ - device: /dev/disk/azure/scsi1/lun0
+ partition: 1
+ filesystem: ext4
# Default disk definitions for SmartOS
@@ -54,15 +55,15 @@ fs_setup:
device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: False
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: False
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.0
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.0
# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
# not be automatically added to the mounts.
@@ -77,87 +78,87 @@ fs_setup:
# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: 'auto'
- /dev/xvdh:
- table_type: 'mbr'
- layout:
- - 33
- - [33, 82]
- - 33
- overwrite: True
+ ephemeral0:
+ table_type: 'mbr'
+ layout: 'auto'
+ /dev/xvdh:
+ table_type: 'mbr'
+ layout:
+ - 33
+ - [33, 82]
+ - 33
+ overwrite: True
# The format is a list of dicts of dicts. The first value is the name of the
# device and the subsequent values define how to create and layout the
# partition.
# The general format is:
-# disk_setup:
-# <DEVICE>:
-# table_type: 'mbr'
-# layout: <LAYOUT|BOOL>
-# overwrite: <BOOL>
+# disk_setup:
+# <DEVICE>:
+# table_type: 'mbr'
+# layout: <LAYOUT|BOOL>
+# overwrite: <BOOL>
#
# Where:
-# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
-# values which are specific to the cloud. For these devices
-# Cloud-init will look up what the real devices is and then
-# use it.
+# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
+# values which are specific to the cloud. For these devices
+# Cloud-init will look up what the real device is and then
+# use it.
#
-# For other devices, the kernel device name is used. At this
-# time only simply kernel devices are supported, meaning
-# that device mapper and other targets may not work.
+# For other devices, the kernel device name is used. At this
+# time only simple kernel devices are supported, meaning
+# that device mapper and other targets may not work.
#
-# Note: At this time, there is no handling or setup of
-# device mapper targets.
+# Note: At this time, there is no handling or setup of
+# device mapper targets.
#
-# table_type=<TYPE>: Currently the following are supported:
-# 'mbr': default and setups a MS-DOS partition table
-# 'gpt': setups a GPT partition table
+# table_type=<TYPE>: Currently the following are supported:
+# 'mbr': the default; sets up an MS-DOS partition table
+# 'gpt': sets up a GPT partition table
#
-# Note: At this time only 'mbr' and 'gpt' partition tables
-# are allowed. It is anticipated in the future that
-# we'll also have "RAID" to create a mdadm RAID.
+# Note: At this time only 'mbr' and 'gpt' partition tables
+# are allowed. It is anticipated in the future that
+# we'll also have "RAID" to create a mdadm RAID.
#
-# layout={...}: The device layout. This is a list of values, with the
-# percentage of disk that partition will take.
-# Valid options are:
-# [<SIZE>, [<SIZE>, <PART_TYPE]]
+# layout={...}: The device layout. This is a list of values, with the
+# percentage of disk that partition will take.
+# Valid options are:
+# [<SIZE>, [<SIZE>, <PART_TYPE]]
#
-# Where <SIZE> is the _percentage_ of the disk to use, while
-# <PART_TYPE> is the numerical value of the partition type.
+# Where <SIZE> is the _percentage_ of the disk to use, while
+# <PART_TYPE> is the numerical value of the partition type.
#
-# The following setups two partitions, with the first
-# partition having a swap label, taking 1/3 of the disk space
-# and the remainder being used as the second partition.
-# /dev/xvdh':
-# table_type: 'mbr'
-# layout:
-# - [33,82]
-# - 66
-# overwrite: True
+# The following sets up two partitions, with the first
+# partition having a swap label, taking 1/3 of the disk space
+# and the remainder being used as the second partition.
+# /dev/xvdh:
+# table_type: 'mbr'
+# layout:
+# - [33,82]
+# - 66
+# overwrite: True
#
-# When layout is "true" it means single partition the entire
-# device.
+# When layout is "true" it means single partition the entire
+# device.
#
-# When layout is "false" it means don't partition or ignore
-# existing partitioning.
+# When layout is "false" it means don't partition or ignore
+# existing partitioning.
#
-# If layout is set to "true" and overwrite is set to "false",
-# it will skip partitioning the device without a failure.
+# If layout is set to "true" and overwrite is set to "false",
+# it will skip partitioning the device without a failure.
#
-# overwrite=<BOOL>: This describes whether to ride with saftey's on and
-# everything holstered.
+# overwrite=<BOOL>: This describes whether to ride with saftey's on and
+# everything holstered.
#
-# 'false' is the default, which means that:
-# 1. The device will be checked for a partition table
-# 2. The device will be checked for a file system
-# 3. If either a partition of file system is found, then
-# the operation will be _skipped_.
+# 'false' is the default, which means that:
+# 1. The device will be checked for a partition table
+# 2. The device will be checked for a file system
+# 3. If either a partition or file system is found, then
+# the operation will be _skipped_.
#
-# 'true' is cowboy mode. There are no checks and things are
-# done blindly. USE with caution, you can do things you
-# really, really don't want to do.
+# 'true' is cowboy mode. There are no checks and things are
+# done blindly. USE with caution, you can do things you
+# really, really don't want to do.
#
#
# fs_setup: Setup the file system
@@ -166,101 +167,101 @@ disk_setup:
# fs_setup describes the how the file systems are supposed to look.
fs_setup:
- - label: ephemeral0
- filesystem: 'ext3'
- device: 'ephemeral0'
- partition: 'auto'
- - label: mylabl2
- filesystem: 'ext4'
- device: '/dev/xvda1'
- - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
- label: mylabl3
- filesystem: 'btrfs'
- device: '/dev/xvdh'
+ - label: ephemeral0
+ filesystem: 'ext3'
+ device: 'ephemeral0'
+ partition: 'auto'
+ - label: mylabl2
+ filesystem: 'ext4'
+ device: '/dev/xvda1'
+ - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ label: mylabl3
+ filesystem: 'btrfs'
+ device: '/dev/xvdh'
# The general format is:
-# fs_setup:
-# - label: <LABEL>
-# filesystem: <FS_TYPE>
-# device: <DEVICE>
-# partition: <PART_VALUE>
-# overwrite: <OVERWRITE>
-# replace_fs: <FS_TYPE>
+# fs_setup:
+# - label: <LABEL>
+# filesystem: <FS_TYPE>
+# device: <DEVICE>
+# partition: <PART_VALUE>
+# overwrite: <OVERWRITE>
+# replace_fs: <FS_TYPE>
#
# Where:
-# <LABEL>: The file system label to be used. If set to None, no label is
-# used.
+# <LABEL>: The file system label to be used. If set to None, no label is
+# used.
#
-# <FS_TYPE>: The file system type. It is assumed that the there
-# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
-# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
-# and vfat by default.
+# <FS_TYPE>: The file system type. It is assumed that the there
+# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
+# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
+# and vfat by default.
#
-# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
-# are allowed and the actual device is acquired from the cloud datasource.
-# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
-# label as 'ephemeralX' otherwise there may be issues with the mounting
-# of the ephemeral storage layer.
+# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
+# are allowed and the actual device is acquired from the cloud datasource.
+# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
+# label as 'ephemeralX' otherwise there may be issues with the mounting
+# of the ephemeral storage layer.
#
-# If you define the device as 'ephemeralX.Y' then Y will be interpetted
-# as a partition value. However, ephermalX.0 is the _same_ as ephemeralX.
+# If you define the device as 'ephemeralX.Y' then Y will be interpreted
+# as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
#
-# <PART_VALUE>:
-# Partition definitions are overwriten if you use the '<DEVICE>.Y' notation.
+# <PART_VALUE>:
+# Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
#
-# The valid options are:
-# "auto|any": tell cloud-init not to care whether there is a partition
-# or not. Auto will use the first partition that does not contain a
-# file system already. In the absence of a partition table, it will
-# put it directly on the disk.
+# The valid options are:
+# "auto|any": tell cloud-init not to care whether there is a partition
+# or not. Auto will use the first partition that does not contain a
+# file system already. In the absence of a partition table, it will
+# put it directly on the disk.
#
-# "auto": If a file system that matches the specification in terms of
-# label, type and device, then cloud-init will skip the creation of
-# the file system.
+# "auto": If a file system that matches the specification in terms of
+# label, type and device, then cloud-init will skip the creation of
+# the file system.
#
-# "any": If a file system that matches the file system type and device,
-# then cloud-init will skip the creation of the file system.
+# "any": If a file system that matches the file system type and device,
+# then cloud-init will skip the creation of the file system.
#
-# Devices are selected based on first-detected, starting with partitions
-# and then the raw disk. Consider the following:
-# NAME FSTYPE LABEL
-# xvdb
-# |-xvdb1 ext4
-# |-xvdb2
-# |-xvdb3 btrfs test
-# \-xvdb4 ext4 test
+# Devices are selected based on first-detected, starting with partitions
+# and then the raw disk. Consider the following:
+# NAME FSTYPE LABEL
+# xvdb
+# |-xvdb1 ext4
+# |-xvdb2
+# |-xvdb3 btrfs test
+# \-xvdb4 ext4 test
#
-# If you ask for 'auto', label of 'test, and file system of 'ext4'
-# then cloud-init will select the 2nd partition, even though there
-# is a partition match at the 4th partition.
+# If you ask for 'auto', a label of 'test', and a file system of 'ext4'
+# then cloud-init will select the 2nd partition, even though there
+# is a partition match at the 4th partition.
#
-# If you ask for 'any' and a label of 'test', then cloud-init will
-# select the 1st partition.
+# If you ask for 'any' and a label of 'test', then cloud-init will
+# select the 1st partition.
#
-# If you ask for 'auto' and don't define label, then cloud-init will
-# select the 1st partition.
+# If you ask for 'auto' and don't define label, then cloud-init will
+# select the 1st partition.
#
-# In general, if you have a specific partition configuration in mind,
-# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formating ephemeral storage or
-# for simple schemes.
+# In general, if you have a specific partition configuration in mind,
+# you should define either the device or the partition number. 'auto'
+# and 'any' are specifically intended for formatting ephemeral storage or
+# for simple schemes.
#
-# "none": Put the file system directly on the device.
+# "none": Put the file system directly on the device.
#
-# <NUM>: where NUM is the actual partition number.
+# <NUM>: where NUM is the actual partition number.
#
-# <OVERWRITE>: Defines whether or not to overwrite any existing
-# filesystem.
+# <OVERWRITE>: Defines whether or not to overwrite any existing
+# filesystem.
#
-# "true": Indiscriminately destroy any pre-existing file system. Use at
-# your own peril.
+# "true": Indiscriminately destroy any pre-existing file system. Use at
+# your own peril.
#
-# "false": If an existing file system exists, skip the creation.
+# "false": If an existing file system exists, skip the creation.
#
-# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
-# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
-# unless you define a label, this requires the use of the 'any' partition
-# directive.
+# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
+# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
+# unless you define a label, this requires the use of the 'any' partition
+# directive.
#
# Behavior Caveat: The default behavior is to _check_ if the file system exists.
-# If a file system matches the specification, then the operation is a no-op.
+# If a file system matches the specification, then the operation is a no-op.
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index d7ff8ef8..88be57ce 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Landscape-client configuration
#
# Anything under the top 'landscape: client' entry
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
index 67735682..a701616a 100644
--- a/doc/examples/cloud-config-mcollective.txt
+++ b/doc/examples/cloud-config-mcollective.txt
@@ -5,45 +5,45 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
mcollective:
- # Every key present in the conf object will be added to server.cfg:
- # key: value
- #
- # For example the configuration below will have the following key
- # added to server.cfg:
- # plugin.stomp.host: dbhost
- conf:
- plugin.stomp.host: dbhost
- # This will add ssl certs to mcollective
- # WARNING WARNING WARNING
- # The ec2 metadata service is a network service, and thus is readable
- # by non-root users on the system (ie: 'ec2metadata --user-data')
- # If you want security for this, please use include-once + SSL urls
- public-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
- private-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to server.cfg:
+ # key: value
+ #
+ # For example the configuration below will have the following key
+ # added to server.cfg:
+ # plugin.stomp.host: dbhost
+ conf:
+ plugin.stomp.host: dbhost
+ # This will add ssl certs to mcollective
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is readable
+ # by non-root users on the system (ie: 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ public-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ private-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index bce28bf8..43f80ec9 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -41,6 +41,6 @@ mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
# swap can also be set up by the 'mounts' module
# default is to not create any swap files, because 'size' is set to 0
swap:
- filename: /swap.img
- size: "auto" # or size in bytes
- maxsize: size in bytes
+ filename: /swap.img
+ size: "auto" # or size in bytes
+ maxsize: size in bytes
diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt
index 7f2b69f7..b30c14e3 100644
--- a/doc/examples/cloud-config-phone-home.txt
+++ b/doc/examples/cloud-config-phone-home.txt
@@ -5,10 +5,10 @@
# url
# default: none
# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
+# url: http://my.foo.bar/$INSTANCE/
+# post: all
+# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
index b470153d..9cd56814 100644
--- a/doc/examples/cloud-config-power-state.txt
+++ b/doc/examples/cloud-config-power-state.txt
@@ -33,8 +33,8 @@
# for future use.
#
power_state:
- delay: "+30"
- mode: poweroff
- message: Bye Bye
- timeout: 30
- condition: True
+ delay: "+30"
+ mode: poweroff
+ message: Bye Bye
+ timeout: 30
+ condition: True
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index cd3c2f8e..3c7e2da7 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -5,47 +5,47 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
-    # The puppetmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to puppet.conf:
+ # [name]
+ # subkey=value
+ #
+ # For example the configuration below will have the following section
+ # added to puppet.conf:
+ # [puppetd]
+ # server=puppetmaster.example.org
+ # certname=i-0123456.ip-X-Y-Z.cloud.internal
+ #
+   # The puppetmaster ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem
+ conf:
+ agent:
+ server: "puppetmaster.example.org"
+ # certname supports substitutions at runtime:
+ # %i: instanceid
+ # Example: i-0123456
+ # %f: fqdn of the machine
+ # Example: ip-X-Y-Z.cloud.internal
+ #
+ # NB: the certname will automatically be lowercased as required by puppet
+ certname: "%i.%f"
+ # ca_cert is a special case. It won't be added to puppet.conf.
+ # It holds the puppetmaster certificate in pem format.
+ # It should be a multi-line string (using the | yaml notation for
+ # multi-line strings).
+ # The puppetmaster certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ #
+ ca_cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
index ee00078f..80bde303 100644
--- a/doc/examples/cloud-config-reporting.txt
+++ b/doc/examples/cloud-config-reporting.txt
@@ -4,14 +4,14 @@
## A 'webhook' and a 'log' type.
## It also disables the built in default 'log'
reporting:
- smtest:
- type: webhook
- endpoint: "http://myhost:8000/"
- consumer_key: "ckey_foo"
- consumer_secret: "csecret_foo"
- token_key: "tkey_foo"
- token_secret: "tkey_foo"
- smlogger:
- type: log
- level: WARN
- log: null
+ smtest:
+ type: webhook
+ endpoint: "http://myhost:8000/"
+ consumer_key: "ckey_foo"
+ consumer_secret: "csecret_foo"
+ token_key: "tkey_foo"
+ token_secret: "tkey_foo"
+ smlogger:
+ type: log
+ level: WARN
+ log: null
diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt
index be121338..5cc903a2 100644
--- a/doc/examples/cloud-config-rh_subscription.txt
+++ b/doc/examples/cloud-config-rh_subscription.txt
@@ -14,36 +14,36 @@
# /etc/rhsm/rhs.conf file
rh_subscription:
- username: joe@foo.bar
+ username: joe@foo.bar
- ## Quote your password if it has symbols to be safe
- password: '1234abcd'
+ ## Quote your password if it has symbols to be safe
+ password: '1234abcd'
- ## If you prefer, you can use the activation key and
- ## org instead of username and password. Be sure to
- ## comment out username and password
+ ## If you prefer, you can use the activation key and
+ ## org instead of username and password. Be sure to
+ ## comment out username and password
- #activation-key: foobar
- #org: 12345
+ #activation-key: foobar
+ #org: 12345
- ## Uncomment to auto-attach subscriptions to your system
- #auto-attach: True
+ ## Uncomment to auto-attach subscriptions to your system
+ #auto-attach: True
- ## Uncomment to set the service level for your
- ## subscriptions
- #service-level: self-support
+ ## Uncomment to set the service level for your
+ ## subscriptions
+ #service-level: self-support
- ## Uncomment to add pools (needs to be a list of IDs)
- #add-pool: []
+ ## Uncomment to add pools (needs to be a list of IDs)
+ #add-pool: []
- ## Uncomment to add or remove yum repos
- ## (needs to be a list of repo IDs)
- #enable-repo: []
- #disable-repo: []
+ ## Uncomment to add or remove yum repos
+ ## (needs to be a list of repo IDs)
+ #enable-repo: []
+ #disable-repo: []
- ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
- #rhsm-baseurl: http://url
+ ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
+ #rhsm-baseurl: http://url
- ## Uncomment to alter the server hostname in
- ## /etc/rhsm/rhsm.conf
- #server-hostname: foo.bar.com
+ ## Uncomment to alter the server hostname in
+ ## /etc/rhsm/rhsm.conf
+ #server-hostname: foo.bar.com
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
index 28ea1f16..d28dd38e 100644
--- a/doc/examples/cloud-config-rsyslog.txt
+++ b/doc/examples/cloud-config-rsyslog.txt
@@ -1,3 +1,4 @@
+#cloud-config
## the rsyslog module allows you to configure the system's syslog.
## configuration of syslog is under the top level cloud-config
## entry 'rsyslog'.
@@ -5,22 +6,22 @@
## Example:
#cloud-config
rsyslog:
- remotes:
- # udp to host 'maas.mydomain' port 514
- maashost: maas.mydomain
- # udp to ipv4 host on port 514
- maas: "@[10.5.1.56]:514"
-  # tcp to ipv6 host on port 555
- maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- config_dir: /etc/rsyslog.d
- config_filename: 20-cloud-config.conf
- service_reload_command: [your, syslog, reload, command]
+ remotes:
+ # udp to host 'maas.mydomain' port 514
+ maashost: maas.mydomain
+ # udp to ipv4 host on port 514
+ maas: "@[10.5.1.56]:514"
+    # tcp to ipv6 host on port 555
+ maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ config_dir: /etc/rsyslog.d
+ config_filename: 20-cloud-config.conf
+ service_reload_command: [your, syslog, reload, command]
## Additionally the following legacy format is supported
## it is converted into the format above before use.
@@ -28,11 +29,11 @@ rsyslog:
## rsyslog_dir -> rsyslog/config_dir
## rsyslog -> rsyslog/configs
# rsyslog:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
+# - "*.* @@192.158.1.1"
+# - content: "*.* @@192.0.2.1:10514"
+# filename: 01-example.conf
+# - content: |
+# *.* @@syslogd.example.com
# rsyslog_filename: 20-cloud-config.conf
# rsyslog_dir: /etc/rsyslog.d
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index f588bfbc..b593cdd1 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -7,6 +7,11 @@ groups:
- cloud-users
# Add users to the system. Users are added after groups are added.
+# Note: Most of these configuration options will not be honored if the user
+# already exists. The following options are the exceptions; they are
+# applied to already-existing users:
+# - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
+# 'ssh_authorized_keys', 'ssh_redirect_user'.
users:
- default
- name: foobar
@@ -84,7 +89,7 @@ users:
# use <default_username> instead. This option only disables cloud
# provided public-keys. An error will be raised if ssh_authorized_keys
# or ssh_import_id is provided for the same user.
-#
+#
# ssh_authorized_keys.
# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
# strings or False to explicitly deny sudo usage. Examples:
@@ -120,10 +125,10 @@ users:
# to have the 'ubuntu' user in addition to other users, you need to instruct
# cloud-init that you also want the default user. To do this use the following
# syntax:
-# users:
-# - default
-# - bob
-# - ....
+# users:
+# - default
+# - bob
+# - ....
# foobar: ...
#
# users[0] (the first user in users) overrides the user directive.
@@ -131,10 +136,10 @@ users:
# The 'default' user above references the distro's config:
# system_info:
# default_user:
-# name: Ubuntu
-# plain_text_passwd: 'ubuntu'
-# home: /home/ubuntu
-# shell: /bin/bash
-# lock_passwd: True
-# gecos: Ubuntu
-# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
+# name: Ubuntu
+# plain_text_passwd: 'ubuntu'
+# home: /home/ubuntu
+# shell: /bin/bash
+# lock_passwd: True
+# gecos: Ubuntu
+# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
index 7f90847b..920d12e8 100644
--- a/doc/examples/cloud-config-vendor-data.txt
+++ b/doc/examples/cloud-config-vendor-data.txt
@@ -7,8 +7,8 @@
# vendordata. Users of the end system are given ultimate control.
#
vendor_data:
- enabled: True
- prefix: /usr/bin/ltrace
+ enabled: True
+ prefix: /usr/bin/ltrace
# enabled: whether it is enabled or not
# prefix: the command to run before any vendor scripts.
diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt
index ec98bc93..6c67c503 100644
--- a/doc/examples/cloud-config-write-files.txt
+++ b/doc/examples/cloud-config-write-files.txt
@@ -8,26 +8,26 @@
#
# Note: Content strings here are truncated for example purposes.
write_files:
-- encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
-- content: |
- # My new /etc/sysconfig/samba file
+- encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+- content: |
+ # My new /etc/sysconfig/samba file
- SMBDOPTIONS="-D"
- path: /etc/sysconfig/samba
-- content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- ....
- path: /bin/arch
- permissions: '0555'
-- encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /usr/bin/hello
- permissions: '0755'
+ SMBDOPTIONS="-D"
+ path: /etc/sysconfig/samba
+- content: !!binary |
+ f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
+ AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
+ AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
+ ....
+ path: /bin/arch
+ permissions: '0555'
+- encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt
index ab2c031e..e8f2bbb4 100644
--- a/doc/examples/cloud-config-yum-repo.txt
+++ b/doc/examples/cloud-config-yum-repo.txt
@@ -6,15 +6,15 @@
# The following example adds the file /etc/yum.repos.d/epel_testing.repo
# which can then subsequently be used by yum for later operations.
yum_repos:
- # The name of the repository
- epel-testing:
- # Any repository configuration options
- # See: man yum.conf
- #
- # This one is required!
- baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
- enabled: false
- failovermethod: priority
- gpgcheck: true
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
- name: Extra Packages for Enterprise Linux 5 - Testing
+ # The name of the repository
+ epel-testing:
+ # Any repository configuration options
+ # See: man yum.conf
+ #
+ # This one is required!
+ baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
+ enabled: false
+ failovermethod: priority
+ gpgcheck: true
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
+ name: Extra Packages for Enterprise Linux 5 - Testing
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index eb84dcf5..f3ae5e68 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -192,8 +192,8 @@ cloud_config_modules:
# ssh_import_id: [ user1, user2 ]
# ssh_import_id will feed the list in that variable to
-# ssh-import-id, so that public keys stored in launchpad
-# can easily be imported into the configured user
+# ssh-import-id, so that public keys stored in launchpad
+# can easily be imported into the configured user
# This can be a single string ('smoser') or a list ([smoser, kirkland])
ssh_import_id: [smoser]
@@ -202,14 +202,15 @@ ssh_import_id: [smoser]
# See debconf-set-selections man page.
#
# Default: none
-#
-debconf_selections: | # Need to preserve newlines
- # Force debconf priority to critical.
- debconf debconf/priority select critical
+#
+debconf_selections:
+ # Force debconf priority to critical.
+ set1: debconf debconf/priority select critical
- # Override default frontend to readline, but allow user to select.
- debconf debconf/frontend select readline
- debconf debconf/frontend seen false
+ # Override default frontend to readline, but allow user to select.
+ set2: |
+ debconf debconf/frontend select readline
+ debconf debconf/frontend seen false
# manage byobu defaults
# byobu_by_default:
@@ -235,7 +236,7 @@ disable_root: false
# The string '$USER' will be replaced with the username of the default user.
# The string '$DISABLE_USER' will be replaced with the username to disable.
#
-# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10"
+# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10;exit 142"
# disable ssh access for non-root-users
# To disable ssh access for non-root users, ssh_redirect_user: true can be
@@ -375,11 +376,11 @@ final_message: "The system is finally up, after $UPTIME seconds"
# the special entry "&1" for an error means "same location as stdout"
# (Note, that '&1' has meaning in yaml, so it must be quoted)
output:
- init: "> /var/log/my-cloud-init.log"
- config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
- final:
- output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
- error: "&1"
+ init: "> /var/log/my-cloud-init.log"
+ config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
+ final:
+ output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
+ error: "&1"
# phone_home: if this dictionary is present, then the phone_home
@@ -392,8 +393,8 @@ output:
# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
# timezone: set the timezone for this instance
# the value of 'timezone' must exist in /usr/share/zoneinfo
@@ -407,7 +408,7 @@ timezone: US/Eastern
# then 'L' will be initially created with root:root ownership (during
# cloud-init), and then at cloud-config time (when syslog is available)
# the syslog daemon will be unable to write to the file.
-#
+#
# to remedy this situation, 'def_log_file' can be set to a filename
# and syslog_fix_perms to a string containing "<user>:<group>"
# if syslog_fix_perms is a list, it will iterate through and use the
@@ -446,11 +447,11 @@ syslog_fix_perms: syslog:root
# to set a hashed password; here account 'user3' has its password set to
# 'cloud-init', hashed with SHA-256:
# chpasswd:
-# list: |
-# user1:password1
-# user2:RANDOM
-# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
-# expire: True
+# list: |
+# user1:password1
+# user2:RANDOM
+# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
+# expire: True
# ssh_pwauth: [ True, False, "" or "unchanged" ]
#
# Hashed passwords can be generated in multiple ways, example with python3:
@@ -510,7 +511,7 @@ manual_cache_clean: False
# power_state can be used to make the system shutdown, reboot or
# halt after boot is finished. This same thing can be achieved by
# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
+#
# Doing it this way ensures that cloud-init is entirely finished with
# modules that would be executed, and avoids any error/log messages
# that may go to the console as a result of system services like
@@ -521,6 +522,6 @@ manual_cache_clean: False
# mode: required. must be one of 'poweroff', 'halt', 'reboot'
# message: provided as the message argument to 'shutdown'. default is none.
power_state:
- delay: 30
- mode: poweroff
- message: Bye Bye
+ delay: 30
+ mode: poweroff
+ message: Bye Bye
diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-cmdline.txt
index f043baef..805bc3d3 100644
--- a/doc/examples/kernel-cmdline.txt
+++ b/doc/examples/kernel-cmdline.txt
@@ -3,16 +3,19 @@ configuration that comes from the kernel command line has higher priority
than configuration in /etc/cloud/cloud.cfg
The format is:
- cc: <yaml content here> [end_cc]
+ cc: <yaml content here|URL encoded yaml content> [end_cc]
cloud-config will consider any content after 'cc:' to be cloud-config
data. If an 'end_cc' string is present, then it will stop reading there.
Otherwise it considers everything after 'cc:' to be cloud-config content.
-In order to allow carriage returns, you must enter '\\n', literally,
+In order to allow carriage returns, you must enter '\\n', literally,
on the command line: two backslashes followed by the letter 'n'.
+The yaml content may also be URL encoded (urllib.parse.quote()).
+
Here are some examples:
- root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
- root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
+ root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
+ cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ cc:ssh_import_id: %5Bsmoser%5D end_cc cc:runcmd: %5B %5B ls, -l %5D %5D end_cc root=/dev/sda1
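+
+The last example above is URL encoded. As a sketch (assuming python3 is
+available in your build environment), the encoded form can be produced with
+urllib:
+
+   % python3 -c 'import urllib.parse; print(urllib.parse.quote("ssh_import_id: [smoser]"))'
+   ssh_import_id%3A%20%5Bsmoser%5D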
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 86441986..684822c2 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -18,7 +18,7 @@ from cloudinit.config.schema import get_schema_doc
# General information about the project.
project = 'cloud-init'
-copyright = '2019, Canonical Ltd.'
+copyright = '2020, Canonical Ltd.'
# -- General configuration ----------------------------------------------------
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 5d90c131..0015e35a 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -68,6 +68,7 @@ Having trouble? We would like to help!
:caption: Development
topics/hacking.rst
+ topics/code_review.rst
topics/security.rst
topics/debugging.rst
topics/logging.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 3f215b1b..8f56a7d2 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,17 +14,20 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions and
-FreeBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
+and OpenBSD:
-- Ubuntu
-- SLES/openSUSE
-- RHEL/CentOS
-- Fedora
-- Gentoo Linux
-- Debian
+- Alpine Linux
- ArchLinux
+- Debian
+- Fedora
- FreeBSD
+- Gentoo Linux
+- NetBSD
+- OpenBSD
+- RHEL/CentOS
+- SLES/openSUSE
+- Ubuntu
Clouds
======
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index d846867b..4e79c958 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -129,7 +129,7 @@ Config
+---------+--------+----------------------------------------------------------+
This stage runs config modules only. Modules that do not really have an
-effect on other stages of boot are run here.
+effect on other stages of boot are run here, including ``runcmd``.
Final
=====
@@ -150,7 +150,7 @@ Things that run here include
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (including ``runcmd``).
+ * user-scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index b32677b0..0ff230b5 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -106,17 +106,19 @@ Do **NOT** rely on the output of these commands as they can and will change.
Current subcommands:
- * ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config yaml file and annotates potential
- schema errors locally without the need for deployment. Schema
- validation is work in progress and supports a subset of cloud-config
- modules.
-
+ * ``net-convert``: manually use cloud-init's network format conversion, useful
+ for testing configuration or testing changes to the network conversion logic
+ itself.
* ``render``: use cloud-init's jinja template render to
process **#cloud-config** or **custom-scripts**, injecting any variables
from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
+ * ``schema``: a **#cloud-config** format and schema
+ validator. It accepts a cloud-config yaml file and annotates potential
+ schema errors locally without the need for deployment. Schema
+ validation is work in progress and supports a subset of cloud-config
+ modules.
.. _cli_features:
@@ -162,7 +164,7 @@ declared to run in various boot stages in the file
* *cloud_init_modules*
* *cloud_config_modules*
-* *cloud_init_modules*
+* *cloud_final_modules*
Can be run on the command line, but each module is gated to run only once due
to semaphores in ``/var/lib/cloud/``.
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
new file mode 100644
index 00000000..68c10405
--- /dev/null
+++ b/doc/rtd/topics/code_review.rst
@@ -0,0 +1,256 @@
+*******************
+Code Review Process
+*******************
+
+In order to manage incoming pull requests effectively, and provide
+timely feedback and/or acceptance, this document serves as a guideline
+for the review process and outlines the expectations for those
+submitting code to the project as well as those reviewing the code.
+Code is reviewed for acceptance by at least one core team member (later
+referred to as committers), but comments and suggestions from others
+are encouraged and welcome.
+
+The process is intended to provide timely and actionable feedback for
+any submission.
+
+Asking For Help
+===============
+
+cloud-init contributors, potential contributors, community members and
+users are encouraged to ask for any help that they need. If you have
+questions about the code review process, or at any point during the
+code review process, these are the available avenues:
+
+* if you have an open Pull Request, comment on that pull request
+* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+ away
+* send an email to the cloud-init mailing list,
+ cloud-init@lists.launchpad.net
+
+These are listed in rough order of preference, but use whichever of
+them you are most comfortable with.
+
+Goals
+=====
+
+This process has the following goals:
+
+* Ensure code reviews occur in a timely fashion and provide actionable
+ feedback if changes are desired.
+* Ensure the minimization of ancillary problems to increase the
+  efficiency of those reviewing the submitted code.
+
+Role Definitions
+================
+
+Any code review process will have (at least) two involved parties. For
+our purposes, these parties are referred to as **Proposer** and
+**Reviewer**. (We also have the **Committer** role which is a special
+case of the **Reviewer** role.) The terms are defined here (and the
+use of the singular form is not meant to imply that they refer to a
+single person):
+
+Proposer
+ The person proposing a pull request (hereafter known as a PR).
+
+Reviewer
+ A person who is reviewing a PR.
+
+Committer
+ A cloud-init core developer (i.e. a person who has permission to
+ merge PRs into master).
+
+Prerequisites For Landing Pull Requests
+=======================================
+
+Before a PR can be landed into master, the following conditions *must*
+be met:
+
+* the CLA has been signed by the **Proposer** (or is covered by an
+ entity-level CLA signature)
+* all required status checks are passing
+* at least one "Approve" review from a **Committer**
+* no "Request changes" reviews from any **Committer**
+
+The following conditions *should* be met:
+
+* any Python functions/methods/classes have docstrings added/updated
+* any changes to config module behaviour are captured in the
+ documentation of the config module
+* any Python code added has corresponding unit tests
+* no "Request changes" reviews from any **Reviewer**
+
+These conditions can be relaxed at the discretion of the
+**Committers** on a case-by-case basis. Generally, for accountability,
+this should not be the decision of a single **Committer**, and the
+decision should be documented in comments on the PR.
+
+(To take a specific example, the ``cc_phone_home`` module had no tests
+at the time `PR #237
+<https://github.com/canonical/cloud-init/pull/237>`_ was submitted, so
+the **Proposer** was not expected to write a full set of tests for
+their minor modification, but they were expected to update the config
+module docs.)
+
+Non-Committer Reviews
+=====================
+
+Reviews from non-**Committers** are *always* welcome. Please feel
+empowered to review PRs and leave your thoughts and comments on any
+submitted PRs, regardless of the **Proposer**.
+
+Much of the below process is written in terms of the **Committers**.
+This is not intended to reflect that reviews should only come from that
+group, but acknowledges that we are ultimately responsible for
+maintaining the standards of the codebase. It would be entirely
+reasonable (and very welcome) for a **Reviewer** to only examine part
+of a PR, but it would not be appropriate for a **Committer** to merge a
+PR without full scrutiny.
+
+Opening Phase
+=============
+
+In this phase, the **Proposer** is responsible for opening a pull
+request and meeting the prerequisites laid out above.
+
+If they need help understanding the prerequisites, or help meeting the
+prerequisites, then they can (and should!) ask for help. See the
+:ref:`Asking For Help` section above for the ways to do that.
+
+These are the steps that comprise the opening phase:
+
+1. The **Proposer** opens a PR
+
+2. CI runs automatically, and if
+
+ CI fails
+ The **Proposer** is expected to fix CI failures. If the
+ **Proposer** doesn't understand the nature of the failures they
+ are seeing, they should comment in the PR to request assistance,
+ or use another way of :ref:`Asking For Help`.
+
+ (Note that if assistance is not requested, the **Committers**
+ will assume that the **Proposer** is working on addressing the
+ failures themselves. If you require assistance, please do ask
+ for help!)
+
+ CI passes
+ Move on to the :ref:`Review phase`.
+
+Review Phase
+============
+
+In this phase, the **Proposer** and the **Reviewers** will iterate
+together to, hopefully, get the PR merged into the cloud-init codebase.
+There are three potential outcomes: merged, rejected permanently, and
+temporarily closed. (The first two are covered in this section; see
+:ref:`Inactive Pull Requests` for details about temporary closure.)
+
+(In the below, when the verbs "merge" or "squash merge" are used, they
+should be understood to mean "squash merged using the GitHub UI", which
+is the only way that changes can land in cloud-init's master branch.)
+
+These are the steps that comprise the review phase:
+
+1. **The Committers** assign a **Committer** to the PR
+
+ This **Committer** is expected to shepherd the PR to completion (and
+ merge it, if that is the outcome reached). This means that they
+ will perform an initial review, and monitor the PR to ensure that
+ the **Proposer** is receiving any assistance that they require. The
+ **Committers** will perform this assignment on a daily basis.
+
+ This assignment is intended to ensure that the **Proposer** has a
+ clear point of contact with a cloud-init core developer, and that
+ they get timely feedback after submitting a PR. It *is not*
+ intended to preclude reviews from any other **Reviewers**, nor to
+ imply that the **Committer** has ownership over the review process.
+
+ The assigned **Committer** may choose to delegate the code review of
+ a PR to another **Reviewer** if they think that they would be better
+ suited.
+
+ (Note that, in GitHub terms, this is setting an Assignee, not
+ requesting a review.)
+
+2. That **Committer** performs an initial review of the PR, resulting
+ in one of the following:
+
+ Approve
+ If the submitted PR meets all of the :ref:`Prerequisites for
+ Landing Pull Requests` and passes code review, then the
+ **Committer** will squash merge immediately.
+
+ There may be circumstances where a PR should not be merged
+ immediately. The ``wip`` label will be applied to PRs for which
+ this is true. Only **Committers** are able to apply labels to
+ PRs, so anyone who believes that this label should be applied to a
+ PR should request its application in a comment on the PR.
+
+ The review process is **DONE**.
+
+ Approve (with nits)
+ If the **Proposer** submits their PR with "Allow edits from
+ maintainer" enabled, and the only changes the **Committer**
+ requests are minor "nits", the **Committer** can push fixes for
+ those nits and *immediately* squash merge. If the **Committer**
+ does not wish to fix these nits but believes they should block a
+ straight-up Approve, then their review should be "Needs Changes"
+ instead.
+
+ A nit is understood to be something like a minor style issue or a
+ spelling error, generally confined to a single line of code.
+
+ If a **Committer** is unsure as to whether their requested change
+ is a nit, they should not treat it as a nit.
+
+ (If a **Proposer** wants to opt-out of this, then they should
+ uncheck "Allow edits from maintainer" when submitting their PR.)
+
+ The review process is **DONE**.
+
+ Outright rejection
+ The **Committer** will close the PR, with useful messaging for the
+ **Proposer** as to why this has happened.
+
+ This is reserved for cases where the proposed change is completely
+ unfit for landing, and there is no reasonable path forward. This
+ should only be used sparingly, as there are very few cases where
+ proposals are completely unfit.
+
+ If a different approach to the same problem is planned, it should
+ be submitted as a separate PR. The **Committer** should include
+ this information in their message when the PR is closed.
+
+ The review process is **DONE**.
+
+ Needs Changes
+ The **Committer** will give the **Proposer** a clear idea of what
+ is required for an Approve vote or, for more complex PRs, what the
+ next steps towards an Approve vote are.
+
+ The **Proposer** will ask questions if they don't understand, or
+ disagree with, the **Committer**'s review comments.
+
+ Once consensus has been reached, the **Proposer** will address the
+ review comments.
+
+ Once the review comments are addressed (as well as, potentially,
+ in the interim), CI will run. If CI fails, the **Proposer** is
+ expected to fix CI failures. If CI passes, the **Proposer**
+ should indicate that the PR is ready for re-review (by @ing the
+ assigned reviewer), effectively moving back to the start of this
+ section.
+
+Inactive Pull Requests
+======================
+
+PRs will be temporarily closed if they have been waiting on
+**Proposer** action for a certain amount of time without activity. A
+PR will be marked as stale (with an explanatory comment) after 14 days
+of inactivity. It will be closed after a further 7 days of inactivity.
+
+These closes are not considered permanent, and the closing message
+should reflect this for the **Proposer**. However, if a PR is reopened,
+it should effectively enter the :ref:`Opening phase` again, as it may
+need some work done to get CI passing again.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 1427fb3d..fdb919a5 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -114,19 +114,19 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Azure:
- agent_command: __builtin__
- apply_network_config: true
- data_dir: /var/lib/waagent
- dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
- disk_aliases:
+ Azure:
+ agent_command: __builtin__
+ apply_network_config: true
+ data_dir: /var/lib/waagent
+ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+ disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
- hostname_bounce:
+ hostname_bounce:
interface: eth0
command: builtin
policy: true
hostname_command: hostname
- set_hostname: true
+ set_hostname: true
Userdata
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index da183226..a24de34f 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -9,14 +9,20 @@ dhcp lease information given to the instance.
For more details on meta-data and user-data,
refer to the `CloudStack Administrator Guide`_.
-URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
-is the Virtual Router IP:
+The following URLs provide access to user-data and meta-data from the
+Virtual Machine.
+`data-server.` is a well-known hostname provided by the CloudStack virtual
+router that points to the next UserData server (which is usually also
+the virtual router).
.. code:: bash
- http://10.1.1.1/latest/user-data
- http://10.1.1.1/latest/meta-data
- http://10.1.1.1/latest/meta-data/{metadata type}
+ http://data-server./latest/user-data
+ http://data-server./latest/meta-data
+ http://data-server./latest/meta-data/{metadata type}
+
+If `data-server.` cannot be resolved, cloud-init will try to obtain the
+virtual router's address from the system's DHCP leases. If that fails,
+it will use the system's default gateway.
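+
+For example, a hypothetical in-guest query for the instance id, assuming
+`data-server.` resolves as described above:
+
+.. code:: bash
+
+    curl http://data-server./latest/meta-data/instance-id
+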
Configuration
-------------
@@ -37,11 +43,11 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- CloudStack:
- max_wait: 120
- timeout: 50
- datasource_list:
- - CloudStack
+ CloudStack:
+ max_wait: 120
+ timeout: 50
+ datasource_list:
+ - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index a90f3779..274ca1e4 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -42,6 +42,7 @@ Note that there are multiple versions of this data provided, cloud-init
by default uses **2009-04-04** but newer versions can be supported with
relative ease (newer versions have more data exposed, while maintaining
backward compatibility with the previous versions).
+Version **2016-09-02** is required for secondary IP address support.
To see which versions are supported from your cloud provider use the following
URL:
@@ -80,16 +81,26 @@ The settings that may be configured are:
* **timeout**: the timeout value provided to urlopen for each individual http
request. This is used both when selecting a metadata_url and when crawling
the metadata service. (default: 50)
+ * **apply_full_imds_network_config**: Boolean (default: True) to allow
+ cloud-init to configure any secondary NICs and secondary IPs described by
+ the metadata service. All network interfaces are configured with DHCP (v4)
+   to obtain a primary IPv4 address and route. Interfaces which have a
+   non-empty 'ipv6s' list will also enable DHCPv6 to obtain a primary IPv6
+   address and route. The DHCP responses (v4 and v6) return an IP that matches
+   the first element of the local-ipv4s and ipv6s lists respectively. All
+   additional values (secondary addresses) in the static ip lists will be
+   added to the interface.
An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Ec2:
- metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
- max_wait: 120
- timeout: 50
+ Ec2:
+ metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+ max_wait: 120
+ timeout: 50
+ apply_full_imds_network_config: true
Notes
-----
@@ -102,4 +113,12 @@ Notes
The check for the instance type is performed by the is_classic_instance()
method.
+ * For EC2 instances with multiple network interfaces (NICs) attached, dhcp4
+ will be enabled to obtain the primary private IPv4 address of those NICs.
+ Wherever dhcp4 or dhcp6 is enabled for a NIC, a dhcp route-metric will be
+ added with the value of ``<device-number + 1> * 100`` to ensure dhcp
+ routes on the primary NIC are preferred to any secondary NICs.
+ For example: the primary NIC will have a DHCP route-metric of 100,
+ the next NIC will be 200.
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/topics/datasources/maas.rst
index 85c853e9..427fba24 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/topics/datasources/maas.rst
@@ -5,6 +5,6 @@ MAAS
*TODO*
-For now see: http://maas.ubuntu.com/
+For now see: https://maas.io/docs
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index bc96f7fe..0ca79102 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -32,7 +32,7 @@ The permitted keys are:
With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
-with ``http://``, ``https://`` or ``ftp://``
+with ``http://`` or ``https://``.
e.g. you can pass this option to QEMU:
@@ -53,6 +53,12 @@ These user-data and meta-data files are expected to be in the following format.
Basically, user-data is simply user-data and meta-data is a yaml formatted file
representing what you'd find in the EC2 metadata service.
+You may also optionally provide a vendor-data file in the following format.
+
+::
+
+ /vendor-data
+
Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a
sufficient disk by following the example below.
@@ -133,12 +139,12 @@ be network configuration based on the filename.
version: 2
ethernets:
interface0:
- match:
- mac_address: "52:54:00:12:34:00"
- set-name: interface0
- addresses:
- - 192.168.1.10/255.255.255.0
- gateway4: 192.168.1.254
+ match:
+ mac_address: "52:54:00:12:34:00"
+ set-name: interface0
+ addresses:
+ - 192.168.1.10/255.255.255.0
+ gateway4: 192.168.1.254
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 8ce2a53d..b23b4b7c 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -5,7 +5,7 @@ OpenStack
This datasource supports reading data from the
`OpenStack Metadata Service
-<https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
+<https://docs.openstack.org/nova/latest/admin/metadata-service.html>`_.
Discovery
-------------
@@ -19,7 +19,8 @@ checks the following environment attributes as a potential OpenStack platform:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
- * **DMI chassis_asset_tag** is *OpenTelekomCloud*
+ * **DMI chassis_asset_tag** is *OpenTelekomCloud*, *SAP CCloud VM*,
+ *OpenStack Nova* (since 19.2) or *OpenStack Compute* (since 19.2)
Configuration
@@ -50,12 +51,12 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- OpenStack:
- metadata_urls: ["http://169.254.169.254"]
- max_wait: -1
- timeout: 10
- retries: 5
- apply_network_config: True
+ OpenStack:
+ metadata_urls: ["http://169.254.169.254"]
+ max_wait: -1
+ timeout: 10
+ retries: 5
+ apply_network_config: True
Vendor Data
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index c312617f..6256e624 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -11,4 +11,23 @@ transport.
For further information see a full working example in cloud-init's
source code tree in doc/sources/ovf
+Configuration
+-------------
+On VMware platforms, VMTools is required for OVF datasource configuration
+settings, as well as for vCloud and vSphere admin configuration. Users can
+change the VMTools configuration options with the command::
+
+ vmware-toolbox-cmd config set <section> <key> <value>
+
+The following VMTools configuration options affect cloud-init's behavior on a booted VM:
+ * [deploypkg] enable-custom-scripts
+   If this option is absent from the VMTools configuration, the custom script
+   is disabled by default for security reasons. Some VMware products could
+   change this default behavior (for example: enabled by default) via
+   customization specification settings; see the example below.
+
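+For example, a sketch of explicitly enabling custom scripts with the key
+named above (verify the section and key against your VMTools version)::
+
+    vmware-toolbox-cmd config set deploypkg enable-custom-scripts true
+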
+VMware admins can refer to https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py and set the customization specification settings.
+
+For more information, see the `VMware vSphere Product Documentation
+<https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html>`_
+and specific VMTools parameters consumed.
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 98c0cfaa..aa1be142 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -104,6 +104,23 @@ The force parameter allows the command to be run again since the instance has
already launched. The other options increase the verbosity of logging and
put the logs to STDERR.
+How can I re-run datasource detection and cloud-init?
+=====================================================
+
+If a user is developing a new datasource or working on debugging an issue, it
+may be useful to re-run datasource detection and the initial setup of
+cloud-init.
+
+To do this, force ds-identify to re-run, clean up any logs, and re-run
+cloud-init:
+
+.. code-block:: shell-session
+
+ $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+ $ sudo cloud-init clean --logs
+ $ sudo cloud-init init --local
+ $ sudo cloud-init init
+
How can I debug my user data?
=============================
@@ -206,8 +223,8 @@ values or the LXD `Custom Network Configuration`_ document for more about
custom network config.
.. _LXD: https://linuxcontainers.org/
-.. _Instance Configuration: https://lxd.readthedocs.io/en/latest/instances/
-.. _Custom Network Configuration: https://lxd.readthedocs.io/en/latest/cloud-init/
+.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
+.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
Where can I learn more?
========================================
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index 2b60bdd3..d03e4caf 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -23,44 +23,35 @@ Using a mime-multi part file, the user can specify more than one type of data.
For example, both a user data script and a cloud-config type could be
specified.
-Supported content-types:
+Supported content-types can be listed with the cloud-init make-mime subcommand::
-- text/cloud-boothook
-- text/cloud-config
-- text/cloud-config-archive
-- text/jinja2
-- text/part-handler
-- text/upstart-job
-- text/x-include-once-url
-- text/x-include-url
-- text/x-shellscript
+ % cloud-init devel make-mime --list-types
+ cloud-boothook
+ cloud-config
+ cloud-config-archive
+ cloud-config-jsonp
+ jinja2
+ part-handler
+ upstart-job
+ x-include-once-url
+ x-include-url
+ x-shellscript
-Helper script to generate mime messages
----------------------------------------
-.. code-block:: python
-
- #!/usr/bin/python
-
- import sys
+Helper subcommand to generate mime messages
+-------------------------------------------
- from email.mime.multipart import MIMEMultipart
- from email.mime.text import MIMEText
+The `make-mime`_ subcommand of cloud-init can generate MIME multi-part files.
- if len(sys.argv) == 1:
- print("%s input-file:type ..." % (sys.argv[0]))
- sys.exit(1)
+The ``make-mime`` subcommand takes pairs of (filename, "text/" mime subtype)
+separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
+multipart message to stdout. An example invocation, assuming you have your
+cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want
+to store the multipart message in ``user-data``::
- combined_message = MIMEMultipart()
- for i in sys.argv[1:]:
- (filename, format_type) = i.split(":", 1)
- with open(filename) as fh:
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename))
- combined_message.attach(sub_message)
+ % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
- print(combined_message)
+.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
User-Data Script
@@ -126,7 +117,7 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
using a MIME archive.
.. note::
- New in cloud-init v. 18.4: Cloud config dta can also render cloud instance
+ New in cloud-init v. 18.4: Cloud config data can also render cloud instance
metadata variables using jinja templating. See
:ref:`instance_metadata` for more information.
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index e7dd0d62..255245a4 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -76,6 +76,11 @@ There are three basic top-level keys:
'security sensitive'. Only the keys listed here will be redacted from
instance-data.json for non-root users.
+* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg`
+ and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive
+ information such as passwords, so it is included in the **sensitive-keys**
+  list, which is only readable by root.
+
* **ds**: Datasource-specific metadata crawled for the specific cloud
platform. It should closely represent the structure of the cloud metadata
crawled. The structure of content and details provided are entirely
@@ -83,6 +88,9 @@ There are three basic top-level keys:
The content exposed under the 'ds' key is currently **experimental** and
expected to change slightly in the upcoming cloud-init release.
+* **sys_info**: Information about the underlying os, python, architecture and
+ kernel. This represents the data collected by `cloudinit.util.system_info`.
+
* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
exist on all cloud platforms. They will also retain their current behavior
and format and will be carried forward even if cloud-init introduces a new
@@ -103,7 +111,7 @@ v1.cloud_name
-------------
Where possible this will indicate the 'name' of the cloud the system is running
on. This is different than the 'platform' item. For example, the cloud name of
-Amazone Web Services is 'aws', while the platform is 'ec2'.
+Amazon Web Services is 'aws', while the platform is 'ec2'.
If determining a specific name is not possible or provided in meta-data, then
this field may contain the same content as 'platform'.
@@ -117,6 +125,22 @@ Example output:
- nocloud
- ovf
+v1.distro, v1.distro_version, v1.distro_release
+-----------------------------------------------
+This shall be the distro name, version and release as determined by
+`cloudinit.util.get_linux_distro`.
+
+Example output:
+
+- alpine, 3.12.0, ''
+- centos, 7.5, core
+- debian, 9, stretch
+- freebsd, 12.0-release-p10,
+- opensuse, 42.3, x86_64
+- opensuse-tumbleweed, 20180920, x86_64
+- redhat, 7.5, 'maipo'
+- sles, 12.3, x86_64
+- ubuntu, 20.04, focal
v1.instance_id
--------------
@@ -126,6 +150,14 @@ Examples output:
- i-<hash>
+v1.kernel_release
+-----------------
+This shall be the running kernel version (`uname -r`).
+
+Example output:
+
+- 5.3.0-1010-aws
+
v1.local_hostname
-----------------
The internal or local hostname of the system.
@@ -135,6 +167,17 @@ Examples output:
- ip-10-41-41-70
- <user-provided-hostname>
+v1.machine
+----------
+This shall be the running machine architecture (`uname -m`).
+
+Example output:
+
+- x86_64
+- i686
+- ppc64le
+- s390x
+
v1.platform
-------------
An attempt to identify the cloud platform instance that the system is running
@@ -154,7 +197,7 @@ v1.subplatform
Additional platform details describing the specific source or type of metadata
used. The format of subplatform will be:
-``<subplatform_type> (<url_file_or_dev_path>``
+``<subplatform_type> (<url_file_or_dev_path>)``
Examples output:
@@ -171,6 +214,15 @@ Examples output:
- ['ssh-rsa AA...', ...]
+v1.python_version
+-----------------
+The version of python that is running cloud-init as determined by
+`cloudinit.util.system_info`.
+
+Example output:
+
+- 3.7.6
+
v1.region
---------
The physical region/data center in which the instance is deployed.
@@ -192,164 +244,265 @@ Examples output:
Example Output
--------------
-Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2
-instance:
+Below is an example of ``/run/cloud-init/instance-data-sensitive.json`` on an
+EC2 instance:
.. sourcecode:: json
{
+ "_beta_keys": [
+ "subplatform"
+ ],
+ "availability_zone": "us-east-1b",
"base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/",
+ "_log": [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n",
+ "[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n"
+ ],
+ "cloud_config_modules": [
+ "emit_upstart",
+ "snap",
+ "ssh-import-id",
+ "locale",
+ "set-passwords",
+ "grub-dpkg",
+ "apt-pipelining",
+ "apt-configure",
+ "ubuntu-advantage",
+ "ntp",
+ "timezone",
+ "disable-ec2-metadata",
+ "runcmd",
+ "byobu"
+ ],
+ "cloud_final_modules": [
+ "package-update-upgrade-install",
+ "fan",
+ "landscape",
+ "lxd",
+ "ubuntu-drivers",
+ "puppet",
+ "chef",
+ "mcollective",
+ "salt-minion",
+ "rightscale_userdata",
+ "scripts-vendor",
+ "scripts-per-once",
+ "scripts-per-boot",
+ "scripts-per-instance",
+ "scripts-user",
+ "ssh-authkey-fingerprints",
+ "keys-to-console",
+ "phone-home",
+ "final-message",
+ "power-state-change"
+ ],
+ "cloud_init_modules": [
+ "migrator",
+ "seed_random",
+ "bootcmd",
+ "write-files",
+ "growpart",
+ "resizefs",
+ "disk_setup",
+ "mounts",
+ "set_hostname",
+ "update_hostname",
+ "update_etc_hosts",
+ "ca-certs",
+ "rsyslog",
+ "users-groups",
+ "ssh"
+ ],
+ "datasource_list": [
+ "Ec2",
+ "None"
+ ],
+ "def_log_file": "/var/log/cloud-init.log",
+ "disable_root": true,
+ "log_cfgs": [
+ [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+ ]
+ ],
+ "output": {
+ "all": "| tee -a /var/log/cloud-init-output.log"
+ },
+ "preserve_hostname": false,
+ "syslog_fix_perms": [
+ "syslog:adm",
+ "root:adm",
+ "root:wheel",
+ "root:root"
+ ],
+ "users": [
+ "default"
+ ],
+ "vendor_data": {
+ "enabled": true,
+ "prefix": []
+ }
+ },
+ "cloud_name": "aws",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
"ds": {
"_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.",
"_metadata_api_version": "2016-09-02",
"dynamic": {
- "instance-identity": {
+ "instance_identity": {
"document": {
- "accountId": "437526006925",
+ "accountId": "329910648901",
"architecture": "x86_64",
- "availabilityZone": "us-east-2b",
+ "availabilityZone": "us-east-1b",
"billingProducts": null,
"devpayProductCodes": null,
- "imageId": "ami-079638aae7046bdd2",
- "instanceId": "i-075f088c72ad3271c",
+ "imageId": "ami-02e8aa396f8be3b6d",
+ "instanceId": "i-0929128ff2f73a2f1",
"instanceType": "t2.micro",
"kernelId": null,
"marketplaceProductCodes": null,
- "pendingTime": "2018-10-05T20:10:43Z",
- "privateIp": "10.41.41.95",
+ "pendingTime": "2020-02-27T20:46:18Z",
+ "privateIp": "172.31.81.43",
"ramdiskId": null,
- "region": "us-east-2",
+ "region": "us-east-1",
"version": "2017-09-30"
},
"pkcs7": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog",
- "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi",
- "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw",
- "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls",
- "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh",
- "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh",
- "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj",
- "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk",
- "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i",
- "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV",
- "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX",
- "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq",
- "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k",
- "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU",
- "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA="
+ "MIAGCSqGSIb3DQ...",
+ "REDACTED",
+ "AhQUgq0iPWqPTVnT96tZE6L1XjjLHQAAAAAAAA=="
],
"rsa2048": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB",
- "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv",
- "ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6",
- "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg",
- "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK",
- "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1",
- "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg",
- "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw",
- "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn",
- "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw",
- "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6",
- "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN",
- "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B",
- "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA",
- "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz",
- "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM",
- "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU",
- "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM",
- "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA=="
+ "MIAGCSqGSIb...",
+ "REDACTED",
+ "clYQvuE45xXm7Yreg3QtQbrP//owl1eZHj6s350AAAAAAAA="
],
"signature": [
- "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf",
- "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt",
- "r4B0mN3p7EcqD8G+ll0="
+ "dA+QV+LLCWCRNddnrKleYmh2GvYo+t8urDkdgmDSsPi",
+ "REDACTED",
+ "kDT4ygyJLFkd3b4qjAs="
]
}
},
- "meta-data": {
- "ami-id": "ami-079638aae7046bdd2",
- "ami-launch-index": "0",
- "ami-manifest-path": "(unknown)",
- "block-device-mapping": {
+ "meta_data": {
+ "ami_id": "ami-02e8aa396f8be3b6d",
+ "ami_launch_index": "0",
+ "ami_manifest_path": "(unknown)",
+ "block_device_mapping": {
"ami": "/dev/sda1",
- "ephemeral0": "sdb",
- "ephemeral1": "sdc",
"root": "/dev/sda1"
},
- "hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "instance-action": "none",
- "instance-id": "i-075f088c72ad3271c",
- "instance-type": "t2.micro",
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
+ "hostname": "ip-172-31-81-43.ec2.internal",
+ "instance_action": "none",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "instance_type": "t2.micro",
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
"metrics": {
"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
},
"network": {
"interfaces": {
"macs": {
- "06:74:8f:39:cd:a6": {
- "device-number": "0",
- "interface-id": "eni-052058bbd7831eaae",
- "ipv4-associations": {
- "18.218.221.122": "10.41.41.95"
- },
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4s": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
- "owner-id": "437526006925",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4s": "18.218.221.122",
- "security-group-ids": "sg-828247e9",
- "security-groups": "Cloud-init integration test secgroup",
- "subnet-id": "subnet-282f3053",
- "subnet-ipv4-cidr-block": "10.41.41.0/24",
- "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64",
- "vpc-id": "vpc-252ef24d",
- "vpc-ipv4-cidr-block": "10.41.0.0/16",
- "vpc-ipv4-cidr-blocks": "10.41.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56"
- }
+ "12:7e:c9:93:29:af": {
+ "device_number": "0",
+ "interface_id": "eni-0c07a0474339b801d",
+ "ipv4_associations": {
+ "3.89.187.177": "172.31.81.43"
+ },
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4s": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
+ "owner_id": "329910648901",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4s": "3.89.187.177",
+ "security_group_ids": "sg-0100038b68aa79986",
+ "security_groups": "launch-wizard-3",
+ "subnet_id": "subnet-04e2d12a",
+ "subnet_ipv4_cidr_block": "172.31.80.0/20",
+ "vpc_id": "vpc-210b4b5b",
+ "vpc_ipv4_cidr_block": "172.31.0.0/16",
+ "vpc_ipv4_cidr_blocks": "172.31.0.0/16"
+ }
}
}
},
"placement": {
- "availability-zone": "us-east-2b"
+ "availability_zone": "us-east-1b"
},
"profile": "default-hvm",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4": "18.218.221.122",
- "public-keys": {
- "cloud-init-integration": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ]
- },
- "reservation-id": "r-0594a20e31f6cfe46",
- "security-groups": "Cloud-init integration test secgroup",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4": "3.89.187.177",
+ "reservation_id": "r-0c481643d15766a02",
+ "security_groups": "launch-wizard-3",
"services": {
"domain": "amazonaws.com",
"partition": "aws"
}
}
},
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel_release": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
+ "platform": "ec2",
+ "public_ssh_keys": [],
+ "python_version": "3.7.6",
+ "region": "us-east-1",
"sensitive_keys": [],
+ "subplatform": "metadata (http://169.254.169.254)",
+ "sys_info": {
+ "dist": [
+ "ubuntu",
+ "20.04",
+ "focal"
+ ],
+ "platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "python": "3.7.6",
+ "release": "5.3.0-1010-aws",
+ "system": "Linux",
+ "uname": [
+ "Linux",
+ "ip-172-31-81-43",
+ "5.3.0-1010-aws",
+ "#11-Ubuntu SMP Thu Jan 16 07:59:32 UTC 2020",
+ "x86_64",
+ "x86_64"
+ ],
+ "variant": "ubuntu"
+ },
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "userdata": "#cloud-config\nssh_import_id: [<my-launchpad-id>]\n...",
"v1": {
"_beta_keys": [
"subplatform"
],
- "availability-zone": "us-east-2b",
- "availability_zone": "us-east-2b",
+ "availability_zone": "us-east-1b",
"cloud_name": "aws",
- "instance_id": "i-075f088c72ad3271c",
- "local_hostname": "ip-10-41-41-95",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
"platform": "ec2",
- "public_ssh_keys": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ],
- "region": "us-east-2",
- "subplatform": "metadata (http://169.254.169.254)"
- }
+ "public_ssh_keys": [],
+ "python": "3.7.6",
+ "region": "us-east-1",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "variant": "ubuntu"
+ },
+ "variant": "ubuntu",
+ "vendordata": ""
}
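
The standardized keys in this example can be consumed directly. As a minimal
sketch (assuming the world-readable, redacted variant at
``/run/cloud-init/instance-data.json``; key names as in the example above):

.. code-block:: python

    import json

    # Read cloud-init's merged instance data; the sensitive variant shown
    # above is root-only, while the redacted copy is world-readable.
    with open('/run/cloud-init/instance-data.json') as f:
        data = json.load(f)

    # Standardized, cloud-agnostic keys live under 'v1'.
    v1 = data['v1']
    print(v1['cloud_name'], v1['region'], v1['platform'])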
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index 9c9be804..e30fe0fe 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -6,6 +6,7 @@ Modules
*******
.. contents:: Table of Contents
+.. automodule:: cloudinit.config.cc_apk_configure
.. automodule:: cloudinit.config.cc_apt_configure
.. automodule:: cloudinit.config.cc_apt_pipelining
.. automodule:: cloudinit.config.cc_bootcmd
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 7f857550..c93e29be 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -50,9 +50,8 @@ currently being defined.
There are two physically/structurally different classes of device definitions,
and the ID field has a different interpretation for each:
-Physical devices
-
-: (Examples: ethernet, wifi) These can dynamically come and go between
+Physical devices (Examples: ethernet, wifi):
+ These can dynamically come and go between
reboots and even during runtime (hotplugging). In the generic case, they
can be selected by ``match:`` rules on desired properties, such as
name/name pattern, MAC address, driver, or device paths. In general these
@@ -69,9 +68,8 @@ Physical devices
which is only being used for references from definitions of compound
devices in the config.
-Virtual devices
-
-: (Examples: veth, bridge, bond) These are fully under the control of the
+Virtual devices (Examples: veth, bridge, bond):
+ These are fully under the control of the
config file(s) and the network stack. I. e. these devices are being created
instead of matched. Thus ``match:`` and ``set-name:`` are not applicable for
these, and the ID field is the name of the created virtual device.
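
To make the two classes concrete, here is an illustrative v2 snippet parsed
with PyYAML: a physical NIC selected by a ``match:`` rule and a virtual bridge
that is created rather than matched (device names and MAC are made up):

.. code-block:: python

    import yaml

    # Hypothetical v2 config: 'id0' matches existing hardware by MAC,
    # while 'br0' is a virtual device created on top of it.
    V2 = """
    network:
      version: 2
      ethernets:
        id0:
          match:
            macaddress: "00:11:22:33:44:55"
          set-name: eth-lan
      bridges:
        br0:
          interfaces: [id0]
    """
    cfg = yaml.safe_load(V2)
    assert cfg['network']['version'] == 2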
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 1520ba9a..08db04d8 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -25,17 +25,23 @@ For example, OpenStack may provide network config in the MetaData Service.
**System Config**
-A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files.
+A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files.
**Kernel Command Line**
-``ip=`` or ``network-config=<YAML config string>``
+``ip=`` or ``network-config=<Base64 encoded YAML config string>``
User-data cannot change an instance's network configuration. In the absence
of network configuration in any of the above sources, `Cloud-init`_ will
write out a network configuration that will issue a DHCP request on a "first"
network interface.
+.. note::
+
+ The network-config value is expected to be a Base64 encoded YAML string in
+ :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it
+ can be compressed with ``gzip`` prior to Base64 encoding.
+
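
A sketch of producing such a value with the standard library (gzip first, then
Base64, as the note allows; the YAML payload here is a placeholder):

.. code-block:: python

    import base64
    import gzip

    # Any v1/v2 network config YAML would do here.
    yaml_cfg = b"version: 1\nconfig: []"
    encoded = base64.b64encode(gzip.compress(yaml_cfg)).decode()
    print("network-config=" + encoded)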
Disabling Network Configuration
===============================
@@ -48,19 +54,19 @@ on other methods, such as embedded configuration or other customizations.
**Kernel Command Line**
-`Cloud-init`_ will check for a parameter ``network-config`` and the
-value is expected to be YAML string in the :ref:`network_config_v1` format.
-The YAML string may optionally be ``Base64`` encoded, and optionally
-compressed with ``gzip``.
+`Cloud-init`_ will additionally check for the parameter
+``network-config=disabled`` which will automatically disable any network
+configuration.
Example disabling kernel command line entry: ::
- network-config={config: disabled}
+ network-config=disabled
**cloud config**
-In the combined cloud-init configuration dictionary. ::
+In the combined cloud-init configuration dictionary, merged from
+``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
network:
config: disabled
@@ -159,7 +165,7 @@ supported formats. The following ``renderers`` are supported in cloud-init:
- **ENI**
/etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package
-found in Ubuntu and Debian.
+found in Alpine Linux, Debian and Ubuntu.
- **Netplan**
@@ -191,7 +197,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
- renderers: ['netplan', 'eni', 'sysconfig', 'freebsd']
+ renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
Network Configuration Tools
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index aee3d7fc..f03b5969 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -467,11 +467,11 @@ Set region in platforms.yaml
.. code-block:: yaml
azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
+ enabled: true
+ region: West US 2
+ vm_size: Standard_DS1_v2
+ storage_sku: standard_lrs
+ tag: ci
Architecture
@@ -546,38 +546,38 @@ The following demonstrates merge behavior:
.. code-block:: yaml
defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
+ list_item:
+ - list_entry_1
+ - list_entry_2
+ int_item_1: 123
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: 2
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: b
overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ dict_item:
+ subkey_2: false
+ subkey_dict:
+ subsubkey_2: 'new value'
result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: false
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: 'new value'
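
A rough Python model of this merge rule, assuming dictionaries merge
recursively while lists and scalars are replaced outright (matching the
result above):

.. code-block:: python

    def merge(default, override):
        # Dicts merge key by key; anything else is replaced by the override.
        if isinstance(default, dict) and isinstance(override, dict):
            merged = dict(default)
            for key, value in override.items():
                merged[key] = merge(merged.get(key), value)
            return merged
        return override

    assert merge({'int_item_1': 123, 'dict_item': {'subkey_1': 1}},
                 {'int_item_1': 0, 'dict_item': {'subkey_2': False}}) == \
        {'int_item_1': 0, 'dict_item': {'subkey_1': 1, 'subkey_2': False}}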
Image Config
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 897d6110..44e45c1b 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -5,7 +5,6 @@
# the packages/pkg-deps.json file as well.
#
-unittest2
# ec2 backend
boto3==1.5.9
diff --git a/packages/bddeb b/packages/bddeb
index 209765a5..b0f219b6 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -24,6 +24,7 @@ def find_root():
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
sys.path.insert(0, find_root())
+ from cloudinit import subp
from cloudinit import util
from cloudinit import temp_utils
from cloudinit import templater
@@ -53,27 +54,21 @@ def run_helper(helper, args=None, strip=True):
if args is None:
args = []
cmd = [util.abs_join(find_root(), 'tools', helper)] + args
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
return stdout
-def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
+def write_debian_folder(root, templ_data, cloud_util_deps):
"""Create a debian package directory with all rendered template files."""
print("Creating a debian/ folder in %r" % (root))
- if is_python2:
- pyver = "2"
- python = "python"
- else:
- pyver = "3"
- python = "python3"
deb_dir = util.abs_join(root, 'debian')
# Just copy debian/ dir and then update files
pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- util.subp(['cp', '-a', pdeb_d, deb_dir])
+ subp.subp(['cp', '-a', pdeb_d, deb_dir])
# Fill in the change log template
templater.render_to_file(util.abs_join(find_root(),
@@ -83,30 +78,25 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
# Write out the control file template
reqs_output = run_helper(
- 'read-dependencies',
- args=['--distro', 'debian', '--python-version', pyver])
+ 'read-dependencies', args=['--distro', 'debian'])
reqs = reqs_output.splitlines()
test_reqs = run_helper(
'read-dependencies',
['--requirements-file', 'test-requirements.txt',
- '--system-pkg-names', '--python-version', pyver]).splitlines()
+ '--system-pkg-names']).splitlines()
requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
# We consolidate all deps as Build-Depends as our package build runs all
# tests so we need all runtime dependencies anyway.
# NOTE: python package was moved to the front after debuild -S would fail with
# 'Please add apropriate interpreter' errors (as in debian bug 861132)
- requires.extend([python] + reqs + test_reqs)
+ requires.extend(['python3'] + reqs + test_reqs)
+ if templ_data['debian_release'] == 'xenial':
+ requires.append('python3-pytest-catchlog')
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
- params={'build_depends': ','.join(requires),
- 'python': python})
-
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'rules.in'),
- util.abs_join(deb_dir, 'rules'),
- params={'python': python, 'pyver': pyver})
+ params={'build_depends': ','.join(requires)})
def read_version():
@@ -203,13 +193,12 @@ def main():
print("Extracting temporary tarball %r" % (tarball))
cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
templ_data.update(ver_data)
- write_debian_folder(xdir, templ_data, is_python2=args.python2,
- cloud_util_deps=args.cloud_utils)
+ write_debian_folder(xdir, templ_data, cloud_util_deps=args.cloud_utils)
print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
xdir))
@@ -217,7 +206,7 @@ def main():
cmd = ['debuild', '--preserve-envvar', 'INIT_SYSTEM']
if args.debuild_args:
cmd.extend(args.debuild_args)
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
link_dsc = os.path.join(os.getcwd(), 'cloud-init.dsc')
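
The mechanical change running through this and the following files:
``util.subp`` becomes ``subp.subp`` from the new ``cloudinit.subp`` module.
The call shape is unchanged; a minimal sketch:

.. code-block:: python

    from cloudinit import subp

    # subp.subp returns a (stdout, stderr) tuple and raises
    # ProcessExecutionError on a nonzero exit, as util.subp did.
    try:
        stdout, _stderr = subp.subp(['uname', '-r'])
        print(stdout.strip())
    except subp.ProcessExecutionError as e:
        print('command failed: %s' % e)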
diff --git a/packages/brpm b/packages/brpm
index 4004fd0e..a9fd0b70 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -24,6 +24,7 @@ def find_root():
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
sys.path.insert(0, find_root())
+ from cloudinit import subp
from cloudinit import templater
from cloudinit import util
@@ -36,24 +37,28 @@ def run_helper(helper, args=None, strip=True):
if args is None:
args = []
cmd = [util.abs_join(find_root(), 'tools', helper)] + args
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
return stdout
-def read_dependencies(distro, requirements_file='requirements.txt'):
+def read_dependencies(distro):
"""Returns the Python package depedencies from requirements.txt files.
- @returns a tuple of (requirements, test_requirements)
+ @returns a tuple of (build_deps, run_deps, test_deps)
"""
- pkg_deps = run_helper(
- 'read-dependencies', args=['--distro', distro]).splitlines()
+ build_deps = run_helper(
+        'read-dependencies', args=[
+ '--distro', distro, '--build-requires']).splitlines()
+ run_deps = run_helper(
+ 'read-dependencies', args=[
+ '--distro', distro, '--runtime-requires']).splitlines()
test_deps = run_helper(
'read-dependencies', args=[
'--requirements-file', 'test-requirements.txt',
'--system-pkg-names']).splitlines()
- return (pkg_deps, test_deps)
+ return (build_deps, run_deps, test_deps)
def read_version():
@@ -83,9 +88,9 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn):
rpm_upstream_version = version_data['version']
subs['rpm_upstream_version'] = rpm_upstream_version
- deps, test_deps = read_dependencies(distro=args.distro)
- subs['buildrequires'] = deps + test_deps
- subs['requires'] = deps
+ build_deps, run_deps, test_deps = read_dependencies(distro=args.distro)
+ subs['buildrequires'] = build_deps + test_deps
+ subs['requires'] = run_deps
if args.boot == 'sysvinit':
subs['sysvinit'] = True
@@ -174,7 +179,7 @@ def main():
else:
cmd = ['rpmbuild', '-ba', spec_fn]
- util.subp(cmd, capture=capture)
+ subp.subp(cmd, capture=capture)
# Copy the items built to our local dir
globs = []
diff --git a/packages/debian/control.in b/packages/debian/control.in
index e9ed64f3..72895b47 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -10,11 +10,10 @@ Standards-Version: 3.9.6
Package: cloud-init
Architecture: all
Depends: ${misc:Depends},
- ${${python}:Depends},
+ ${python3:Depends},
iproute2,
isc-dhcp-client
Recommends: eatmydata, sudo, software-properties-common, gdisk
-XB-Python-Version: ${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation
to retrieve and install ssh keys and to let the user run various scripts.
diff --git a/packages/debian/rules.in b/packages/debian/rules
index e542c7f1..d138deeb 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules
@@ -1,12 +1,10 @@
-## template:basic
#!/usr/bin/make -f
INIT_SYSTEM ?= systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
-PYVER ?= python${pyver}
DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
- dh $@ --with $(PYVER),systemd --buildsystem pybuild
+ dh $@ --with python3,systemd --buildsystem pybuild
override_dh_install:
dh_install
@@ -19,7 +17,7 @@ override_dh_install:
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make PYVER=python${pyver} check
+ http_proxy= make check
else
@echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
endif
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index cf065219..80028396 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -6,52 +6,31 @@
"dh-systemd"
],
"renames" : {
- "pyyaml" : {
- "2" : "python-yaml",
- "3" : "python3-yaml"
- },
- "contextlib2" : {
- "2" : "python-contextlib2"
- },
- "pyserial" : {
- "2" : "python-serial",
- "3" : "python3-serial"
- }
+ "pyyaml" : "python3-yaml",
+ "pyserial" : "python3-serial"
},
"requires" : [
"procps"
]
},
+ "centos" : {
+ "build-requires" : [
+ "python3-devel"
+ ],
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
"redhat" : {
"build-requires" : [
- "python-devel",
- "python-setuptools"
+ "python3-devel"
],
- "renames" : {
- "jinja2" : {
- "3" : "python36-jinja2"
- },
- "jsonschema" : {
- "3" : "python36-jsonschema"
- },
- "pyflakes" : {
- "2" : "pyflakes",
- "3" : "python36-pyflakes"
- },
- "pyyaml" : {
- "2" : "PyYAML",
- "3" : "python36-PyYAML"
- },
- "pyserial" : {
- "2" : "pyserial"
- },
- "requests" : {
- "3" : "python36-requests"
- },
- "six" : {
- "3" : "python36-six"
- }
- },
"requires" : [
"e2fsprogs",
"iproute",
@@ -64,9 +43,6 @@
},
"suse" : {
"renames" : {
- "pyyaml" : {
- "2" : "python-yaml"
- }
},
"build-requires" : [
"fdupes",
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 057a5784..4cff2c97 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -1,6 +1,4 @@
## template: jinja
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-
%define use_systemd (0%{?fedora} && 0%{?fedora} >= 18) || (0%{?rhel} && 0%{?rhel} >= 7)
%if %{use_systemd}
@@ -94,11 +92,11 @@ ssh keys and to let the user run various scripts.
{% endfor %}
%build
-%{__python} setup.py build
+%{__python3} setup.py build
%install
-%{__python} setup.py install -O1 \
+%{__python3} setup.py install -O1 \
--skip-build --root $RPM_BUILD_ROOT \
--init-system=%{init_system}
@@ -109,7 +107,7 @@ cp -p tools/21-cloudinit.conf \
$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
# Remove the tests
-rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests
+rm -rf $RPM_BUILD_ROOT%{python3_sitelib}/tests
# Required dirs...
mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
@@ -213,4 +211,4 @@ fi
%dir %{_sharedstatedir}/cloud
# Python code is here...
-%{python_sitelib}/*
+%{python3_sitelib}/*
diff --git a/requirements.txt b/requirements.txt
index dd10d85d..5817da3b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,6 +32,3 @@ jsonpatch
# For validating cloud-config sections per schema definitions
jsonschema
-
-# For Python 2/3 compatibility
-six
diff --git a/setup.py b/setup.py
index 01a67b95..cbacf48e 100755
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@ import os
import shutil
import sys
import tempfile
+import platform
import setuptools
from setuptools.command.install import install
@@ -33,23 +34,6 @@ def is_f(p):
def is_generator(p):
return '-generator' in p
-def tiny_p(cmd, capture=True):
- # Darn python 2.6 doesn't have check_output (argggg)
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if not capture:
- stdout = None
- stderr = None
- sp = subprocess.Popen(cmd, stdout=stdout,
- stderr=stderr, stdin=None,
- universal_newlines=True)
- (out, err) = sp.communicate()
- ret = sp.returncode
- if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
- (cmd, ret, out, err))
- return (out, err)
-
def pkg_config_read(library, var):
fallbacks = {
@@ -60,7 +44,7 @@ def pkg_config_read(library, var):
}
cmd = ['pkg-config', '--variable=%s' % var, library]
try:
- (path, err) = tiny_p(cmd)
+ path = subprocess.check_output(cmd).decode('utf-8')
path = path.strip()
except Exception:
path = fallbacks[library][var]
@@ -82,14 +66,14 @@ def in_virtualenv():
def get_version():
cmd = [sys.executable, 'tools/read-version']
- (ver, _e) = tiny_p(cmd)
- return str(ver).strip()
+ ver = subprocess.check_output(cmd)
+ return ver.decode('utf-8').strip()
def read_requires():
cmd = [sys.executable, 'tools/read-dependencies']
- (deps, _e) = tiny_p(cmd)
- return str(deps).splitlines()
+ deps = subprocess.check_output(cmd)
+ return deps.decode('utf-8').splitlines()
def render_tmpl(template, mode=None):
@@ -117,10 +101,11 @@ def render_tmpl(template, mode=None):
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
if VARIANT:
- tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
- VARIANT, template, fpath])
+ subprocess.run([sys.executable, './tools/render-cloudcfg', '--variant',
+ VARIANT, template, fpath])
else:
- tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ subprocess.run(
+ [sys.executable, './tools/render-cloudcfg', template, fpath])
if mode:
os.chmod(fpath, mode)
# return path relative to setup.py
@@ -136,6 +121,7 @@ if '--distro' in sys.argv:
INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
+ 'sysvinit_netbsd': [f for f in glob('sysvinit/netbsd/*') if is_f(f)],
'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)],
@@ -152,6 +138,7 @@ INITSYS_FILES = {
INITSYS_ROOTS = {
'sysvinit': 'etc/rc.d/init.d',
'sysvinit_freebsd': 'usr/local/etc/rc.d',
+ 'sysvinit_netbsd': 'usr/local/etc/rc.d',
'sysvinit_deb': 'etc/init.d',
'sysvinit_openrc': 'etc/init.d',
'sysvinit_suse': 'etc/init.d',
@@ -228,7 +215,7 @@ class InitsysInstallData(install):
if self.init_system and isinstance(self.init_system, str):
self.init_system = self.init_system.split(",")
- if len(self.init_system) == 0:
+ if len(self.init_system) == 0 and not platform.system().endswith('BSD'):
self.init_system = ['systemd']
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
@@ -272,7 +259,7 @@ data_files = [
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
]
-if os.uname()[0] != 'FreeBSD':
+if not platform.system().endswith('BSD'):
data_files.extend([
(ETC + '/NetworkManager/dispatcher.d/',
['tools/hook-network-manager']),
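
The removed ``tiny_p`` helper predates ``subprocess.check_output``; the
replacement pattern used above can be summarized as follows (a sketch, not
the exact setup.py code):

.. code-block:: python

    import subprocess
    import sys

    # check_output raises CalledProcessError on a nonzero exit, so the
    # manual returncode handling from tiny_p is no longer needed.
    out = subprocess.check_output([sys.executable, '--version'])
    print(out.decode('utf-8').strip())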
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 45efa243..0773356b 100755
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,7 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["redhat", "fedora", "centos"] %}
+{% if variant in ["rhel", "fedora", "centos"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index 9ad3574c..af6d9a8b 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["centos", "fedora", "redhat"] %}
+{% if variant in ["centos", "fedora", "rhel"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/sysvinit/netbsd/cloudconfig b/sysvinit/netbsd/cloudconfig
new file mode 100755
index 00000000..5cd7eb31
--- /dev/null
+++ b/sysvinit/netbsd/cloudconfig
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# PROVIDE: cloudconfig
+# REQUIRE: cloudinit
+# BEFORE: sshd
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init modules --mode config
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudfinal b/sysvinit/netbsd/cloudfinal
new file mode 100755
index 00000000..72f3e472
--- /dev/null
+++ b/sysvinit/netbsd/cloudfinal
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# PROVIDE: cloudfinal
+# REQUIRE: LOGIN cloudconfig
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init modules --mode final
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudinit b/sysvinit/netbsd/cloudinit
new file mode 100755
index 00000000..266afc2a
--- /dev/null
+++ b/sysvinit/netbsd/cloudinit
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# PROVIDE: cloudinit
+# REQUIRE: cloudinitlocal
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init init
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudinitlocal b/sysvinit/netbsd/cloudinitlocal
new file mode 100755
index 00000000..1f30e70b
--- /dev/null
+++ b/sysvinit/netbsd/cloudinitlocal
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# PROVIDE: cloudinitlocal
+# REQUIRE: NETWORKING
+
+# After NETWORKING because we don't want staticroute to wipe
+# the route set by the DHCP client toward the meta-data server.
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinitlocal"
+start_cmd="start_cloud_init_local"
+start_cloud_init_local()
+{
+ /usr/pkg/bin/cloud-init init -l
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
index 99978d3b..0a759b04 100644
--- a/templates/chef_client.rb.tmpl
+++ b/templates/chef_client.rb.tmpl
@@ -14,6 +14,9 @@ you need to add the following to config:
The reason these are not in quotes is because they are ruby
symbols that will be placed inside here, and not actual strings...
#}
+{% if chef_license %}
+chef_license "{{chef_license}}"
+{% endif%}
{% if log_level %}
log_level {{log_level}}
{% endif %}
diff --git a/templates/chrony.conf.alpine.tmpl b/templates/chrony.conf.alpine.tmpl
new file mode 100644
index 00000000..45efc18c
--- /dev/null
+++ b/templates/chrony.conf.alpine.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Welcome to the chrony configuration file. See chrony.conf(5) for more
+# information about usable directives.
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# This directive specifies the location of the file containing ID/key pairs for
+# NTP authentication.
+keyfile /etc/chrony/chrony.keys
+
+# This directive specifies the file into which chronyd will store the rate
+# information.
+driftfile /var/lib/chrony/chrony.drift
+
+# Uncomment the following line to turn logging on.
+#log tracking measurements statistics
+
+# Log files location.
+logdir /var/log/chrony
+
+# Stop bad estimates upsetting machine clock.
+maxupdateskew 100.0
+
+# This directive enables kernel synchronisation (every 11 minutes) of the
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
+rtcsync
+
+# Step the system clock instead of slewing it if the adjustment is larger than
+# one second, but only in the first three clock updates.
+makestep 1 3
diff --git a/templates/hosts.alpine.tmpl b/templates/hosts.alpine.tmpl
new file mode 100644
index 00000000..33c1a941
--- /dev/null
+++ b/templates/hosts.alpine.tmpl
@@ -0,0 +1,28 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/hosts.alpine.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.alpine.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.1.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost6.localdomain6 localhost6
+
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+ff02::3 ip6-allhosts
diff --git a/templates/hosts.freebsd.tmpl b/templates/hosts.freebsd.tmpl
index 7ded762f..5cd5d3bc 100644
--- a/templates/hosts.freebsd.tmpl
+++ b/templates/hosts.freebsd.tmpl
@@ -11,14 +11,13 @@ you need to add the following to config:
# a.) make changes to the master file in /etc/cloud/templates/hosts.freebsd.tmpl
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
-#
-# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
# The following lines are desirable for IPv6 capable hosts
::1 {{fqdn}} {{hostname}}
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
index 8e664dbf..5d7953f0 100644
--- a/templates/hosts.suse.tmpl
+++ b/templates/hosts.suse.tmpl
@@ -13,7 +13,7 @@ you need to add the following to config:
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
+127.0.1.1 {{fqdn}} {{hostname}}
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
diff --git a/templates/ntp.conf.alpine.tmpl b/templates/ntp.conf.alpine.tmpl
new file mode 100644
index 00000000..59ca8fc1
--- /dev/null
+++ b/templates/ntp.conf.alpine.tmpl
@@ -0,0 +1,10 @@
+## template:jinja
+# /etc/ntp.conf
+#
+# Configuration for Busybox ntpd - it only supports "server" lines.
+
+{% if servers %}# Servers
+{% endif %}
+{% for server in servers -%}
+server {{server}}
+{% endfor %}
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index bfae80db..f870be67 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -21,10 +21,18 @@ domain {{domain}}
sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
{% endif %}
+{#
+ Flags and options are required to be on the
+ same line, preceded by the "options" keyword
+#}
{% if options or flags %}
-options {% for flag in flags %}{{flag}} {% endfor %}
-{% for key, value in options.items() -%}
- {{key}}:{{value}}
+options
+{%- for flag in flags %}
+ {{flag-}}
+{% endfor %}
+
+{%- for key, value in options.items()|sort %}
+ {{key}}:{{value-}}
{% endfor %}
{% endif %}
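
Why the rewrite matters: resolv.conf(5) requires all flags and options to
share a single ``options`` line. An illustrative render of an equivalent
fragment with plain Jinja2 (not the actual cloud-init template machinery):

.. code-block:: python

    import jinja2

    TMPL = ("{% if options or flags %}options"
            "{% for flag in flags %} {{flag}}{% endfor %}"
            "{% for key, value in options.items()|sort %}"
            " {{key}}:{{value}}{% endfor %}{% endif %}")
    print(jinja2.Template(TMPL).render(
        flags=['rotate'], options={'timeout': 1}))
    # -> options rotate timeout:1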
diff --git a/test-requirements.txt b/test-requirements.txt
index 6fb22b24..0a6a04d4 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,12 +1,7 @@
# Needed generally in tests
httpretty>=0.7.1
-nose
-unittest2
-coverage
-
-# Only needed if you want to know the test times
-# nose-timer
+pytest
+pytest-cov
# Only really needed on older versions of python
-contextlib2
setuptools
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
index f04d0cd4..e45ad947 100644
--- a/tests/cloud_tests/bddeb.py
+++ b/tests/cloud_tests/bddeb.py
@@ -6,7 +6,7 @@ from functools import partial
import os
import tempfile
-from cloudinit import util as c_util
+from cloudinit import subp
from tests.cloud_tests import (config, LOG)
from tests.cloud_tests import platforms
from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
@@ -42,8 +42,8 @@ def build_deb(args, instance):
'GIT_WORK_TREE': extract_dir}
LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
- c_util.subp(['tar', 'cf', local_tarball, '--owner', 'root',
- '--group', 'root', '-C', args.cloud_init, '.'])
+ subp.subp(['tar', 'cf', local_tarball, '--owner', 'root',
+ '--group', 'root', '-C', args.cloud_init, '.'])
LOG.debug('copying to remote system at: %s', remote_tarball)
instance.push_file(local_tarball, remote_tarball)
@@ -55,7 +55,7 @@ def build_deb(args, instance):
LOG.debug('installing deps')
deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
instance.execute([deps_path, '--install', '--test-distro',
- '--distro', 'ubuntu', '--python-version', '3'])
+ '--distro', 'ubuntu'])
LOG.debug('building deb in remote system at: %s', output_link)
bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
index 6a410b84..e506baa0 100644
--- a/tests/cloud_tests/platforms/__init__.py
+++ b/tests/cloud_tests/platforms/__init__.py
@@ -6,6 +6,7 @@ from .ec2 import platform as ec2
from .lxd import platform as lxd
from .nocloudkvm import platform as nocloudkvm
from .azurecloud import platform as azurecloud
+from ..util import emit_dots_on_travis
PLATFORMS = {
'ec2': ec2.EC2Platform,
@@ -17,7 +18,8 @@ PLATFORMS = {
def get_image(platform, config):
"""Get image from platform object using os_name."""
- return platform.get_image(config)
+ with emit_dots_on_travis():
+ return platform.get_image(config)
def get_instance(snapshot, *args, **kwargs):
diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py
index f1e28a96..eedbaae8 100644
--- a/tests/cloud_tests/platforms/azurecloud/instance.py
+++ b/tests/cloud_tests/platforms/azurecloud/instance.py
@@ -80,7 +80,6 @@ class AzureCloudInstance(Instance):
except CloudError:
LOG.debug(('image not found, launching instance with base image, '
'image_id=%s'), self.image_id)
- pass
vm_params = {
'name': self.vm_name,
@@ -135,9 +134,10 @@ class AzureCloudInstance(Instance):
self.vm_name, vm_params)
LOG.debug('creating instance %s from image_id=%s', self.vm_name,
self.image_id)
- except CloudError:
- raise RuntimeError('failed creating instance:\n{}'.format(
- traceback.format_exc()))
+ except CloudError as e:
+ raise RuntimeError(
+ 'failed creating instance:\n{}'.format(traceback.format_exc())
+ ) from e
if wait:
self.instance.wait()
@@ -169,7 +169,6 @@ class AzureCloudInstance(Instance):
sleep(15)
else:
LOG.warning('Could not find console log: %s', e)
- pass
LOG.debug('stopping instance %s', self.image_id)
vm_deallocate = \
diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py
index cb62a74b..a664f612 100644
--- a/tests/cloud_tests/platforms/azurecloud/platform.py
+++ b/tests/cloud_tests/platforms/azurecloud/platform.py
@@ -59,9 +59,12 @@ class AzureCloudPlatform(Platform):
self.vnet = self._create_vnet()
self.subnet = self._create_subnet()
self.nic = self._create_nic()
- except CloudError:
- raise RuntimeError('failed creating a resource:\n{}'.format(
- traceback.format_exc()))
+ except CloudError as e:
+ raise RuntimeError(
+ 'failed creating a resource:\n{}'.format(
+ traceback.format_exc()
+ )
+ ) from e
def create_instance(self, properties, config, features,
image_id, user_data=None):
@@ -105,8 +108,10 @@ class AzureCloudPlatform(Platform):
if image_id.find('__') > 0:
image_id = image_id.split('__')[1]
LOG.debug('image_id shortened to %s', image_id)
- except KeyError:
- raise RuntimeError('no images found for %s' % img_conf['release'])
+ except KeyError as e:
+ raise RuntimeError(
+ 'no images found for %s' % img_conf['release']
+ ) from e
return AzureCloudImage(self, img_conf, image_id)
@@ -140,9 +145,11 @@ class AzureCloudPlatform(Platform):
secret=azure_creds['clientSecret'],
tenant=azure_creds['tenantId'])
return credentials, subscription_id
- except KeyError:
- raise RuntimeError('Please configure Azure service principal'
- ' credentials in %s' % cred_file)
+ except KeyError as e:
+ raise RuntimeError(
+ 'Please configure Azure service principal'
+ ' credentials in %s' % cred_file
+ ) from e
def _create_resource_group(self):
"""Create resource group"""
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
index ab6037b1..d2e84047 100644
--- a/tests/cloud_tests/platforms/ec2/instance.py
+++ b/tests/cloud_tests/platforms/ec2/instance.py
@@ -49,11 +49,11 @@ class EC2Instance(Instance):
# OutputBytes comes from platform._decode_console_output_as_bytes
response = self.instance.console_output()
return response['OutputBytes']
- except KeyError:
+ except KeyError as e:
if 'Output' in response:
msg = ("'OutputBytes' did not exist in console_output() but "
"'Output' did: %s..." % response['Output'][0:128])
- raise util.PlatformError('console_log', msg)
+ raise util.PlatformError('console_log', msg) from e
return ('No Console Output [%s]' % self.instance).encode()
def destroy(self):
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
index 7a3d0fe0..b61a2ffb 100644
--- a/tests/cloud_tests/platforms/ec2/platform.py
+++ b/tests/cloud_tests/platforms/ec2/platform.py
@@ -35,12 +35,14 @@ class EC2Platform(Platform):
self.ec2_resource = b3session.resource('ec2')
self.ec2_region = b3session.region_name
self.key_name = self._upload_public_key(config)
- except botocore.exceptions.NoRegionError:
+ except botocore.exceptions.NoRegionError as e:
raise RuntimeError(
- 'Please configure default region in $HOME/.aws/config')
- except botocore.exceptions.NoCredentialsError:
+ 'Please configure default region in $HOME/.aws/config'
+ ) from e
+ except botocore.exceptions.NoCredentialsError as e:
raise RuntimeError(
- 'Please configure ec2 credentials in $HOME/.aws/credentials')
+ 'Please configure ec2 credentials in $HOME/.aws/credentials'
+ ) from e
self.vpc = self._create_vpc()
self.internet_gateway = self._create_internet_gateway()
@@ -125,8 +127,10 @@ class EC2Platform(Platform):
try:
image_ami = image['id']
- except KeyError:
- raise RuntimeError('No images found for %s!' % img_conf['release'])
+ except KeyError as e:
+ raise RuntimeError(
+ 'No images found for %s!' % img_conf['release']
+ ) from e
LOG.debug('found image: %s', image_ami)
image = EC2Image(self, img_conf, image_ami)
@@ -195,7 +199,7 @@ class EC2Platform(Platform):
CidrBlock=self.ipv4_cidr,
AmazonProvidedIpv6CidrBlock=True)
except botocore.exceptions.ClientError as e:
- raise RuntimeError(e)
+ raise RuntimeError(e) from e
vpc.wait_until_available()
self._tag_resource(vpc)
diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py
index 557a5cf6..f047de2e 100644
--- a/tests/cloud_tests/platforms/images.py
+++ b/tests/cloud_tests/platforms/images.py
@@ -52,6 +52,5 @@ class Image(TargetBase):
def destroy(self):
"""Clean up data associated with image."""
- pass
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
index 529e79cd..efc35c7f 100644
--- a/tests/cloud_tests/platforms/instances.py
+++ b/tests/cloud_tests/platforms/instances.py
@@ -132,8 +132,8 @@ class Instance(TargetBase):
"""
def clean_test(test):
"""Clean formatting for system ready test testcase."""
- return ' '.join(l for l in test.strip().splitlines()
- if not l.lstrip().startswith('#'))
+ return ' '.join(line for line in test.strip().splitlines()
+ if not line.lstrip().startswith('#'))
boot_timeout = self.config['boot_timeout']
tests = [self.config['system_ready_script']]
diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py
index b5de1f52..a88b47f3 100644
--- a/tests/cloud_tests/platforms/lxd/image.py
+++ b/tests/cloud_tests/platforms/lxd/image.py
@@ -8,6 +8,7 @@ import tempfile
from ..images import Image
from .snapshot import LXDSnapshot
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -75,19 +76,36 @@ class LXDImage(Image):
}
def export_image(self, output_dir):
- """Export image from lxd image store to (split) tarball on disk.
+ """Export image from lxd image store to disk.
- @param output_dir: dir to store tarballs in
- @return_value: tuple of path to metadata tarball and rootfs tarball
+ @param output_dir: dir to store the exported image in
+ @return_value: tuple of path to metadata tarball and rootfs
+
+ Only the "split" image format with separate rootfs and metadata
+ files is supported, e.g:
+
+ 71f171df[...]cd31.squashfs (could also be: .tar.xz or .tar.gz)
+ meta-71f171df[...]cd31.tar.xz
+
+ Combined images made by a single tarball are not supported.
"""
# pylxd's image export feature doesn't do split exports, so use cmdline
- c_util.subp(['lxc', 'image', 'export', self.pylxd_image.fingerprint,
- output_dir], capture=True)
- tarballs = [p for p in os.listdir(output_dir) if p.endswith('tar.xz')]
+ fp = self.pylxd_image.fingerprint
+ subp.subp(['lxc', 'image', 'export', fp, output_dir], capture=True)
+ image_files = [p for p in os.listdir(output_dir) if fp in p]
+
+ if len(image_files) != 2:
+ raise NotImplementedError(
+ "Image %s has unsupported format. "
+ "Expected 2 files, found %d: %s."
+ % (fp, len(image_files), ', '.join(image_files)))
+
metadata = os.path.join(
- output_dir, next(p for p in tarballs if p.startswith('meta-')))
+ output_dir,
+ next(p for p in image_files if p.startswith('meta-')))
rootfs = os.path.join(
- output_dir, next(p for p in tarballs if not p.startswith('meta-')))
+ output_dir,
+ next(p for p in image_files if not p.startswith('meta-')))
return (metadata, rootfs)
def import_image(self, metadata, rootfs):
@@ -101,8 +119,8 @@ class LXDImage(Image):
"""
alias = util.gen_instance_name(
image_desc=str(self), use_desc='update-metadata')
- c_util.subp(['lxc', 'image', 'import', metadata, rootfs,
- '--alias', alias], capture=True)
+ subp.subp(['lxc', 'image', 'import', metadata, rootfs,
+ '--alias', alias], capture=True)
self.pylxd_image = self.platform.query_image_by_alias(alias)
return self.pylxd_image.fingerprint
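
The selection logic added above can be pictured with a toy listing; only the
split format, a ``meta-<fingerprint>`` tarball plus one rootfs file, is
accepted:

.. code-block:: python

    # Hypothetical export directory contents for fingerprint 71f171df.
    files = ['meta-71f171df.tar.xz', '71f171df.squashfs']
    metadata = next(p for p in files if p.startswith('meta-'))
    rootfs = next(p for p in files if not p.startswith('meta-'))
    assert (metadata, rootfs) == ('meta-71f171df.tar.xz', '71f171df.squashfs')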
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
index 2b804a62..2b973a08 100644
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -7,7 +7,8 @@ import shutil
import time
from tempfile import mkdtemp
-from cloudinit.util import load_yaml, subp, ProcessExecutionError, which
+from cloudinit.subp import subp, ProcessExecutionError, which
+from cloudinit.util import load_yaml
from tests.cloud_tests import LOG
from tests.cloud_tests.util import PlatformError
@@ -174,7 +175,8 @@ class LXDInstance(Instance):
raise PlatformError(
"console log",
"Console log failed [%d]: stdout=%s stderr=%s" % (
- e.exit_code, e.stdout, e.stderr))
+ e.exit_code, e.stdout, e.stderr)
+ ) from e
def reboot(self, wait=True):
"""Reboot instance."""
diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
index bc2b6e75..ff5b6ad7 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/image.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/image.py
@@ -2,7 +2,7 @@
"""NoCloud KVM Image Base Class."""
-from cloudinit import util as c_util
+from cloudinit import subp
import os
import shutil
@@ -30,8 +30,8 @@ class NoCloudKVMImage(Image):
self._img_path = os.path.join(self._workd,
os.path.basename(self._orig_img_path))
- c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', orig_img_path, self._img_path])
+ subp.subp(['qemu-img', 'create', '-f', 'qcow2',
+ '-b', orig_img_path, self._img_path])
super(NoCloudKVMImage, self).__init__(platform, config)
@@ -50,10 +50,10 @@ class NoCloudKVMImage(Image):
'--system-resolvconf', self._img_path,
'--', 'chroot', '_MOUNTPOINT_']
try:
- out, err = c_util.subp(mic_chroot + env_args + list(command),
- data=stdin, decode=False)
+ out, err = subp.subp(mic_chroot + env_args + list(command),
+ data=stdin, decode=False)
return (out, err, 0)
- except c_util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
return (e.stdout, e.stderr, e.exit_code)
def snapshot(self):
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
index 96185b75..5140a11c 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -11,7 +11,7 @@ import uuid
from ..instances import Instance
from cloudinit.atomic_helper import write_json
-from cloudinit import util as c_util
+from cloudinit import subp
from tests.cloud_tests import LOG, util
# This domain contains reverse lookups for hostnames that are used.
@@ -110,8 +110,8 @@ class NoCloudKVMInstance(Instance):
"""Clean up instance."""
if self.pid:
try:
- c_util.subp(['kill', '-9', self.pid])
- except c_util.ProcessExecutionError:
+ subp.subp(['kill', '-9', self.pid])
+ except subp.ProcessExecutionError:
pass
if self.pid_file:
@@ -143,8 +143,8 @@ class NoCloudKVMInstance(Instance):
# meta-data can be yaml, but more easily pretty printed with json
write_json(meta_data_file, self.meta_data)
- c_util.subp(['cloud-localds', seed_file, user_data_file,
- meta_data_file])
+ subp.subp(['cloud-localds', seed_file, user_data_file,
+ meta_data_file])
return seed_file
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
index 2d1480f5..53c8ebf2 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -12,6 +12,7 @@ from simplestreams import util as s_util
from ..platforms import Platform
from .image import NoCloudKVMImage
from .instance import NoCloudKVMInstance
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -84,8 +85,8 @@ class NoCloudKVMPlatform(Platform):
"""
name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
- c_util.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', src_img_path, img_path])
+ subp.subp(['qemu-img', 'create', '-f', 'qcow2',
+ '-b', src_img_path, img_path])
return NoCloudKVMInstance(self, name, img_path, properties, config,
features, user_data, meta_data)
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
index bebdf1c6..ac3b6563 100644
--- a/tests/cloud_tests/platforms/platforms.py
+++ b/tests/cloud_tests/platforms/platforms.py
@@ -7,6 +7,7 @@ import shutil
from simplestreams import filters, mirrors
from simplestreams import util as s_util
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import util
@@ -48,10 +49,10 @@ class Platform(object):
if os.path.exists(filename):
c_util.del_file(filename)
- c_util.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
+ subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
+ '-f', filename, '-P', '',
+ '-C', 'ubuntu@cloud_test'],
+ capture=True)
@staticmethod
def _query_streams(img_conf, img_filter):
@@ -73,8 +74,10 @@ class Platform(object):
try:
return tmirror.json_entries[0]
- except IndexError:
- raise RuntimeError('no images found with filter: %s' % img_filter)
+ except IndexError as e:
+ raise RuntimeError(
+ 'no images found with filter: %s' % img_filter
+ ) from e
class FilterMirror(mirrors.BasicMirrorWriter):
diff --git a/tests/cloud_tests/platforms/snapshots.py b/tests/cloud_tests/platforms/snapshots.py
index 94328982..0f5f8bb6 100644
--- a/tests/cloud_tests/platforms/snapshots.py
+++ b/tests/cloud_tests/platforms/snapshots.py
@@ -40,6 +40,5 @@ class Snapshot(object):
def destroy(self):
"""Clean up snapshot data."""
- pass
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 7ddc5b85..e76a3d35 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -30,8 +30,10 @@ default_release_config:
mirror_url: https://cloud-images.ubuntu.com/daily
mirror_dir: '/srv/citest/images'
keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- # The OS version formatted as Major.Minor is used to compare releases
- version: null # Each release needs to define this, for example 16.04
+ # The OS version formatted as Major.Minor is used to compare releases.
+ # Each release needs to define this, for example "16.04". Quoting is
+ # necessary to ensure the version is treated as a string.
+ version: null
ec2:
# Choose from: [ebs, instance-store]
@@ -131,12 +133,44 @@ features:
releases:
# UBUNTU =================================================================
+ groovy:
+ # EOL: Jul 2021
+ default:
+ enabled: true
+ release: groovy
+ version: "20.10"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: groovy
+ setup_overrides: null
+ override_templates: false
+ focal:
+ # EOL: Apr 2025
+ default:
+ enabled: true
+ release: focal
+ version: "20.04"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: focal
+ setup_overrides: null
+ override_templates: false
eoan:
# EOL: Jul 2020
default:
enabled: true
release: eoan
- version: 19.10
+ version: "19.10"
os: ubuntu
feature_groups:
- base
@@ -152,7 +186,7 @@ releases:
default:
enabled: true
release: disco
- version: 19.04
+ version: "19.04"
os: ubuntu
feature_groups:
- base
@@ -168,7 +202,7 @@ releases:
default:
enabled: true
release: cosmic
- version: 18.10
+ version: "18.10"
os: ubuntu
feature_groups:
- base
@@ -184,7 +218,7 @@ releases:
default:
enabled: true
release: bionic
- version: 18.04
+ version: "18.04"
os: ubuntu
feature_groups:
- base
@@ -200,7 +234,7 @@ releases:
default:
enabled: true
release: artful
- version: 17.10
+ version: "17.10"
os: ubuntu
feature_groups:
- base
@@ -216,7 +250,7 @@ releases:
default:
enabled: true
release: xenial
- version: 16.04
+ version: "16.04"
os: ubuntu
feature_groups:
- base
@@ -232,7 +266,7 @@ releases:
default:
enabled: true
release: trusty
- version: 14.04
+ version: "14.04"
os: ubuntu
feature_groups:
- base
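
The version fields above gain quotes because an unquoted YAML scalar such as 18.10 parses as a float and the trailing zero is lost, which corrupts release comparisons. A quick demonstration with PyYAML:

    import yaml

    # Unquoted, the version comes back as a float with the zero dropped.
    assert yaml.safe_load('version: 18.10') == {'version': 18.1}

    # Quoted, it survives as the intended string.
    assert yaml.safe_load('version: "18.10"') == {'version': '18.10'}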
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index 6bb39f77..bb9785d3 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -4,7 +4,7 @@
import importlib
import inspect
-import unittest2
+import unittest
from cloudinit.util import read_conf
@@ -21,8 +21,10 @@ def discover_test(test_name):
config.name_sanitize(test_name))
try:
testmod = importlib.import_module(testmod_name)
- except NameError:
- raise ValueError('no test verifier found at: {}'.format(testmod_name))
+ except NameError as e:
+ raise ValueError(
+ 'no test verifier found at: {}'.format(testmod_name)
+ ) from e
found = [mod for name, mod in inspect.getmembers(testmod)
if (inspect.isclass(mod)
@@ -48,7 +50,7 @@ def get_test_class(test_name, test_data, test_conf):
def __str__(self):
return "%s (%s)" % (self._testMethodName,
- unittest2.util.strclass(self._realclass))
+ unittest.util.strclass(self._realclass))
@classmethod
def setUpClass(cls):
@@ -62,9 +64,9 @@ def get_suite(test_name, data, conf):
@return_value: a test suite
"""
- suite = unittest2.TestSuite()
+ suite = unittest.TestSuite()
suite.addTest(
- unittest2.defaultTestLoader.loadTestsFromTestCase(
+ unittest.defaultTestLoader.loadTestsFromTestCase(
get_test_class(test_name, data, conf)))
return suite
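
unittest2 was a backport for Python 2; with cloud-init now Python 3 only, the standard library unittest covers everything these suites use, and Python 2's assertItemsEqual is replaced by its Python 3 spelling assertCountEqual (same elements in any order, with duplicates counted). A minimal sketch of the renamed assertion:

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_count_equal_ignores_order(self):
            # Passes: identical elements regardless of ordering.
            self.assertCountEqual(['b', 'a'], ['a', 'b'])

        def test_count_equal_counts_duplicates(self):
            # Fails when duplicate counts differ, unlike a set comparison.
            with self.assertRaises(AssertionError):
                self.assertCountEqual(['a', 'a'], ['a'])

    if __name__ == '__main__':
        unittest.main()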
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index fd12d87b..4448e0b5 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -5,15 +5,15 @@
import crypt
import json
import re
-import unittest2
+import unittest
from cloudinit import util as c_util
-SkipTest = unittest2.SkipTest
+SkipTest = unittest.SkipTest
-class CloudTestCase(unittest2.TestCase):
+class CloudTestCase(unittest.TestCase):
"""Base test class for verifiers."""
# data gets populated in get_suite.setUpClass
@@ -34,7 +34,6 @@ class CloudTestCase(unittest2.TestCase):
@classmethod
def maybeSkipTest(cls):
"""Present to allow subclasses to override and raise a skipTest."""
- pass
def assertPackageInstalled(self, name, version=None):
"""Check dpkg-query --show output for matching package name.
@@ -141,8 +140,8 @@ class CloudTestCase(unittest2.TestCase):
def test_no_warnings_in_log(self):
"""Unexpected warnings should not be found in the log."""
warnings = [
- l for l in self.get_data_file('cloud-init.log').splitlines()
- if 'WARN' in l]
+ line for line in self.get_data_file('cloud-init.log').splitlines()
+ if 'WARN' in line]
joined_warnings = '\n'.join(warnings)
for expected_warning in self.expected_warnings:
self.assertIn(
@@ -172,9 +171,7 @@ class CloudTestCase(unittest2.TestCase):
'Skipping instance-data.json test.'
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
- self.assertItemsEqual(
- [],
- instance_data['base64_encoded_keys'])
+ self.assertCountEqual(['merged_cfg'], instance_data['sensitive_keys'])
ds = instance_data.get('ds', {})
v1_data = instance_data.get('v1', {})
metadata = ds.get('meta-data', {})
@@ -201,6 +198,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIn('i-', v1_data['instance_id'])
self.assertIn('ip-', v1_data['local_hostname'])
self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
def test_instance_data_json_lxd(self):
"""Validate instance-data.json content by lxd platform.
@@ -222,7 +236,7 @@ class CloudTestCase(unittest2.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys']))
+ self.assertCountEqual([], sorted(instance_data['base64_encoded_keys']))
self.assertEqual('unknown', v1_data['cloud_name'])
self.assertEqual('lxd', v1_data['platform'])
self.assertEqual(
@@ -237,6 +251,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
def test_instance_data_json_kvm(self):
"""Validate instance-data.json content by nocloud-kvm platform.
@@ -259,7 +290,7 @@ class CloudTestCase(unittest2.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertItemsEqual([], instance_data['base64_encoded_keys'])
+ self.assertCountEqual([], instance_data['base64_encoded_keys'])
self.assertEqual('unknown', v1_data['cloud_name'])
self.assertEqual('nocloud', v1_data['platform'])
subplatform = v1_data['subplatform']
@@ -278,6 +309,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
class PasswordListTest(CloudTestCase):
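
The new assertions above pin down the v1 keys that 20.3 exports in instance-data.json (kernel_release, variant, distro, distro_release, distro_version, machine, python_version) and verify that merged_cfg is redacted for non-root readers. A minimal sketch of inspecting the same file on a booted instance; the path is the standard run-time location, and running as a non-root user shows the redaction:

    import json

    # Standard location written by cloud-init at boot.
    with open('/run/cloud-init/instance-data.json') as f:
        instance_data = json.load(f)

    v1 = instance_data.get('v1', {})
    print(v1.get('distro'), v1.get('distro_version'), v1.get('machine'))
    # As a non-root user the merged configuration is not exposed:
    print(instance_data.get('merged_cfg'))  # 'redacted for non-root user'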
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
index 0bec305e..68ca95b5 100644
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
+++ b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
@@ -8,43 +8,44 @@ cloud_config: |
#cloud-config
# Key from https://packages.chef.io/chef.asc
apt:
- source1:
- source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
+ sources:
+ source1:
+ source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
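
The example cloud-config above is corrected to match the current apt schema: per-repository entries live under apt.sources rather than directly under apt. A quick check of the corrected layout with PyYAML (the key body is elided here):

    import textwrap
    import yaml

    cfg = yaml.safe_load(textwrap.dedent('''\
        apt:
          sources:
            source1:
              source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
              key: |
                -----BEGIN PGP PUBLIC KEY BLOCK-----
                ...
                -----END PGP PUBLIC KEY BLOCK-----
        '''))
    assert 'source1' in cfg['apt']['sources']
    assert 'PGP PUBLIC KEY' in cfg['apt']['sources']['source1']['key']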
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
index 0f4c3d08..7d341773 100644
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""cloud-init Integration Test Verify Script."""
-import unittest2
+import unittest
from tests.cloud_tests.testcases import base
@@ -13,7 +13,7 @@ class TestNtpChrony(base.CloudTestCase):
"""Skip this suite of tests on lxd and artful or older."""
if self.platform == 'lxd':
if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
- raise unittest2.SkipTest(
+ raise unittest.SkipTest(
'No support for chrony on containers <= artful.'
' LP: #1589780')
return super(TestNtpChrony, self).setUp()
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 06f7d865..7dcccbdd 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -5,6 +5,7 @@
import base64
import copy
import glob
+import multiprocessing
import os
import random
import shlex
@@ -12,8 +13,11 @@ import shutil
import string
import subprocess
import tempfile
+import time
import yaml
+from contextlib import contextmanager
+from cloudinit import subp
from cloudinit import util as c_util
from tests.cloud_tests import LOG
@@ -118,6 +122,36 @@ def current_verbosity():
return max(min(3 - int(LOG.level / 10), 2), 0)
+@contextmanager
+def emit_dots_on_travis():
+ """
+ A context manager that emits a dot every 10 seconds if running on Travis.
+
+ Travis will kill jobs that don't emit output for a certain amount of time.
+ This context manager spins up a background process which will emit a dot to
+ stdout every 10 seconds to avoid being killed.
+
+ It should be wrapped selectively around operations that are known to take a
+ long time.
+ """
+ if os.environ.get('TRAVIS') != "true":
+ # If we aren't on Travis, don't do anything.
+ yield
+ return
+
+ def emit_dots():
+ while True:
+ print(".")
+ time.sleep(10)
+
+ dot_process = multiprocessing.Process(target=emit_dots)
+ dot_process.start()
+ try:
+ yield
+ finally:
+ dot_process.terminate()
+
+
def is_writable_dir(path):
"""Make sure dir is writable.
@@ -199,8 +233,8 @@ def flat_tar(output, basedir, owner='root', group='root'):
@param group: group archive files belong to
@return_value: none
"""
- c_util.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
- '-C', basedir] + rel_files(basedir), capture=True)
+ subp.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
+ '-C', basedir] + rel_files(basedir), capture=True)
def parse_conf_list(entries, valid=None, boolean=False):
@@ -432,7 +466,7 @@ class TargetBase(object):
return path
-class InTargetExecuteError(c_util.ProcessExecutionError):
+class InTargetExecuteError(subp.ProcessExecutionError):
"""Error type for in target commands that fail."""
default_desc = 'Unexpected error while running command.'
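
emit_dots_on_travis, added above, is a context manager, so long-running steps simply wrap themselves in a with-block; off Travis it degrades to a no-op. A minimal usage sketch, where collect_console is a hypothetical stand-in for a slow operation:

    import os
    import time

    from tests.cloud_tests.util import emit_dots_on_travis

    def collect_console():
        # Hypothetical stand-in for a slow image or console operation.
        time.sleep(1)

    os.environ['TRAVIS'] = 'true'  # pretend we are running on Travis CI
    with emit_dots_on_travis():
        collect_console()  # dots are printed every 10s while this runs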
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index 7018f4d5..0295af40 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -3,7 +3,7 @@
"""Verify test results."""
import os
-import unittest2
+import unittest
from tests.cloud_tests import (config, LOG, util, testcases)
@@ -18,7 +18,7 @@ def verify_data(data_dir, platform, os_name, tests):
@return_value: {<test_name>: {passed: True/False, failures: []}}
"""
base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
+ runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
res = {}
for test_name in tests:
LOG.debug('verifying test data for %s', test_name)
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index b92ffc79..c5675249 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -15,6 +15,7 @@ from cloudinit.tests.helpers import (
from cloudinit import handlers
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
@@ -66,7 +67,7 @@ class TestUpstartJobPartHandler(FilesystemMockingTestCase):
util.ensure_dir("/etc/upstart")
with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True):
- with mock.patch.object(util, 'subp') as m_subp:
+ with mock.patch.object(subp, 'subp') as m_subp:
h = UpstartJobPartHandler(paths)
h.handle_part('', handlers.CONTENT_START,
None, None, None)
@@ -109,7 +110,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler, cloudconfig_handler])
- self.assertItemsEqual(
+ self.assertCountEqual(
['text/cloud-config', 'text/cloud-config-jsonp',
'text/x-shellscript'],
h.sub_handlers)
@@ -120,7 +121,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler, cloudconfig_handler])
- self.assertItemsEqual(
+ self.assertCountEqual(
['text/cloud-config', 'text/cloud-config-jsonp',
'text/x-shellscript'],
h.sub_handlers)
@@ -302,7 +303,7 @@ class TestConvertJinjaInstanceData(CiTestCase):
expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'})
converted_data = convert_jinja_instance_data(data=data)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys())
self.assertEqual(
expected_data,
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index e57c15d1..dcf0fe5a 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -214,17 +214,17 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
- 'Expected either --config-file argument or --doc\n',
+ 'Expected either --config-file argument or --docs\n',
self.stderr.getvalue())
def test_wb_devel_schema_subcommand_doc_content(self):
"""Validate that doc content is sane from known examples."""
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'devel', 'schema', '--doc'])
+ self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
- '**Supported distros:** centos, debian, fedora',
+ '**Supported distros:** alpine, centos, debian, fedora',
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index 2a1095b9..bfd07ecf 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
from cloudinit.tests import helpers as test_helpers
from cloudinit.cs_utils import Cepko
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 74cc26ec..fb2b55e8 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -213,6 +213,40 @@ c: d
self.assertEqual(1, len(cc))
self.assertEqual('c', cc['a'])
+ def test_cloud_config_as_x_shell_script(self):
+ blob_cc = '''
+#cloud-config
+a: b
+c: d
+'''
+ message_cc = MIMEBase("text", "x-shellscript")
+ message_cc.set_payload(blob_cc)
+
+ blob_jp = '''
+#cloud-config-jsonp
+[
+ { "op": "replace", "path": "/a", "value": "c" },
+ { "op": "remove", "path": "/c" }
+]
+'''
+
+ message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp.set_payload(blob_jp)
+
+ message = MIMEMultipart()
+ message.attach(message_cc)
+ message.attach(message_jp)
+
+ self.reRoot()
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(str(message))
+ ci.fetch()
+ ci.consume_data()
+ cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+ cc = util.load_yaml(cc_contents)
+ self.assertEqual(1, len(cc))
+ self.assertEqual('c', cc['a'])
+
def test_vendor_user_yaml_cloud_config(self):
vendor_blob = '''
#cloud-config
@@ -605,6 +639,33 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
self.reRoot()
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
+ ci.fetch()
+ with self.assertRaises(Exception) as context:
+ ci.consume_data()
+ self.assertIn('403', str(context.exception))
+
+ with self.assertRaises(FileNotFoundError):
+ util.load_file(ci.paths.get_ipath("cloud_config"))
+
+ @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch(
+ "cloudinit.user_data.features.ERROR_ON_USER_DATA_FAILURE", False
+ )
+ def test_include_bad_url_no_fail(self, mock_sleep):
+ """Test #include with a bad URL and failure disabled"""
+ bad_url = 'http://bad/forbidden'
+ bad_data = '#cloud-config\nbad: true\n'
+ httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
+
+ included_url = 'http://hostname/path'
+ included_data = '#cloud-config\nincluded: true\n'
+ httpretty.register_uri(httpretty.GET, included_url, included_data)
+
+ blob = '#include\n%s\n%s' % (bad_url, included_url)
+
+ self.reRoot()
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(blob)
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
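
The new test_cloud_config_as_x_shell_script above builds multi-part user-data by hand; the parts are ordinary MIME messages, and the test exercises the rule that a payload's "#cloud-config" shebang takes precedence over the declared text/x-shellscript part type. A minimal sketch of composing such a message with the standard library (payloads shortened):

    from email.mime.base import MIMEBase
    from email.mime.multipart import MIMEMultipart

    cc = MIMEBase('text', 'x-shellscript')
    cc.set_payload('#cloud-config\na: b\nc: d\n')

    jp = MIMEBase('text', 'cloud-config-jsonp')
    jp.set_payload(
        '#cloud-config-jsonp\n'
        '[{ "op": "replace", "path": "/a", "value": "c" },\n'
        ' { "op": "remove", "path": "/c" }]\n')

    message = MIMEMultipart()
    message.attach(cc)
    message.attach(jp)
    print(str(message)[:60], '...')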
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index 1e66fcdb..b626229e 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -143,7 +143,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.assertEqual('aliyun', self.ds.cloud_name)
self.assertEqual('ec2', self.ds.platform)
self.assertEqual(
- 'metadata (http://100.100.100.200)', self.ds.subplatform)
+ 'metadata (http://100.100.100.200)', self.ds.subplatform)
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index 3119bfac..fc59d1d5 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -15,6 +15,7 @@ import shutil
import tempfile
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, mock
@@ -286,7 +287,7 @@ class TestUserDataRhevm(CiTestCase):
def test_modprobe_fails(self):
'''Test user_data_rhevm() where modprobe fails.'''
- self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
"Failed modprobe")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
@@ -294,7 +295,7 @@ class TestUserDataRhevm(CiTestCase):
def test_no_modprobe_cmd(self):
'''Test user_data_rhevm() with no modprobe command.'''
- self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
"No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
@@ -302,7 +303,7 @@ class TestUserDataRhevm(CiTestCase):
def test_udevadm_fails(self):
'''Test user_data_rhevm() where udevadm fails.'''
- self.m_udevadm_settle.side_effect = util.ProcessExecutionError(
+ self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
"Failed settle.")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index a809fd87..47e03bd1 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -114,14 +114,14 @@ NETWORK_METADATA = {
"ipv4": {
"subnet": [
{
- "prefix": "24",
- "address": "10.0.0.0"
+ "prefix": "24",
+ "address": "10.0.0.0"
}
],
"ipAddress": [
{
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81"
}
]
}
@@ -278,6 +278,23 @@ class TestParseNetworkConfig(CiTestCase):
}
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value='hv_netvsc')
+ def test_match_driver_for_netvsc(self, m_driver):
+ """parse_network_config emits driver when using netvsc."""
+ expected = {'ethernets': {
+ 'eth0': {
+ 'dhcp4': True,
+ 'dhcp4-overrides': {'route-metric': 100},
+ 'dhcp6': False,
+ 'match': {
+ 'macaddress': '00:0d:3a:04:75:98',
+ 'driver': 'hv_netvsc',
+ },
+ 'set-name': 'eth0'
+ }}, 'version': 2}
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
class TestGetMetadataFromIMDS(HttprettyTestCase):
@@ -383,8 +400,6 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
class TestAzureDataSource(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestAzureDataSource, self).setUp()
self.tmp = self.tmp_dir()
@@ -493,7 +508,7 @@ scbus-1 on xpt0 bus 0
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
(dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
- (dsaz.util, 'which', lambda x: True),
+ (dsaz.subp, 'which', lambda x: True),
(dsaz.util, 'read_dmi_data', mock.MagicMock(
side_effect=_dmi_mocks)),
(dsaz.util, 'wait_for_files', mock.MagicMock(
@@ -530,14 +545,14 @@ scbus-1 on xpt0 bus 0
def tags_exists(x, y):
for tag in x.keys():
- self.assertIn(tag, y)
+ assert tag in y
for tag in y.keys():
- self.assertIn(tag, x)
+ assert tag in x
def tags_equal(x, y):
for x_val in x.values():
y_val = y.get(x_val.tag)
- self.assertEqual(x_val.text, y_val.text)
+ assert x_val.text == y_val.text
old_cnt = create_tag_index(oxml)
new_cnt = create_tag_index(nxml)
@@ -651,7 +666,7 @@ scbus-1 on xpt0 bus 0
crawled_metadata = dsrc.crawl_metadata()
- self.assertItemsEqual(
+ self.assertCountEqual(
crawled_metadata.keys(),
['cfg', 'files', 'metadata', 'userdata_raw'])
self.assertEqual(crawled_metadata['cfg'], expected_cfg)
@@ -685,15 +700,17 @@ scbus-1 on xpt0 bus 0
'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
@mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func,
- report_ready_func,
- m_write, m_dhcp):
+ self, poll_imds_func, report_ready_func, m_write, m_dhcp
+ ):
"""If reprovisioning, report ready at the end"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
@@ -708,15 +725,18 @@ scbus-1 on xpt0 bus 0
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('cloudinit.sources.DataSourceAzure.readurl')
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_dhcp,
- m_net, report_ready_func,
- m_media_switch, m_write):
+ self, m_readurl, m_dhcp, m_net, report_ready_func,
+ m_media_switch, m_write
+ ):
"""If reprovisioning, report ready using the obtained lease"""
ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
+ platform_settings={"PreprovisionedVm": "True"}
+ )
- data = {'ovfcontent': ovfenv,
- 'sys_cfg': {}}
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
dsrc = self._get_ds(data)
lease = {
@@ -1269,20 +1289,20 @@ scbus-1 on xpt0 bus 0
expected_config['config'].append(blacklist_config)
self.assertEqual(netconfig, expected_config)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_no_args(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_no_args(self, m_subp):
dsaz.get_hostname()
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_string_arg(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_string_arg(self, m_subp):
dsaz.get_hostname(hostname_command="hostname")
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'util.subp')
- def test_get_hostname_with_iterable_arg(self, subp):
+ @mock.patch(MOCKPATH + 'subp.subp')
+ def test_get_hostname_with_iterable_arg(self, m_subp):
dsaz.get_hostname(hostname_command=("hostname",))
- subp.assert_called_once_with(("hostname",), capture=True)
+ m_subp.assert_called_once_with(("hostname",), capture=True)
class TestAzureBounce(CiTestCase):
@@ -1304,7 +1324,7 @@ class TestAzureBounce(CiTestCase):
mock.patch.object(dsaz, 'get_metadata_from_imds',
mock.MagicMock(return_value={})))
self.patches.enter_context(
- mock.patch.object(dsaz.util, 'which', lambda x: True))
+ mock.patch.object(dsaz.subp, 'which', lambda x: True))
self.patches.enter_context(mock.patch.object(
dsaz, '_get_random_seed', return_value='wild'))
@@ -1333,7 +1353,7 @@ class TestAzureBounce(CiTestCase):
self.set_hostname = self.patches.enter_context(
mock.patch.object(dsaz, 'set_hostname'))
self.subp = self.patches.enter_context(
- mock.patch(MOCKPATH + 'util.subp'))
+ mock.patch(MOCKPATH + 'subp.subp'))
self.find_fallback_nic = self.patches.enter_context(
mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
@@ -1416,7 +1436,7 @@ class TestAzureBounce(CiTestCase):
cfg = {'hostname_bounce': {'policy': 'force'}}
dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
agent_command=['not', '__builtin__'])
- patch_path = MOCKPATH + 'util.which'
+ patch_path = MOCKPATH + 'subp.which'
with mock.patch(patch_path) as m_which:
m_which.return_value = None
ret = self._get_and_setup(dsrc)
@@ -1521,6 +1541,17 @@ class TestAzureBounce(CiTestCase):
self.assertEqual(0, self.set_hostname.call_count)
+ @mock.patch(MOCKPATH + 'perform_hostname_bounce')
+ def test_set_hostname_failed_disable_bounce(
+ self, perform_hostname_bounce):
+ cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}}
+ self.get_hostname.return_value = "old-hostname"
+ self.set_hostname.side_effect = Exception
+ data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+ self._get_ds(data).get_data()
+
+ self.assertEqual(0, perform_hostname_bounce.call_count)
+
class TestLoadAzureDsDir(CiTestCase):
"""Tests for load_azure_ds_dir."""
@@ -1953,11 +1984,12 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.tries += 1
if self.tries == 1:
raise requests.Timeout('Fake connection timeout')
- elif self.tries == 2:
+ elif self.tries in (2, 3):
response = requests.Response()
- response.status_code = 404
+ response.status_code = 404 if self.tries == 2 else 410
raise requests.exceptions.HTTPError(
- "fake 404", response=response)
+ "fake {}".format(response.status_code), response=response
+ )
# The final try should succeed and stop retries or re-dhcp
return mock.MagicMock(status_code=200, text="good", content="good")
@@ -1969,7 +2001,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(report_ready_func.call_count, 1)
report_ready_func.assert_called_with(lease=lease)
self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(3, self.tries, 'Expected 3 total reads from IMDS')
+ self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
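
The widened expectation above reflects that IMDS polling now also retries HTTP 410 (Gone) alongside 404 and connection timeouts, so the fake response sequence grows to four reads. A minimal sketch of the retry predicate the test implies; the status set comes from the test, while the helper name is illustrative:

    import requests

    RETRYABLE_STATUS = (404, 410)

    def should_retry_imds(exc):
        # Connection-level timeouts always retry.
        if isinstance(exc, requests.Timeout):
            return True
        # HTTP errors retry only for the transient status codes above.
        if isinstance(exc, requests.exceptions.HTTPError):
            return (exc.response is not None
                    and exc.response.status_code in RETRYABLE_STATUS)
        return False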
def test_poll_imds_report_ready_false(self,
report_ready_func, fake_resp,
@@ -1989,7 +2021,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(report_ready_func.call_count, 0)
-@mock.patch(MOCKPATH + 'util.subp')
+@mock.patch(MOCKPATH + 'subp.subp')
@mock.patch(MOCKPATH + 'util.write_file')
@mock.patch(MOCKPATH + 'util.is_FreeBSD')
@mock.patch('cloudinit.sources.helpers.netlink.'
@@ -2160,7 +2192,7 @@ class TestWBIsPlatformViable(CiTestCase):
{'os.path.exists': False,
# Non-matching Azure chassis-asset-tag
'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'util.which': None},
+ 'subp.which': None},
dsaz._is_platform_viable, 'doesnotmatter'))
self.assertIn(
"DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 007df09f..5e6d3d2d 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import unittest2
+import re
+import unittest
from textwrap import dedent
+from xml.etree import ElementTree
from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
@@ -48,6 +50,30 @@ GOAL_STATE_TEMPLATE = """\
</GoalState>
"""
+HEALTH_REPORT_XML_TEMPLATE = '''\
+<?xml version="1.0" encoding="utf-8"?>
+<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+</Health>
+'''
+
+
+class SentinelException(Exception):
+ pass
+
class TestFindEndpoint(CiTestCase):
@@ -140,9 +166,7 @@ class TestGoalStateParsing(CiTestCase):
'certificates_url': 'MyCertificatesUrl',
}
- def _get_goal_state(self, http_client=None, **kwargs):
- if http_client is None:
- http_client = mock.MagicMock()
+ def _get_formatted_goal_state_xml_string(self, **kwargs):
parameters = self.default_parameters.copy()
parameters.update(kwargs)
xml = GOAL_STATE_TEMPLATE.format(**parameters)
@@ -153,7 +177,13 @@ class TestGoalStateParsing(CiTestCase):
continue
new_xml_lines.append(line)
xml = '\n'.join(new_xml_lines)
- return azure_helper.GoalState(xml, http_client)
+ return xml
+
+ def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
+ if m_azure_endpoint_client is None:
+ m_azure_endpoint_client = mock.MagicMock()
+ xml = self._get_formatted_goal_state_xml_string(**kwargs)
+ return azure_helper.GoalState(xml, m_azure_endpoint_client)
def test_incarnation_parsed_correctly(self):
incarnation = '123'
@@ -190,25 +220,55 @@ class TestGoalStateParsing(CiTestCase):
azure_helper.is_byte_swapped(previous_iid, current_iid))
def test_certificates_xml_parsed_and_fetched_correctly(self):
- http_client = mock.MagicMock()
+ m_azure_endpoint_client = mock.MagicMock()
certificates_url = 'TestCertificatesUrl'
goal_state = self._get_goal_state(
- http_client=http_client, certificates_url=certificates_url)
+ m_azure_endpoint_client=m_azure_endpoint_client,
+ certificates_url=certificates_url)
certificates_xml = goal_state.certificates_xml
- self.assertEqual(1, http_client.get.call_count)
- self.assertEqual(certificates_url, http_client.get.call_args[0][0])
- self.assertTrue(http_client.get.call_args[1].get('secure', False))
- self.assertEqual(http_client.get.return_value.contents,
- certificates_xml)
+ self.assertEqual(1, m_azure_endpoint_client.get.call_count)
+ self.assertEqual(
+ certificates_url,
+ m_azure_endpoint_client.get.call_args[0][0])
+ self.assertTrue(
+ m_azure_endpoint_client.get.call_args[1].get(
+ 'secure', False))
+ self.assertEqual(
+ m_azure_endpoint_client.get.return_value.contents,
+ certificates_xml)
def test_missing_certificates_skips_http_get(self):
- http_client = mock.MagicMock()
+ m_azure_endpoint_client = mock.MagicMock()
goal_state = self._get_goal_state(
- http_client=http_client, certificates_url=None)
+ m_azure_endpoint_client=m_azure_endpoint_client,
+ certificates_url=None)
certificates_xml = goal_state.certificates_xml
- self.assertEqual(0, http_client.get.call_count)
+ self.assertEqual(0, m_azure_endpoint_client.get.call_count)
self.assertIsNone(certificates_xml)
+ def test_invalid_goal_state_xml_raises_parse_error(self):
+ xml = 'random non-xml data'
+ with self.assertRaises(ElementTree.ParseError):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_container_id_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
+ def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
+ xml = self._get_formatted_goal_state_xml_string()
+ xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+ with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+ azure_helper.GoalState(xml, mock.MagicMock())
+
class TestAzureEndpointHttpClient(CiTestCase):
@@ -222,61 +282,95 @@ class TestAzureEndpointHttpClient(CiTestCase):
patches = ExitStack()
self.addCleanup(patches.close)
- self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
+ self.readurl = patches.enter_context(
+ mock.patch.object(azure_helper.url_helper, 'readurl'))
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
url = 'MyTestUrl'
response = client.get(url, secure=False)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
+ self.assertEqual(1, self.readurl.call_count)
+ self.assertEqual(self.readurl.return_value, response)
self.assertEqual(
- mock.call(url, headers=self.regular_headers, retries=10,
- timeout=5),
- self.read_file_or_url.call_args)
+ mock.call(url, headers=self.regular_headers,
+ timeout=5, retries=10, sec_between=5),
+ self.readurl.call_args)
+
+ def test_non_secure_get_raises_exception(self):
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.readurl.side_effect = SentinelException
+ url = 'MyTestUrl'
+ with self.assertRaises(SentinelException):
+ client.get(url, secure=False)
def test_secure_get(self):
url = 'MyTestUrl'
- certificate = mock.MagicMock()
+ m_certificate = mock.MagicMock()
expected_headers = self.regular_headers.copy()
expected_headers.update({
"x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": certificate,
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
})
- client = azure_helper.AzureEndpointHttpClient(certificate)
+ client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
+ self.assertEqual(1, self.readurl.call_count)
+ self.assertEqual(self.readurl.return_value, response)
self.assertEqual(
- mock.call(url, headers=expected_headers, retries=10,
- timeout=5),
- self.read_file_or_url.call_args)
+ mock.call(url, headers=expected_headers,
+ timeout=5, retries=10, sec_between=5),
+ self.readurl.call_args)
+
+ def test_secure_get_raises_exception(self):
+ url = 'MyTestUrl'
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.readurl.side_effect = SentinelException
+ with self.assertRaises(SentinelException):
+ client.get(url, secure=True)
def test_post(self):
- data = mock.MagicMock()
+ m_data = mock.MagicMock()
url = 'MyTestUrl'
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- response = client.post(url, data=data)
- self.assertEqual(1, self.read_file_or_url.call_count)
- self.assertEqual(self.read_file_or_url.return_value, response)
+ response = client.post(url, data=m_data)
+ self.assertEqual(1, self.readurl.call_count)
+ self.assertEqual(self.readurl.return_value, response)
self.assertEqual(
- mock.call(url, data=data, headers=self.regular_headers, retries=10,
- timeout=5),
- self.read_file_or_url.call_args)
+ mock.call(url, data=m_data, headers=self.regular_headers,
+ timeout=5, retries=10, sec_between=5),
+ self.readurl.call_args)
+
+ def test_post_raises_exception(self):
+ m_data = mock.MagicMock()
+ url = 'MyTestUrl'
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.readurl.side_effect = SentinelException
+ with self.assertRaises(SentinelException):
+ client.post(url, data=m_data)
def test_post_with_extra_headers(self):
url = 'MyTestUrl'
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
extra_headers = {'test': 'header'}
client.post(url, extra_headers=extra_headers)
- self.assertEqual(1, self.read_file_or_url.call_count)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
+ self.assertEqual(1, self.readurl.call_count)
self.assertEqual(
mock.call(mock.ANY, data=mock.ANY, headers=expected_headers,
- retries=10, timeout=5),
- self.read_file_or_url.call_args)
+ timeout=5, retries=10, sec_between=5),
+ self.readurl.call_args)
+
+ def test_post_with_sleep_with_extra_headers_raises_exception(self):
+ m_data = mock.MagicMock()
+ url = 'MyTestUrl'
+ extra_headers = {'test': 'header'}
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ self.readurl.side_effect = SentinelException
+ with self.assertRaises(SentinelException):
+ client.post(
+ url, data=m_data, extra_headers=extra_headers)
class TestOpenSSLManager(CiTestCase):
@@ -287,7 +381,7 @@ class TestOpenSSLManager(CiTestCase):
self.addCleanup(patches.close)
self.subp = patches.enter_context(
- mock.patch.object(azure_helper.util, 'subp'))
+ mock.patch.object(azure_helper.subp, 'subp'))
try:
self.open = patches.enter_context(
mock.patch('__builtin__.open'))
@@ -332,7 +426,7 @@ class TestOpenSSLManagerActions(CiTestCase):
path = 'tests/data/azure'
return os.path.join(path, name)
- @unittest2.skip("todo move to cloud_test")
+ @unittest.skip("todo move to cloud_test")
def test_pubkey_extract(self):
cert = load_file(self._data_file('pubkey_extract_cert'))
good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
@@ -344,7 +438,7 @@ class TestOpenSSLManagerActions(CiTestCase):
fingerprint = sslmgr._get_fingerprint_from_cert(cert)
self.assertEqual(good_fingerprint, fingerprint)
- @unittest2.skip("todo move to cloud_test")
+ @unittest.skip("todo move to cloud_test")
@mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
def test_parse_certificates(self, mock_decrypt_certs):
"""Azure control plane puts private keys as well as certificates
@@ -365,6 +459,131 @@ class TestOpenSSLManagerActions(CiTestCase):
self.assertIn(fp, keys_by_fp)
+class TestGoalStateHealthReporter(CiTestCase):
+
+ default_parameters = {
+ 'incarnation': 1634,
+ 'container_id': 'MyContainerId',
+ 'instance_id': 'MyInstanceId'
+ }
+
+ test_endpoint = 'TestEndpoint'
+ test_url = 'http://{0}/machine?comp=health'.format(test_endpoint)
+ test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'}
+
+ provisioning_success_status = 'Ready'
+
+ def setUp(self):
+ super(TestGoalStateHealthReporter, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ self.read_file_or_url = patches.enter_context(
+ mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
+
+ self.post = patches.enter_context(
+ mock.patch.object(azure_helper.AzureEndpointHttpClient,
+ 'post'))
+
+ self.GoalState = patches.enter_context(
+ mock.patch.object(azure_helper, 'GoalState'))
+ self.GoalState.return_value.container_id = \
+ self.default_parameters['container_id']
+ self.GoalState.return_value.instance_id = \
+ self.default_parameters['instance_id']
+ self.GoalState.return_value.incarnation = \
+ self.default_parameters['incarnation']
+
+ def _get_formatted_health_report_xml_string(self, **kwargs):
+ return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs)
+
+ def _get_report_ready_health_document(self):
+ return self._get_formatted_health_report_xml_string(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ health_status=self.provisioning_success_status,
+ health_detail_subsection='')
+
+ def test_send_ready_signal_sends_post_request(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter,
+ 'build_report') as m_build_report:
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ client, self.test_endpoint)
+ reporter.send_ready_signal()
+
+ self.assertEqual(1, self.post.call_count)
+ self.assertEqual(
+ mock.call(
+ self.test_url,
+ data=m_build_report.return_value,
+ extra_headers=self.test_default_headers),
+ self.post.call_args)
+
+ def test_build_report_for_health_document(self):
+ health_document = self._get_report_ready_health_document()
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_endpoint)
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_success_status)
+ self.assertEqual(health_document, generated_health_document)
+ self.assertIn(
+ '<GoalStateIncarnation>{}</GoalStateIncarnation>'.format(
+ str(self.default_parameters['incarnation'])),
+ generated_health_document)
+ self.assertIn(
+ ''.join([
+ '<ContainerId>',
+ self.default_parameters['container_id'],
+ '</ContainerId>']),
+ generated_health_document)
+ self.assertIn(
+ ''.join([
+ '<InstanceId>',
+ self.default_parameters['instance_id'],
+ '</InstanceId>']),
+ generated_health_document)
+ self.assertIn(
+ ''.join([
+ '<State>',
+ self.provisioning_success_status,
+ '</State>']),
+ generated_health_document
+ )
+ self.assertNotIn('<Details>', generated_health_document)
+ self.assertNotIn('<SubStatus>', generated_health_document)
+ self.assertNotIn('<Description>', generated_health_document)
+
+ def test_send_ready_signal_calls_build_report(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, 'build_report'
+ ) as m_build_report:
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_endpoint)
+ reporter.send_ready_signal()
+
+ self.assertEqual(1, m_build_report.call_count)
+ self.assertEqual(
+ mock.call(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_success_status),
+ m_build_report.call_args)
+
+
class TestWALinuxAgentShim(CiTestCase):
def setUp(self):
@@ -383,14 +602,21 @@ class TestWALinuxAgentShim(CiTestCase):
patches.enter_context(
mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
- def test_http_client_uses_certificate(self):
+ self.test_incarnation = 'TestIncarnation'
+ self.test_container_id = 'TestContainerId'
+ self.test_instance_id = 'TestInstanceId'
+ self.GoalState.return_value.incarnation = self.test_incarnation
+ self.GoalState.return_value.container_id = self.test_container_id
+ self.GoalState.return_value.instance_id = self.test_instance_id
+
+ def test_azure_endpoint_client_uses_certificate_during_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
[mock.call(self.OpenSSLManager.return_value.certificate)],
self.AzureEndpointHttpClient.call_args_list)
- def test_correct_url_used_for_goalstate(self):
+ def test_correct_url_used_for_goalstate_during_report_ready(self):
self.find_endpoint.return_value = 'test_endpoint'
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
@@ -404,11 +630,10 @@ class TestWALinuxAgentShim(CiTestCase):
self.GoalState.call_args_list)
def test_certificates_used_to_determine_public_keys(self):
+ # if register_with_azure_and_fetch_data() isn't passed some info about
+ # the user's public keys, there's no point in even trying to parse the
+ # certificates
shim = wa_shim()
- """if register_with_azure_and_fetch_data() isn't passed some info about
- the user's public keys, there's no point in even trying to parse
- the certificates
- """
mypk = [{'fingerprint': 'fp1', 'path': 'path1'},
{'fingerprint': 'fp3', 'path': 'path3', 'value': ''}]
certs = {'fp1': 'expected-key',
@@ -439,43 +664,67 @@ class TestWALinuxAgentShim(CiTestCase):
expected_url = 'http://test_endpoint/machine?comp=health'
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post.call_args_list)
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args_list)
def test_goal_state_values_used_for_report_ready(self):
- self.GoalState.return_value.incarnation = 'TestIncarnation'
- self.GoalState.return_value.container_id = 'TestContainerId'
- self.GoalState.return_value.instance_id = 'TestInstanceId'
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
posted_document = (
- self.AzureEndpointHttpClient.return_value.post.call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args[1]['data']
)
- self.assertIn('TestIncarnation', posted_document)
- self.assertIn('TestContainerId', posted_document)
- self.assertIn('TestInstanceId', posted_document)
+ self.assertIn(self.test_incarnation, posted_document)
+ self.assertIn(self.test_container_id, posted_document)
+ self.assertIn(self.test_instance_id, posted_document)
+
+ def test_xml_elems_in_report_ready(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=self.test_incarnation,
+ container_id=self.test_container_id,
+ instance_id=self.test_instance_id,
+ health_status='Ready',
+ health_detail_subsection='')
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args[1]['data'])
+ self.assertEqual(health_document, posted_document)
def test_clean_up_can_be_called_at_any_time(self):
shim = wa_shim()
shim.clean_up()
- def test_clean_up_will_clean_up_openssl_manager_if_instantiated(self):
+ def test_clean_up_after_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
shim.clean_up()
self.assertEqual(
1, self.OpenSSLManager.return_value.clean_up.call_count)
- def test_failure_to_fetch_goalstate_bubbles_up(self):
- class SentinelException(Exception):
- pass
- self.AzureEndpointHttpClient.return_value.get.side_effect = (
- SentinelException)
+ def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
+ self.AzureEndpointHttpClient.return_value.get \
+ .side_effect = (SentinelException)
shim = wa_shim()
self.assertRaises(SentinelException,
shim.register_with_azure_and_fetch_data)
+ def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
+ self.GoalState.side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_fetch_data)
-class TestGetMetadataFromFabric(CiTestCase):
+ def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
+ self.AzureEndpointHttpClient.return_value.post \
+ .side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_fetch_data)
+
+
+class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
@mock.patch.object(azure_helper, 'WALinuxAgentShim')
def test_data_from_shim_returned(self, shim):
@@ -491,14 +740,39 @@ class TestGetMetadataFromFabric(CiTestCase):
@mock.patch.object(azure_helper, 'WALinuxAgentShim')
def test_failure_in_registration_calls_clean_up(self, shim):
- class SentinelException(Exception):
- pass
shim.return_value.register_with_azure_and_fetch_data.side_effect = (
SentinelException)
self.assertRaises(SentinelException,
azure_helper.get_metadata_from_fabric)
self.assertEqual(1, shim.return_value.clean_up.call_count)
+ @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+ def test_calls_shim_register_with_azure_and_fetch_data(self, shim):
+ m_pubkey_info = mock.MagicMock()
+ azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
+ self.assertEqual(
+ 1,
+ shim.return_value
+ .register_with_azure_and_fetch_data.call_count)
+ self.assertEqual(
+ mock.call(pubkey_info=m_pubkey_info),
+ shim.return_value
+ .register_with_azure_and_fetch_data.call_args)
+
+ @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+ def test_instantiates_shim_with_kwargs(self, shim):
+ m_fallback_lease_file = mock.MagicMock()
+ m_dhcp_options = mock.MagicMock()
+ azure_helper.get_metadata_from_fabric(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_opts=m_dhcp_options)
+ self.assertEqual(1, shim.call_count)
+ self.assertEqual(
+ mock.call(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_options=m_dhcp_options),
+ shim.call_args)
+
class TestExtractIpAddressFromNetworkd(CiTestCase):
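
The new GoalStateHealthReporter tests above drive HEALTH_REPORT_XML_TEMPLATE directly; the health document is plain str.format substitution of goal-state fields. A minimal sketch of rendering a 'Ready' report, with the template shortened to the fields the tests assert on:

    HEALTH_REPORT_XML_TEMPLATE = '''\
    <?xml version="1.0" encoding="utf-8"?>
    <Health>
      <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
      <Container>
        <ContainerId>{container_id}</ContainerId>
        <RoleInstanceList>
          <Role>
            <InstanceId>{instance_id}</InstanceId>
            <Health><State>{health_status}</State></Health>
          </Role>
        </RoleInstanceList>
      </Container>
    </Health>
    '''

    document = HEALTH_REPORT_XML_TEMPLATE.format(
        incarnation=1634,
        container_id='MyContainerId',
        instance_id='MyInstanceId',
        health_status='Ready')
    assert '<State>Ready</State>' in document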
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index d62d542b..7aa3b1d1 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -3,6 +3,7 @@
import copy
from cloudinit.cs_utils import Cepko
+from cloudinit import distros
from cloudinit import helpers
from cloudinit import sources
from cloudinit.sources import DataSourceCloudSigma
@@ -47,8 +48,11 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
self.add_patch(DS_PATH + '.is_running_in_cloudsigma',
"m_is_container", return_value=True)
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
- "", "", paths=self.paths)
+ sys_cfg={}, distro=distro, paths=self.paths)
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py
index 83c2f753..e68168f2 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/test_datasource/test_cloudstack.py
@@ -41,7 +41,7 @@ class TestCloudStackPasswordFetching(CiTestCase):
def _set_password_server_response(self, response_string):
subp = mock.MagicMock(return_value=(response_string, ''))
self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.util.subp',
+ mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp',
subp))
return subp
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 2a96122f..a93f2195 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -3,6 +3,7 @@
import copy
import httpretty
import json
+import requests
from unittest import mock
from cloudinit import helpers
@@ -37,6 +38,8 @@ DYNAMIC_METADATA = {
# python3 -c 'import json
# from cloudinit.ec2_utils import get_instance_metadata as gm
# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))'
+# Note that the MAC addresses have been modified to sort in the opposite order
+# to the device-number attribute, to test LP: #1876312
DEFAULT_METADATA = {
"ami-id": "ami-8b92b4ee",
"ami-launch-index": "0",
@@ -76,7 +79,7 @@ DEFAULT_METADATA = {
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
"vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56"
},
- "06:17:04:d7:26:0A": {
+ "06:17:04:d7:26:08": {
"device-number": "1", # Only IPv4 local config
"interface-id": "eni-e44ef49f",
"ipv4-associations": {"": "172.3.3.16"},
@@ -84,7 +87,7 @@ DEFAULT_METADATA = {
"local-hostname": ("ip-172-3-3-16.us-east-2."
"compute.internal"),
"local-ipv4s": "172.3.3.16",
- "mac": "06:17:04:d7:26:0A",
+ "mac": "06:17:04:d7:26:08",
"owner-id": "950047163771",
"public-hostname": ("ec2-172-3-3-16.us-east-2."
"compute.amazonaws.com"),
@@ -112,6 +115,122 @@ DEFAULT_METADATA = {
"services": {"domain": "amazonaws.com", "partition": "aws"},
}
+# collected from api version 2018-09-24/ with
+# python3 -c 'import json
+# from cloudinit.ec2_utils import get_instance_metadata as gm
+# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))'
+
+NIC1_MD_IPV4_IPV6_MULTI_IP = {
+ "device-number": "0",
+ "interface-id": "eni-0d6335689899ce9cc",
+ "ipv4-associations": {
+ "18.218.219.181": "172.31.44.13"
+ },
+ "ipv6s": [
+ "2600:1f16:292:100:c187:593c:4349:136",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9",
+ "2600:1f16:292:100:f152:2222:3333:4444"
+ ],
+ "local-hostname": ("ip-172-31-44-13.us-east-2."
+ "compute.internal"),
+ "local-ipv4s": [
+ "172.31.44.13",
+ "172.31.45.70"
+ ],
+ "mac": "0a:07:84:3d:6e:38",
+ "owner-id": "329910648901",
+ "public-hostname": ("ec2-18-218-219-181.us-east-2."
+ "compute.amazonaws.com"),
+ "public-ipv4s": "18.218.219.181",
+ "security-group-ids": "sg-0c387755222ba8d2e",
+ "security-groups": "launch-wizard-4",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+}
+
+NIC2_MD = {
+ "device-number": "1",
+ "interface-id": "eni-043cdce36ded5e79f",
+ "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal",
+ "local-ipv4s": "172.31.47.221",
+ "mac": "0a:75:69:92:e2:16",
+ "owner-id": "329910648901",
+ "security-group-ids": "sg-0d68fef37d8cc9b77",
+ "security-groups": "launch-wizard-17",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+}
+
+SECONDARY_IP_METADATA_2018_09_24 = {
+ "ami-id": "ami-0986c2ac728528ac2",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1"
+ },
+ "events": {
+ "maintenance": {
+ "history": "[]",
+ "scheduled": "[]"
+ }
+ },
+ "hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "identity-credentials": {
+ "ec2": {
+ "info": {
+ "AccountId": "329910648901",
+ "Code": "Success",
+ "LastUpdated": "2019-07-06T14:22:56Z"
+ }
+ }
+ },
+ "instance-action": "none",
+ "instance-id": "i-069e01e8cc43732f8",
+ "instance-type": "t2.micro",
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4": "172.31.44.13",
+ "mac": "0a:07:84:3d:6e:38",
+ "metrics": {
+ "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
+ },
+ "network": {
+ "interfaces": {
+ "macs": {
+ "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP,
+ }
+ }
+ },
+ "placement": {
+ "availability-zone": "us-east-2c"
+ },
+ "profile": "default-hvm",
+ "public-hostname": (
+ "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+ "public-ipv4": "18.218.219.181",
+ "public-keys": {
+ "yourkeyname,e": [
+ "ssh-rsa AAAAW...DZ yourkeyname"
+ ]
+ },
+ "reservation-id": "r-09b4917135cdd33be",
+ "security-groups": "launch-wizard-4",
+ "services": {
+ "domain": "amazonaws.com",
+ "partition": "aws"
+ }
+}
+
+M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+
def _register_ssh_keys(rfunc, base_url, keys_data):
"""handle ssh key inconsistencies.
@@ -200,6 +319,7 @@ def register_mock_metaserver(base_url, data):
class TestEc2(test_helpers.HttprettyTestCase):
with_logs = True
+ maxDiff = None
valid_platform_data = {
'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
@@ -265,30 +385,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(instance_id_url, None)
return ds
- def test_network_config_property_returns_version_1_network_data(self):
- """network_config property returns network version 1 for metadata.
-
- Only one device is configured even when multiple exist in metadata.
- """
+ def test_network_config_property_returns_version_2_network_data(self):
+ """network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
md={'md': DEFAULT_METADATA})
- find_fallback_path = (
- 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
with mock.patch(find_fallback_path) as m_find_fallback:
m_find_fallback.return_value = 'eth9'
ds.get_data()
mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:09', 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
- 'type': 'physical'}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
@@ -297,30 +410,59 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- def test_network_config_property_set_dhcp4_on_private_ipv4(self):
- """network_config property configures dhcp4 on private ipv4 nics.
+ def test_network_config_property_set_dhcp4(self):
+ """network_config property configures dhcp4 on nics with local-ipv4s.
- Only one device is configured even when multiple exist in metadata.
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
md={'md': DEFAULT_METADATA})
- find_fallback_path = (
- 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
with mock.patch(find_fallback_path) as m_find_fallback:
m_find_fallback.return_value = 'eth9'
ds.get_data()
- mac1 = '06:17:04:d7:26:0A' # IPv4 only in DEFAULT_METADATA
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:0A', 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical'}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ with mock.patch(get_interface_mac_path) as m_get_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
+ m_find_fallback.return_value = 'eth9'
+ m_get_mac.return_value = mac1
+ self.assertEqual(expected, ds.network_config)
+
+ def test_network_config_property_secondary_private_ips(self):
+ """network_config property configures any secondary ipv4 addresses.
+
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
+ """
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
+ md={'md': SECONDARY_IP_METADATA_2018_09_24})
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ m_find_fallback.return_value = 'eth9'
+ ds.get_data()
+
+ mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1}, 'set-name': 'eth9',
+ 'addresses': ['172.31.45.70/20',
+ '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ 'dhcp4': True, 'dhcp6': True}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
@@ -356,21 +498,18 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(
'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac'
ds.fallback_nic = 'eth9'
- with mock.patch(get_interface_mac_path) as m_get_interface_mac:
- m_get_interface_mac.return_value = mac1
+ with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
nc = ds.network_config # Will re-crawl network metadata
self.assertIsNotNone(nc)
self.assertIn(
'Refreshing stale metadata from prior to upgrade',
self.logs.getvalue())
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:09',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
- 'type': 'physical'}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(expected, ds.network_config)
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
@@ -429,6 +568,55 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
+ def test_aws_inaccessible_imds_service_fails_with_retries(self):
+ """Inaccessibility of http://169.254.169.254 are retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=None)
+
+ conn_error = requests.exceptions.ConnectionError(
+ '[Errno 113] no route to host'
+ )
+
+ mock_success = mock.MagicMock(contents=b'fakesuccess')
+ mock_success.ok.return_value = True
+
+ with mock.patch('cloudinit.url_helper.readurl') as m_readurl:
+ m_readurl.side_effect = (conn_error, conn_error, mock_success)
+ with mock.patch('cloudinit.url_helper.time.sleep'):
+ self.assertTrue(ds.wait_for_metadata_service())
+
+        # All three readurl attempts are retries of /latest/api/token
+ self.assertEqual(3, len(m_readurl.call_args_list))
+ for readurl_call in m_readurl.call_args_list:
+ self.assertIn('latest/api/token', readurl_call[0][0])
+
+ def test_aws_token_403_fails_without_retries(self):
+ """Verify that 403s fetching AWS tokens are not retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=None)
+ token_url = self.data_url('latest', data_item='api/token')
+ httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
+ self.assertFalse(ds.get_data())
+        # The failed PUT to /latest/api/token should appear exactly once
+ logs = self.logs.getvalue()
+ failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
+ expected_logs = [
+ 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is'
+ ' disabled. Aborting.',
+ "WARNING: IMDS's HTTP endpoint is probably disabled",
+ failed_put_log
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+ self.assertEqual(
+ 1,
+ len([line for line in logs.splitlines() if failed_put_log in line])
+ )
+
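Taken together, the two tests above encode the AWS IMDSv2 token policy: transient connection errors to 169.254.169.254 are retried, while a 403 (the HTTP endpoint is administratively disabled) aborts immediately. A condensed sketch of that policy, with `put` standing in for the real url_helper machinery:

```python
import requests


def fetch_imds_token_sketch(put, retries=3):
    """Hedged sketch: retry connection errors, never retry a 403."""
    for _ in range(retries):
        try:
            resp = put("http://169.254.169.254/latest/api/token")
        except requests.exceptions.ConnectionError:
            continue  # no route to host yet; worth another attempt
        if resp.status_code == 403:
            return None  # endpoint disabled; abort without retrying
        if resp.ok:
            return resp.text
    return None
```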
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
@@ -443,7 +631,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
self.assertEqual(1, len(logs_with_redacted_ttl))
- self.assertEqual(79, len(logs_with_redacted))
+ self.assertEqual(81, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@@ -556,7 +744,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with('eth9')
+ m_dhcp.assert_called_once_with('eth9', None)
m_net.assert_called_once_with(
broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
prefix_or_mask='255.255.255.0', router='192.168.2.1',
@@ -564,23 +752,64 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertIn('Crawl of metadata service took', self.logs.getvalue())
+class TestGetSecondaryAddresses(test_helpers.CiTestCase):
+
+ mac = '06:17:04:d7:26:ff'
+ with_logs = True
+
+ def test_md_with_no_secondary_addresses(self):
+ """Empty list is returned when nic metadata contains no secondary ip"""
+ self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac))
+
+ def test_md_with_secondary_v4_and_v6_addresses(self):
+ """All secondary addresses are returned from nic metadata"""
+ self.assertEqual(
+ ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
+
+ def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
+ """Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
+ invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
+ invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
+ invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
+ self.assertEqual(
+ ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
+ expected_logs = [
+ "WARNING: Could not parse subnet-ipv4-cidr-block"
+ " something-unexpected for mac 06:17:04:d7:26:ff."
+ " ipv4 network config prefix defaults to /24",
+ "WARNING: Could not parse subnet-ipv6-cidr-block"
+ " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
+ " ipv6 network config prefix defaults to /128"
+ ]
+ logs = self.logs.getvalue()
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+
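The TestGetSecondaryAddresses cases fix the contract of ec2.get_secondary_addresses: every address after the first in local-ipv4s or ipv6s is returned with the prefix length taken from the matching subnet-*-cidr-block key, falling back to /24 (IPv4) or /128 (IPv6) with a warning when that key is missing or unparseable. An approximate restatement of that contract (not the real implementation):

```python
import logging

LOG = logging.getLogger(__name__)


def secondary_addresses_sketch(nic_md, mac):
    out = []
    for ips_key, cidr_key, default in (
            ("local-ipv4s", "subnet-ipv4-cidr-block", "24"),
            ("ipv6s", "subnet-ipv6-cidr-block", "128")):
        ips = nic_md.get(ips_key)
        if not isinstance(ips, list) or len(ips) < 2:
            continue  # a bare string means a single primary address
        parts = (nic_md.get(cidr_key) or "").split("/")
        if len(parts) == 2 and parts[1].isdigit():
            prefix = parts[1]
        else:
            prefix = default  # unparseable CIDR; warn and use the default
            LOG.warning("Could not parse %s for mac %s; prefix defaults"
                        " to /%s", cidr_key, mac, prefix)
        out.extend("%s/%s" % (ip, prefix) for ip in ips[1:])
    return sorted(out)
```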
class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
def setUp(self):
super(TestConvertEc2MetadataNetworkConfig, self).setUp()
self.mac1 = '06:17:04:d7:26:09'
+ interface_dict = copy.deepcopy(
+ DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1])
+ # These tests are written assuming the base interface doesn't have IPv6
+ interface_dict.pop('ipv6s')
self.network_metadata = {
- 'interfaces': {'macs': {
- self.mac1: {'public-ipv4s': '172.31.2.16'}}}}
+ 'interfaces': {'macs': {self.mac1: interface_dict}}}
def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
"""Any mac absent from metadata is skipped by network config."""
        macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'virtualnic2'}
# DE:AD:BE:EF:FF:FF represented by OS but not in metadata
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -594,15 +823,15 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
network_metadata_ipv6['interfaces']['macs'][self.mac1])
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
nic1_metadata.pop('public-ipv4s')
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
network_metadata_ipv6, macs_to_nics))
- def test_convert_ec2_metadata_network_config_handles_local_dhcp4(self):
+ def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
"""Config dhcp4 when there are no public addresses in public-ipv4s."""
macs_to_nics = {self.mac1: 'eth9'}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
@@ -610,9 +839,9 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
network_metadata_ipv6['interfaces']['macs'][self.mac1])
nic1_metadata['local-ipv4s'] = '172.3.3.15'
nic1_metadata.pop('public-ipv4s')
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -627,16 +856,16 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata['public-ipv4s'] = ''
# When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
- """When dhcp6 is public and dhcp4 is set to local enable both."""
+ """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
macs_to_nics = {self.mac1: 'eth9'}
network_metadata_both = copy.deepcopy(self.network_metadata)
nic1_metadata = (
@@ -644,10 +873,35 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
nic1_metadata.pop('public-ipv4s')
nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_both, macs_to_nics))
+
+ def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
+ """DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
+ mac2 = '06:17:04:d7:26:08'
+ macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+ network_metadata_both = copy.deepcopy(self.network_metadata)
+ # Add 2nd nic info
+ network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
+ nic1_metadata = (
+ network_metadata_both['interfaces']['macs'][self.mac1])
+ nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
+ nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg
+ nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
+ expected = {'version': 2, 'ethernets': {
+ 'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
+ 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
+ 'eth10': {
+ 'match': {'macaddress': mac2}, 'set-name': 'eth10',
+ 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
+ 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -660,10 +914,9 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata = (
network_metadata_both['interfaces']['macs'][self.mac1])
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -671,12 +924,10 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
"""Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}]}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1},
+ 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
self.assertEqual(
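The multi-NIC case above is the heart of the version-1-to-version-2 rewrite of ec2.convert_ec2_metadata_network_config: on instances with several interfaces, DHCP route metrics grow with the device number so the primary NIC wins routing, while single-NIC configs carry no overrides at all. A hedged sketch of just that metric policy:

```python
def nic_dhcp_config_sketch(device_number, nic_count, dhcp4=True, dhcp6=False):
    # Approximation of what the multiple-nics test expects: metrics 100,
    # 200, ... per device, emitted only when more than one NIC is present.
    cfg = {"dhcp4": dhcp4, "dhcp6": dhcp6}
    if nic_count > 1:
        metric = 100 * (device_number + 1)
        if dhcp4:
            cfg["dhcp4-overrides"] = {"route-metric": metric}
        if dhcp6:
            cfg["dhcp6-overrides"] = {"route-metric": metric}
    return cfg
```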
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 4afbccff..01f4cbd1 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -114,7 +114,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.assertTrue(success)
req_header = httpretty.last_request().headers
- self.assertDictContainsSubset(HEADERS, req_header)
+ for header_name, expected_value in HEADERS.items():
+ self.assertEqual(expected_value, req_header.get(header_name))
def test_metadata(self):
# UnicodeDecodeError if set to ds.userdata instead of userdata_raw
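The GCE hunk replaces assertDictContainsSubset, deprecated since Python 3.2, with an explicit per-key loop. The same check as a small reusable helper, should other tests need it:

```python
def assert_headers_present(expected, actual):
    # Equivalent of the removed assertDictContainsSubset for this case:
    # every expected header must appear in the request with its value.
    for name, value in expected.items():
        assert actual.get(name) == value, name
```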
diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py
index a9c12597..d0879545 100644
--- a/tests/unittests/test_datasource/test_hetzner.py
+++ b/tests/unittests/test_datasource/test_hetzner.py
@@ -5,10 +5,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.sources import DataSourceHetzner
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import util, settings, helpers
from cloudinit.tests.helpers import mock, CiTestCase
+import base64
+import pytest
+
METADATA = util.load_yaml("""
hostname: cloudinit-test
instance-id: 123456
@@ -115,3 +119,22 @@ class TestDataSourceHetzner(CiTestCase):
        # These are a white-box attempt to ensure it did not search.
m_find_fallback.assert_not_called()
m_read_md.assert_not_called()
+
+
+class TestMaybeB64Decode:
+ """Test the maybe_b64decode helper function."""
+
+ @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4)))
+ def test_raises_error_on_non_bytes(self, invalid_input):
+ """maybe_b64decode should raise error if data is not bytes."""
+ with pytest.raises(TypeError):
+ hc_helper.maybe_b64decode(invalid_input)
+
+ @pytest.mark.parametrize("in_data,expected", [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ])
+ def test_happy_path(self, in_data, expected):
+ assert expected == hc_helper.maybe_b64decode(in_data)
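The new Hetzner tests document maybe_b64decode's contract: bytes in, bytes out; non-bytes raise TypeError; valid base64 is decoded and anything else passes through unchanged. A sketch consistent with those cases:

```python
import base64
import binascii


def maybe_b64decode_sketch(data):
    # Mirrors the behaviour the tests above pin down; the real helper
    # lives in cloudinit.sources.helpers.hetzner.
    if not isinstance(data, bytes):
        raise TypeError("data must be bytes, got %s" % type(data))
    try:
        return base64.b64decode(data, validate=True)
    except binascii.Error:
        return data  # not base64; return unchanged
```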
diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py
index 0b54f585..9013ae9f 100644
--- a/tests/unittests/test_datasource/test_ibmcloud.py
+++ b/tests/unittests/test_datasource/test_ibmcloud.py
@@ -15,13 +15,6 @@ mock = test_helpers.mock
D_PATH = "cloudinit.sources.DataSourceIBMCloud."
-class TestIBMCloud(test_helpers.CiTestCase):
- """Test the datasource."""
- def setUp(self):
- super(TestIBMCloud, self).setUp()
- pass
-
-
@mock.patch(D_PATH + "_is_xen", return_value=True)
@mock.patch(D_PATH + "_is_ibm_provisioning")
@mock.patch(D_PATH + "util.blkid")
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 2a81d3f5..41b6c27b 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -158,7 +158,6 @@ class TestMAASDataSource(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
- with_logs = True
base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
'token_key': 'FAKE_TOKEN_KEY',
'token_secret': 'FAKE_TOKEN_SECRET',
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 18bea0b9..2e6b53ff 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -288,8 +288,23 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(util, 'is_FreeBSD', return_value=True))
+ def _mfind_devs_with_freebsd(
+ criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ if not criteria:
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ if criteria.startswith("LABEL="):
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ elif criteria == "TYPE=vfat":
+ return ["/dev/msdosfs/foo"]
+ elif criteria == "TYPE=iso9660":
+ return ["/dev/iso9660/foo"]
+ return []
+
self.mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=True))
+ mock.patch.object(
+ util, 'find_devs_with_freebsd',
+ side_effect=_mfind_devs_with_freebsd))
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc._get_devices('foo')
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index bb399f6d..9c6070a5 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -9,6 +9,8 @@ import os
import pwd
import unittest
+import pytest
+
TEST_VARS = {
'VAR1': 'single',
@@ -130,18 +132,18 @@ class TestOpenNebulaDataSource(CiTestCase):
def test_seed_dir_non_contextdisk(self):
self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir)
+ self.seed_dir, mock.Mock())
def test_seed_dir_empty1_context(self):
populate_dir(self.seed_dir, {'context.sh': ''})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertIsNone(results['userdata'])
self.assertEqual(results['metadata'], {})
def test_seed_dir_empty2_context(self):
populate_context_dir(self.seed_dir, {})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertIsNone(results['userdata'])
self.assertEqual(results['metadata'], {})
@@ -151,11 +153,11 @@ class TestOpenNebulaDataSource(CiTestCase):
self.assertRaises(ds.BrokenContextDiskDir,
ds.read_context_disk_dir,
- self.seed_dir)
+ self.seed_dir, mock.Mock())
def test_context_parser(self):
populate_context_dir(self.seed_dir, TEST_VARS)
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('metadata' in results)
self.assertEqual(TEST_VARS, results['metadata'])
@@ -166,7 +168,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
populate_context_dir(my_d, {k: '\n'.join(public_keys)})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('metadata' in results)
self.assertTrue('public-keys' in results['metadata'])
@@ -180,7 +182,7 @@ class TestOpenNebulaDataSource(CiTestCase):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: USER_DATA,
'USERDATA_ENCODING': ''})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(USER_DATA, results['userdata'])
@@ -190,7 +192,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('USER_DATA', 'USERDATA'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: b64userdata})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(b64userdata, results['userdata'])
@@ -200,7 +202,7 @@ class TestOpenNebulaDataSource(CiTestCase):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: util.b64e(USER_DATA),
'USERDATA_ENCODING': 'base64'})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('userdata' in results)
self.assertEqual(USER_DATA, results['userdata'])
@@ -212,7 +214,7 @@ class TestOpenNebulaDataSource(CiTestCase):
for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
- results = ds.read_context_disk_dir(my_d)
+ results = ds.read_context_disk_dir(my_d, mock.Mock())
self.assertTrue('metadata' in results)
self.assertTrue('local-hostname' in results['metadata'])
@@ -227,7 +229,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# without ETH0_MAC
# for Older OpenNebula?
populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -237,7 +239,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# ETH0_IP and ETH0_MAC
populate_context_dir(
self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -249,7 +251,7 @@ class TestOpenNebulaDataSource(CiTestCase):
# "AR = [ TYPE = ETHER ]"
populate_context_dir(
self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -263,7 +265,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_MAC': MACADDR,
'ETH0_MASK': '255.255.0.0'
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -277,7 +279,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_MAC': MACADDR,
'ETH0_MASK': ''
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -290,7 +292,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -303,7 +305,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_ULA': IP6_ULA,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -317,7 +319,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -331,7 +333,7 @@ class TestOpenNebulaDataSource(CiTestCase):
'ETH0_IP6_PREFIX_LENGTH': '',
'ETH0_MAC': MACADDR,
})
- results = ds.read_context_disk_dir(self.seed_dir)
+ results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
self.assertTrue('network-interfaces' in results)
self.assertTrue(
@@ -355,6 +357,7 @@ class TestOpenNebulaDataSource(CiTestCase):
util.find_devs_with = orig_find_devs_with
+@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={}))
class TestOpenNebulaNetwork(unittest.TestCase):
system_nics = ('eth0', 'ens3')
@@ -367,7 +370,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
expected = {
'02:00:0a:12:01:01': 'ETH0',
'02:00:0a:12:0f:0f': 'ETH1', }
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(expected, net.context_devname)
def test_get_nameservers(self):
@@ -382,21 +385,21 @@ class TestOpenNebulaNetwork(unittest.TestCase):
expected = {
'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
'search': ['example.com', 'example.org']}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_nameservers('eth0')
self.assertEqual(expected, val)
def test_get_mtu(self):
"""Verify get_mtu('device') correctly returns MTU size."""
context = {'ETH0_MTU': '1280'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mtu('eth0')
self.assertEqual('1280', val)
def test_get_ip(self):
"""Verify get_ip('device') correctly returns IPv4 address."""
context = {'ETH0_IP': PUBLIC_IP}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip('eth0', MACADDR)
self.assertEqual(PUBLIC_IP, val)
@@ -407,7 +410,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
string.
"""
context = {'ETH0_IP': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip('eth0', MACADDR)
self.assertEqual(IP_BY_MACADDR, val)
@@ -420,7 +423,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_IP6_ULA': '', }
expected = [IP6_GLOBAL]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -433,7 +436,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': '',
'ETH0_IP6_ULA': IP6_ULA, }
expected = [IP6_ULA]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -446,7 +449,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_IP6': IP6_GLOBAL,
'ETH0_IP6_ULA': IP6_ULA, }
expected = [IP6_GLOBAL, IP6_ULA]
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6('eth0')
self.assertEqual(expected, val)
@@ -455,7 +458,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
"""
context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6_prefix('eth0')
self.assertEqual(IP6_PREFIX, val)
@@ -466,7 +469,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
string.
"""
context = {'ETH0_IP6_PREFIX_LENGTH': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_ip6_prefix('eth0')
self.assertEqual('64', val)
@@ -476,7 +479,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
address.
"""
context = {'ETH0_GATEWAY': '1.2.3.5'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_gateway('eth0')
self.assertEqual('1.2.3.5', val)
@@ -486,7 +489,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
address.
"""
context = {'ETH0_GATEWAY6': IP6_GW}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_gateway6('eth0')
self.assertEqual(IP6_GW, val)
@@ -495,7 +498,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_mask('device') correctly returns IPv4 subnet mask.
"""
context = {'ETH0_MASK': '255.255.0.0'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mask('eth0')
self.assertEqual('255.255.0.0', val)
@@ -505,7 +508,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns default value '255.255.255.0' if ETH0_MASK has empty string.
"""
context = {'ETH0_MASK': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_mask('eth0')
self.assertEqual('255.255.255.0', val)
@@ -514,7 +517,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_network('device') correctly returns IPv4 network address.
"""
context = {'ETH0_NETWORK': '1.2.3.0'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_network('eth0', MACADDR)
self.assertEqual('1.2.3.0', val)
@@ -525,7 +528,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
empty string.
"""
context = {'ETH0_NETWORK': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_network('eth0', MACADDR)
self.assertEqual('10.18.1.0', val)
@@ -534,7 +537,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_field('device', 'name') returns *context* value.
"""
context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual('DUMMY_VALUE', val)
@@ -544,7 +547,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
value.
"""
context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
self.assertEqual('DUMMY_VALUE', val)
@@ -554,7 +557,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
value if context value is empty string.
"""
context = {'ETH9_DUMMY': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
self.assertEqual('DEFAULT_VALUE', val)
@@ -564,7 +567,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
empty string.
"""
context = {'ETH9_DUMMY': ''}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual(None, val)
@@ -574,7 +577,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
None.
"""
context = {'ETH9_DUMMY': None}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
val = net.get_field('eth9', 'dummy')
self.assertEqual(None, val)
@@ -594,7 +597,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY
@@ -610,7 +613,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -629,7 +632,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY6
@@ -645,7 +648,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -666,7 +669,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
@@ -686,7 +689,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
IP6_GLOBAL + '/' + IP6_PREFIX,
IP6_ULA + '/' + IP6_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -707,7 +710,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
@@ -727,7 +730,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -746,7 +749,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_MTU
@@ -762,14 +765,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork(context)
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_eth0(self, m_get_phys_by_mac):
for nic in self.system_nics:
m_get_phys_by_mac.return_value = {MACADDR: nic}
- net = ds.OpenNebulaNetwork({})
+ net = ds.OpenNebulaNetwork({}, mock.Mock())
expected = {
'version': 2,
'ethernets': {
@@ -779,6 +782,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.assertEqual(net.gen_conf(), expected)
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_distro_passed_through(self, m_get_physical_nics_by_mac):
+ ds.OpenNebulaNetwork({}, mock.sentinel.distro)
+ self.assertEqual(
+ [mock.call(mock.sentinel.distro)],
+ m_get_physical_nics_by_mac.call_args_list,
+ )
+
def test_eth0_override(self):
self.maxDiff = None
context = {
@@ -797,7 +808,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_SEARCH_DOMAIN': '',
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context,
+ net = ds.OpenNebulaNetwork(context, mock.Mock(),
system_nics_by_mac={MACADDR: nic})
expected = {
'version': 2,
@@ -829,7 +840,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH0_SEARCH_DOMAIN': 'example.com example.org',
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context,
+ net = ds.OpenNebulaNetwork(context, mock.Mock(),
system_nics_by_mac={MACADDR: nic})
expected = {
@@ -883,7 +894,10 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
}
net = ds.OpenNebulaNetwork(
- context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'})
+ context,
+ mock.Mock(),
+ system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
+ )
expected = {
'version': 2,
@@ -913,12 +927,43 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.assertEqual(expected, net.gen_conf())
-class TestParseShellConfig(unittest.TestCase):
+class TestParseShellConfig:
+ @pytest.mark.allow_subp_for("bash")
def test_no_seconds(self):
cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
- self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
+ assert ret == {"foo": "bar", "xx": "foo"}
+
+
+class TestGetPhysicalNicsByMac:
+ @pytest.mark.parametrize(
+ "interfaces_by_mac,physical_devs,expected_return",
+ [
+ # No interfaces => empty return
+ ({}, [], {}),
+ # Only virtual interface => empty return
+ ({"mac1": "virtual0"}, [], {}),
+ # Only physical interface => it is returned
+ ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}),
+ # Combination of physical and virtual => only physical returned
+ (
+ {"mac3": "physical1", "mac4": "virtual1"},
+ ["physical1"],
+ {"mac3": "physical1"},
+ ),
+ ],
+ )
+ def test(self, interfaces_by_mac, physical_devs, expected_return):
+ distro = mock.Mock()
+ distro.networking.is_physical.side_effect = (
+ lambda devname: devname in physical_devs
+ )
+ with mock.patch(
+ DS_PATH + ".net.get_interfaces_by_mac",
+ return_value=interfaces_by_mac,
+ ):
+ assert expected_return == ds.get_physical_nics_by_mac(distro)
def populate_context_dir(path, variables):
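TestGetPhysicalNicsByMac spells out what the new distro argument buys OpenNebula: interface discovery is delegated to distro.networking.is_physical, so virtual devices are filtered out of the MAC map. Roughly:

```python
def get_physical_nics_by_mac_sketch(distro, interfaces_by_mac):
    # interfaces_by_mac stands in for net.get_interfaces_by_mac(); the
    # real function is ds.get_physical_nics_by_mac(distro).
    return {mac: name for mac, name in interfaces_by_mac.items()
            if distro.networking.is_physical(name)}
```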
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index f754556f..3cfba74d 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -279,7 +279,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(2, len(ds_os_local.files))
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with('eth9')
+ m_dhcp.assert_called_with('eth9', None)
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
@@ -510,6 +510,24 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
'Expected detect_openstack == True on OpenTelekomCloud')
@test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'VMware Virtual Platform' # SAP CCloud uses VMware
+ if dmi_key == 'chassis-asset-tag':
+ return 'SAP CCloud VM'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on SAP CCloud VM')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
m_is_x86):
"""Return True on OpenStack reporting Oracle cloud asset-tag."""
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index a19c35c8..1d088577 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -10,6 +10,7 @@ import os
from collections import OrderedDict
from textwrap import dedent
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call
from cloudinit.helpers import Paths
@@ -48,7 +49,7 @@ def fill_properties(props, template=OVF_ENV_CONTENT):
for key, val in props.items():
lines.append(prop_tmpl.format(key=key, val=val))
indent = " "
- properties = ''.join([indent + l + "\n" for l in lines])
+ properties = ''.join([indent + line + "\n" for line in lines])
return template.format(properties=properties)
@@ -219,6 +220,88 @@ class TestDatasourceOVF(CiTestCase):
self.assertIn('Custom script is disabled by VM Administrator',
str(context.exception))
+ def test_get_data_cust_script_enabled(self):
+ """If custom script is enabled by VMware tools configuration,
+ execute the script.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': False}, distro={},
+ paths=paths)
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """)
+ util.write_file(conf_file, conf_content)
+
+        # Mock that the custom script is enabled by returning "true" from
+        # get_tools_config
+ with mock.patch(MPATH + 'get_tools_config', return_value="true"):
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'util.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'get_nics_to_enable': ''},
+ ds.get_data)
+ # Verify custom script is trying to be executed
+ customscript = self.tmp_path('test-script', self.tdir)
+ self.assertIn('Script %s not found!!' % customscript,
+ str(context.exception))
+
+ def test_get_data_force_run_post_script_is_yes(self):
+ """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
+ enable-custom-scripts is not defined in VM Tools configuration
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': False}, distro={},
+ paths=paths)
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
+ # default value is TRUE
+ conf_content = dedent("""\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ DEFAULT-RUN-POST-CUST-SCRIPT = yes
+ """)
+ util.write_file(conf_file, conf_content)
+
+ # Mock get_tools_config(section, key, defaultVal) to return
+ # defaultVal
+ def my_get_tools_config(*args, **kwargs):
+ return args[2]
+
+ with mock.patch(MPATH + 'get_tools_config',
+ side_effect=my_get_tools_config):
+ with mock.patch(MPATH + 'set_customization_status',
+ return_value=('msg', b'')):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'util.read_dmi_data': 'vmware',
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'get_nics_to_enable': ''},
+ ds.get_data)
+                # Verify the custom script is still attempted even though
+                # enable-custom-scripts is unset in the VMware Tools config
+ customscript = self.tmp_path('test-script', self.tdir)
+ self.assertIn('Script %s not found!!' % customscript,
+ str(context.exception))
+
def test_get_data_non_vmware_seed_platform_info(self):
"""Platform info properly reports when on non-vmware platforms."""
paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
@@ -401,8 +484,8 @@ class TestTransportIso9660(CiTestCase):
self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
-@mock.patch(MPATH + "util.which")
-@mock.patch(MPATH + "util.subp")
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
class TestTransportVmwareGuestinfo(CiTestCase):
"""Test the com.vmware.guestInfo transport implemented in
transport_vmware_guestinfo."""
@@ -420,7 +503,7 @@ class TestTransportVmwareGuestinfo(CiTestCase):
def test_notfound_on_exit_code_1(self, m_subp, m_which):
"""If vmware-rpctool exits 1, then must return not found."""
m_which.return_value = self.rpctool_path
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
@@ -442,7 +525,7 @@ class TestTransportVmwareGuestinfo(CiTestCase):
def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
"""If vmware-rpctool exits non zero or 1, warnings should be logged."""
m_which.return_value = self.rpctool_path
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout=None, stderr="No value found", exit_code=2, cmd=["unused"])
self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
self.assertEqual(1, m_subp.call_count)
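These OVF tests fix the vmware-rpctool exit-code policy for transport_vmware_guestinfo: exit 1 simply means no guestinfo value is set (a quiet NOT_FOUND), while any other non-zero exit is unexpected and warrants a warning. Approximately (the exact rpctool arguments are assumed here, not taken from the hunk):

```python
from cloudinit import subp


def transport_guestinfo_sketch(log):
    try:
        out, _err = subp.subp(["vmware-rpctool", "info-get guestinfo.ovfEnv"])
    except subp.ProcessExecutionError as e:
        if e.exit_code != 1:
            log.warning("Unexpected vmware-rpctool failure: %s", e)
        return None  # NOT_FOUND in the real datasource
    return out
```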
diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py
index aabf1f18..d017510e 100644
--- a/tests/unittests/test_datasource/test_rbx.py
+++ b/tests/unittests/test_datasource/test_rbx.py
@@ -4,6 +4,7 @@ from cloudinit import helpers
from cloudinit import distros
from cloudinit.sources import DataSourceRbxCloud as ds
from cloudinit.tests.helpers import mock, CiTestCase, populate_dir
+from cloudinit import subp
DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
@@ -156,7 +157,7 @@ class TestRbxDataSource(CiTestCase):
expected
)
- @mock.patch(DS_PATH + '.util.subp')
+ @mock.patch(DS_PATH + '.subp.subp')
def test_gratuitous_arp_run_standard_arping(self, m_subp):
"""Test handle run arping & parameters."""
items = [
@@ -182,7 +183,7 @@ class TestRbxDataSource(CiTestCase):
], m_subp.call_args_list
)
- @mock.patch(DS_PATH + '.util.subp')
+ @mock.patch(DS_PATH + '.subp.subp')
def test_handle_rhel_like_arping(self, m_subp):
"""Test handle on RHEL-like distros."""
items = [
@@ -199,6 +200,35 @@ class TestRbxDataSource(CiTestCase):
m_subp.call_args_list
)
+ @mock.patch(
+ DS_PATH + '.subp.subp',
+ side_effect=subp.ProcessExecutionError()
+ )
+ def test_continue_on_arping_error(self, m_subp):
+ """Continue when command error"""
+ items = [
+ {
+ 'destination': '172.17.0.2',
+ 'source': '172.16.6.104'
+ },
+ {
+ 'destination': '172.17.0.2',
+ 'source': '172.16.6.104',
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
+ self.assertEqual([
+ mock.call([
+ 'arping', '-c', '2', '-S',
+ '172.16.6.104', '172.17.0.2'
+ ]),
+ mock.call([
+ 'arping', '-c', '2', '-S',
+ '172.16.6.104', '172.17.0.2'
+ ])
+ ], m_subp.call_args_list
+ )
+
def populate_cloud_metadata(path, data):
populate_dir(path, {'cloud.json': json.dumps(data)})
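The new rbx case locks in fault tolerance for gratuitous ARP announcements: a ProcessExecutionError from one arping invocation must not stop the remaining announcements. Schematically, using the Ubuntu-style arguments asserted above:

```python
from cloudinit import subp


def gratuitous_arp_sketch(items):
    for item in items:
        try:
            subp.subp(["arping", "-c", "2", "-S",
                       item["source"], item["destination"]])
        except subp.ProcessExecutionError:
            continue  # keep announcing the remaining addresses
```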
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 1b4dd0ad..9d82bda9 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -353,12 +353,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
netcfg = self.datasource.network_config
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
self.assertEqual(netcfg, resp)
@mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
@@ -371,25 +375,32 @@ class TestDataSourceScaleway(HttprettyTestCase):
m_get_cmdline.return_value = 'scaleway'
fallback_nic.return_value = 'ens2'
self.datasource.metadata['ipv6'] = {
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127',
+ }
netcfg = self.datasource.network_config
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'},
- {'type': 'static',
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127', }
- ]
-
- }]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [
+ {
+ 'type': 'dhcp4'
+ },
+ {
+ 'type': 'static',
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127',
+ }
+ ]
}
+ ]
+ }
self.assertEqual(netcfg, resp)
@mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
@@ -417,12 +428,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = sources.UNSET
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
@@ -441,12 +456,16 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.datasource.metadata['ipv6'] = None
self.datasource._network_config = None
- resp = {'version': 1,
- 'config': [{
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]}]
+ resp = {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]
}
+ ]
+ }
netcfg = self.datasource.network_config
self.assertEqual(netcfg, resp)
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 62084de5..5847a384 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -12,8 +12,6 @@ order to validate return responses.
'''
-from __future__ import print_function
-
from binascii import crc32
import json
import multiprocessing
@@ -22,7 +20,7 @@ import os.path
import re
import signal
import stat
-import unittest2
+import unittest
import uuid
from cloudinit import serial
@@ -34,8 +32,8 @@ from cloudinit.sources.DataSourceSmartOS import (
from cloudinit.event import EventType
from cloudinit import helpers as c_helpers
-from cloudinit.util import (
- b64e, subp, ProcessExecutionError, which, write_file)
+from cloudinit.util import (b64e, write_file)
+from cloudinit.subp import (subp, ProcessExecutionError, which)
from cloudinit.tests.helpers import (
CiTestCase, mock, FilesystemMockingTestCase, skipIf)
@@ -669,7 +667,7 @@ class TestIdentifyFile(CiTestCase):
with self.allow_subp(["file"]):
self.assertEqual("text/plain", identify_file(fname))
- @mock.patch(DSMOS + ".util.subp")
+ @mock.patch(DSMOS + ".subp.subp")
def test_returns_none_on_error(self, m_subp):
"""On 'file' execution error, None should be returned."""
m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
@@ -1095,11 +1093,11 @@ class TestNetworkConversion(CiTestCase):
self.assertEqual(expected, found)
-@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
- "Only supported on KVM and bhyve guests under SmartOS")
-@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
- "Requires write access to " + SERIAL_DEVICE)
-@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS")
+@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
This class tests locking on an actual serial port, and as such can only
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
new file mode 100644
index 00000000..3a68f2a9
--- /dev/null
+++ b/tests/unittests/test_distros/test_bsd_utils.py
@@ -0,0 +1,67 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.bsd_utils as bsd_utils
+
+from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock)
+
+RC_FILE = """
+if something; then
+ do something here
+fi
+hostname={hostname}
+"""
+
+
+class TestBsdUtils(CiTestCase):
+
+ def setUp(self):
+ super().setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, 'load_file'))
+
+ self.write_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, 'write_file'))
+
+ def test_get_rc_config_value(self):
+ self.load_file.return_value = 'hostname=foo\n'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+ self.load_file.assert_called_with('/etc/rc.conf')
+
+ self.load_file.return_value = 'hostname=foo'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+ self.load_file.return_value = 'hostname="foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+ self.load_file.return_value = "hostname='foo'"
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
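+        # Mismatched quotes are preserved verbatim rather than stripped.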
+ self.load_file.return_value = 'hostname=\'foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"")
+
+ self.load_file.return_value = ''
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None)
+
+ self.load_file.return_value = RC_FILE.format(hostname='foo')
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo")
+
+    def test_set_rc_config_value_unchanged(self):
+        # The file already contains hostname=foo, so setting the same
+        # value again should not trigger a write.
+        self.load_file.return_value = RC_FILE.format(hostname='foo')
+        bsd_utils.set_rc_config_value('hostname', 'foo')
+        self.write_file.assert_not_called()
+
+ def test_set_rc_config_value(self):
+ bsd_utils.set_rc_config_value('hostname', 'foo')
+ self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
+
+ self.load_file.return_value = RC_FILE.format(hostname='foo')
+ bsd_utils.set_rc_config_value('hostname', 'bar')
+ self.write_file.assert_called_with(
+ '/etc/rc.conf',
+ RC_FILE.format(hostname='bar')
+ )
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index ef11784d..94ab052d 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -46,7 +46,7 @@ class MyBaseDistro(distros.Distro):
@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
-@mock.patch("cloudinit.distros.util.subp")
+@mock.patch("cloudinit.distros.subp.subp")
class TestCreateUser(CiTestCase):
with_logs = True
@@ -240,7 +240,7 @@ class TestCreateUser(CiTestCase):
[mock.call(set(['auth1']), user), # not disabled
mock.call(set(['key1']), 'foouser', options=disable_prefix)])
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp,
m_is_snappy):
"""Lock uses usermod --lock if no 'passwd' cmd available."""
@@ -250,7 +250,7 @@ class TestCreateUser(CiTestCase):
[mock.call(['usermod', '--lock', 'bob'])],
m_subp.call_args_list)
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_with_passwd_if_available(self, m_which, m_subp,
m_is_snappy):
"""Lock with only passwd will use passwd."""
@@ -260,7 +260,7 @@ class TestCreateUser(CiTestCase):
[mock.call(['passwd', '-l', 'bob'])],
m_subp.call_args_list)
- @mock.patch("cloudinit.distros.util.which")
+ @mock.patch("cloudinit.distros.subp.which")
def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp,
m_is_snappy):
"""Lock with no commands available raises RuntimeError."""
diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py
index da16a797..7ff8240b 100644
--- a/tests/unittests/test_distros/test_debian.py
+++ b/tests/unittests/test_distros/test_debian.py
@@ -5,7 +5,7 @@ from cloudinit import util
from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-@mock.patch("cloudinit.distros.debian.util.subp")
+@mock.patch("cloudinit.distros.debian.subp.subp")
class TestDebianApplyLocale(FilesystemMockingTestCase):
def setUp(self):
diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py
index 8af253a2..be565b04 100644
--- a/tests/unittests/test_distros/test_freebsd.py
+++ b/tests/unittests/test_distros/test_freebsd.py
@@ -8,7 +8,7 @@ import os
class TestDeviceLookUp(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_find_freebsd_part_label(self, mock_subp):
glabel_out = '''
gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
@@ -19,7 +19,7 @@ gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
res = find_freebsd_part("/dev/label/rootfs")
self.assertEqual("da0p2", res)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_find_freebsd_part_gpt(self, mock_subp):
glabel_out = '''
gpt/bootfs N/A vtbd0p1
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 02b334e3..44607489 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -6,6 +6,7 @@ from cloudinit import util
from cloudinit.tests import helpers
import os
+import pytest
import shutil
import tempfile
from unittest import mock
@@ -37,24 +38,6 @@ gapmi = distros._get_arch_package_mirror_info
class TestGenericDistro(helpers.FilesystemMockingTestCase):
- def return_first(self, mlist):
- if not mlist:
- return None
- return mlist[0]
-
- def return_second(self, mlist):
- if not mlist:
- return None
- return mlist[1]
-
- def return_none(self, _mlist):
- return None
-
- def return_last(self, mlist):
- if not mlist:
- return None
- return(mlist[-1])
-
def setUp(self):
super(TestGenericDistro, self).setUp()
# Make a temp directoy for tests to use.
@@ -145,61 +128,6 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
arch_mirrors = gapmi(package_mirrors, arch="amd64")
self.assertEqual(package_mirrors[0], arch_mirrors)
- def test_get_package_mirror_info_az_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="us-east-1a")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_second)
- self.assertEqual(results,
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_none)
- self.assertEqual(results, package_mirrors[0]['failsafe'])
-
- def test_get_package_mirror_info_az_non_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror2-intel'})
-
- def test_get_package_mirror_info_none(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone=None)
-
- # because both search entries here replacement based on
- # availability-zone, the filter will be called with an empty list and
- # failsafe should be taken.
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror1-intel'})
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- self.assertEqual(results,
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'})
-
def test_systemd_in_use(self):
cls = distros.fetch("ubuntu")
d = cls("ubuntu", {}, None)
@@ -245,7 +173,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
for d_name in ("ubuntu", "rhel"):
cls = distros.fetch(d_name)
d = cls(d_name, {}, None)
- with mock.patch("cloudinit.util.subp") as m_subp:
+ with mock.patch("cloudinit.subp.subp") as m_subp:
d.expire_passwd("myuser")
m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
@@ -253,10 +181,122 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
"""Test FreeBSD.expire_passwd uses the pw command."""
cls = distros.fetch("freebsd")
d = cls("freebsd", {}, None)
- with mock.patch("cloudinit.util.subp") as m_subp:
+ with mock.patch("cloudinit.subp.subp") as m_subp:
d.expire_passwd("myuser")
m_subp.assert_called_once_with(
["pw", "usermod", "myuser", "-p", "01-Jan-1970"])
+class TestGetPackageMirrors:
+
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+
+ return mlist[1] if len(mlist) > 1 else None
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+        return mlist[-1]
+
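+    # Each case gives (allow_ec2_mirror, platform_type) plus the mirror
+    # dicts expected from the first and second filter functions.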
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type, mirrors",
+ [
+ (True, "ec2", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (True, "other", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (False, "ec2", [
+ {'primary': 'http://us-east-1.ec2/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror2-intel'}
+ ]),
+ (False, "other", [
+ {'primary': 'http://us-east-1a.clouds/',
+ 'security': 'http://security-mirror1-intel'},
+ {'primary': 'http://fs-primary-intel',
+ 'security': 'http://security-mirror2-intel'}
+ ])
+ ])
+ def test_get_package_mirror_info_az_ec2(self,
+ allow_ec2_mirror,
+ platform_type,
+ mirrors):
+ flag_path = "cloudinit.distros." \
+ "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ with mock.patch(flag_path, allow_ec2_mirror):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(
+ availability_zone="us-east-1a",
+ platform_type=platform_type)
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+            assert results == mirrors[0]
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_second)
+            assert results == mirrors[1]
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_none)
+            assert results == package_mirrors[0]['failsafe']
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+        assert results == {
+            'primary': 'http://nova.cloudvendor.clouds/',
+            'security': 'http://security-mirror1-intel',
+        }
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_last)
+        assert results == {
+            'primary': 'http://nova.cloudvendor.clouds/',
+            'security': 'http://security-mirror2-intel',
+        }
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone=None)
+
+        # Because both search entries here use replacement based on
+        # availability-zone, the filter will be called with an empty list
+        # and the failsafe should be taken.
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_first)
+        assert results == {
+            'primary': 'http://fs-primary-intel',
+            'security': 'http://security-mirror1-intel',
+        }
+
+ results = gpmi(arch_mirrors, data_source=data_source_mock,
+ mirror_filter=self.return_last)
+        assert results == {
+            'primary': 'http://fs-primary-intel',
+            'security': 'http://security-mirror2-intel',
+        }
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/test_distros/test_netbsd.py
new file mode 100644
index 00000000..11a68d2a
--- /dev/null
+++ b/tests/unittests/test_distros/test_netbsd.py
@@ -0,0 +1,17 @@
+import cloudinit.distros.netbsd
+
+import pytest
+import unittest.mock as mock
+
+
+@pytest.mark.parametrize('with_pkgin', (True, False))
+@mock.patch("cloudinit.distros.netbsd.os")
+def test_init(m_os, with_pkgin):
+    # pkgin presence is simulated via the mocked os.path.exists below.
+ m_os.path.exists.return_value = with_pkgin
+ cfg = {}
+
+ distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
+ expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None
+ assert distro.pkg_cmd_upgrade_prefix == expectation
+ assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index ccf66161..8d7b09c8 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -12,6 +12,7 @@ from cloudinit import helpers
from cloudinit import settings
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, dir2dict)
+from cloudinit import subp
from cloudinit import util
@@ -532,7 +533,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
NETWORKING_IPV6=yes
IPV6_AUTOCONF=no
"""),
- }
+ }
# rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
self._apply_and_verify(self.distro.apply_network_config,
V1_NET_CFG_IPV6,
@@ -656,7 +657,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
IP=dhcp
Interface=eth1
"""),
- }
+ }
# ub_distro.apply_network_config(V1_NET_CFG, False)
self._apply_and_verify(self.distro.apply_network_config,
@@ -688,6 +689,6 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
def get_mode(path, target=None):
- return os.stat(util.target_path(target, path)).st_mode & 0o777
+ return os.stat(subp.target_path(target, path)).st_mode & 0o777
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index a6faf0ef..fa48410a 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -307,7 +307,7 @@ class TestUGNormalize(TestCase):
self.assertEqual({'default': False}, users['joe'])
self.assertEqual({'default': False}, users['bob'])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_create_snap_user(self, mock_subp):
mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
'')]
@@ -326,7 +326,7 @@ class TestUGNormalize(TestCase):
mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
self.assertEqual(username, 'joe')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_create_snap_user_known(self, mock_subp):
mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
'')]
@@ -348,7 +348,7 @@ class TestUGNormalize(TestCase):
@mock.patch('cloudinit.util.system_is_snappy')
@mock.patch('cloudinit.util.is_group')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp,
mock_snappy):
mock_isgrp.return_value = False
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 36d7fbbf..9314b244 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -6,6 +6,7 @@ import os
from uuid import uuid4
from cloudinit import safeyaml
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import (
CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
@@ -160,8 +161,8 @@ class DsIdentifyBase(CiTestCase):
rc = 0
try:
- out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True)
- except util.ProcessExecutionError as e:
+ out, err = subp.subp(['sh', '-c', '. %s' % wrap], capture=True)
+ except subp.ProcessExecutionError as e:
rc = e.exit_code
out = e.stdout
err = e.stderr
@@ -272,6 +273,10 @@ class TestDsIdentify(DsIdentifyBase):
"""Rbx datasource has a disk with LABEL=CLOUDMD."""
self._test_ds_found('RbxCloud')
+ def test_rbx_cloud_lower(self):
+ """Rbx datasource has a disk with LABEL=cloudmd."""
+ self._test_ds_found('RbxCloudLower')
+
def test_config_drive_upper(self):
"""ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
self._test_ds_found('ConfigDriveUpper')
@@ -447,6 +452,10 @@ class TestDsIdentify(DsIdentifyBase):
"""Open Telecom identification."""
self._test_ds_found('OpenStack-OpenTelekom')
+ def test_openstack_sap_ccloud(self):
+ """SAP Converged Cloud identification"""
+ self._test_ds_found('OpenStack-SAPCCloud')
+
def test_openstack_asset_tag_nova(self):
"""OpenStack identification via asset tag OpenStack Nova."""
self._test_ds_found('OpenStack-AssetTag-Nova')
@@ -568,6 +577,10 @@ class TestDsIdentify(DsIdentifyBase):
"""NoCloud is found with uppercase filesystem label."""
self._test_ds_found('NoCloudUpper')
+ def test_nocloud_fatboot(self):
+ """NoCloud fatboot label - LP: #184166."""
+ self._test_ds_found('NoCloud-fatboot')
+
def test_nocloud_seed(self):
"""Nocloud seed directory."""
self._test_ds_found('NoCloud-seed')
@@ -607,8 +620,10 @@ class TestDsIdentify(DsIdentifyBase):
ret = self._check_via_dict(
cust, RC_FOUND,
func=".", args=[os.path.join(rootd, mpp)], rootd=rootd)
- line = [l for l in ret.stdout.splitlines() if l.startswith(pre)][0]
- toks = line.replace(pre, "").split(":")
+ match = [
+ line for line in ret.stdout.splitlines() if line.startswith(pre)
+ ][0]
+ toks = match.replace(pre, "").split(":")
expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"]
self.assertEqual(expected, [p for p in expected if p in toks],
"path did not have expected tokens")
@@ -805,6 +820,20 @@ VALID_CFG = {
'dev/vdb': 'pretend iso content for cidata\n',
}
},
+ 'NoCloud-fatboot': {
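+        # vfat filesystems expose their label via blkid's LABEL_FATBOOT.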
+ 'ds': 'NoCloud',
+ 'mocks': [
+ MOCK_VIRT_IS_XEN,
+ {'name': 'blkid', 'ret': 0,
+ 'out': blkid_out(
+ BLKID_UEFI_UBUNTU +
+ [{'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'SEC_TYPE': 'msdos',
+ 'UUID': '355a-4FC2', 'LABEL_FATBOOT': 'cidata'}])},
+ ],
+ 'files': {
+ 'dev/vdb': 'pretend iso content for cidata\n',
+ }
+ },
'NoCloud-seed': {
'ds': 'NoCloud',
'files': {
@@ -834,6 +863,12 @@ VALID_CFG = {
'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'},
'mocks': [MOCK_VIRT_IS_XEN],
},
+ 'OpenStack-SAPCCloud': {
+ # SAP CCloud hosts use OpenStack on VMware
+ 'ds': 'OpenStack',
+ 'files': {P_CHASSIS_ASSET_TAG: 'SAP CCloud VM\n'},
+ 'mocks': [MOCK_VIRT_IS_VMWARE],
+ },
'OpenStack-AssetTag-Nova': {
# VMware vSphere can't modify product-name, LP: #1669875
'ds': 'OpenStack',
@@ -935,6 +970,18 @@ VALID_CFG = {
)},
],
},
+ 'RbxCloudLower': {
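+        # Same as RbxCloud but with a lowercase 'cloudmd' disk label.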
+ 'ds': 'RbxCloud',
+ 'mocks': [
+ {'name': 'blkid', 'ret': 0,
+ 'out': blkid_out(
+ [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
+ {'DEVNAME': 'vda2', 'TYPE': 'ext4',
+ 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
+ {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'cloudmd'}]
+ )},
+ ],
+ },
'Hetzner': {
'ds': 'Hetzner',
'files': {P_SYS_VENDOR: 'Hetzner\n'},
@@ -1028,11 +1075,11 @@ VALID_CFG = {
'Ec2-E24Cloud': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloud\n'},
- },
+ },
'Ec2-E24Cloud-negative': {
'ds': 'Ec2',
'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
+ }
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py
new file mode 100644
index 00000000..8acc0b33
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apk_configure.py
@@ -0,0 +1,299 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_apk_configure
+Test creation of repositories file
+"""
+
+import logging
+import os
+import textwrap
+
+from cloudinit import (cloud, helpers, util)
+
+from cloudinit.config import cc_apk_configure
+from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
+
+REPO_FILE = "/etc/apk/repositories"
+DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
+CC_APK = 'cloudinit.config.cc_apk_configure'
+
+
+class TestNoConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestNoConfig, self).setUp()
+ self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos')
+ self.name = "apk-configure"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def test_no_config(self):
+ """
+ Test that nothing is done if no apk-configure
+ configuration is provided.
+ """
+ config = util.get_builtin_cfg()
+
+ cc_apk_configure.handle(self.name, config, self.cloud_init,
+ self.log, self.args)
+
+ self.assertEqual(0, self.m_write_repos.call_count)
+
+
+class TestConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.new_root = self.reRoot(root=self.new_root)
+ for dirname in ['tmp', 'etc/apk']:
+ util.ensure_dir(os.path.join(self.new_root, dirname))
+ self.paths = helpers.Paths({'templates_dir': self.new_root})
+ self.name = "apk-configure"
+ self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ @mock.patch(CC_APK + '._write_repositories_file')
+ def test_no_repo_settings(self, m_write_repos):
+ """
+        Test that nothing is written if the 'alpine_repo' key
+ is not present.
+ """
+ config = {"apk_repos": {}}
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ @mock.patch(CC_APK + '._write_repositories_file')
+ def test_empty_repo_settings(self, m_write_repos):
+ """
+ Test that nothing is written if 'alpine_repo' list is empty.
+ """
+ config = {"apk_repos": {"alpine_repo": []}}
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ def test_only_main_repo(self):
+ """
+        Test when only details of the main repo are written to file.
+ """
+ alpine_version = 'v3.12'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version
+ }
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_and_community_repos(self):
+ """
+ Test when only details of main and community repos are
+ written to file.
+ """
+ alpine_version = 'edge'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True
+ }
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ are written to file.
+ """
+ alpine_version = 'v3.12'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True
+ }
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = 'edge'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True
+ }
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/{1}/testing
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and
+ local repos are written to file.
+ """
+ alpine_version = 'v3.12'
+ local_repo_url = 'http://some.mirror/whereever'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True
+ },
+ "local_repo_base_url": local_repo_url
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and local repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = 'edge'
+ local_repo_url = 'http://some.mirror/whereever'
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True
+ },
+ "local_repo_base_url": local_repo_url
+ }
+ }
+
+ cc_apk_configure.handle(self.name, config, self.cloud, self.log,
+ self.args)
+
+ expected_content = textwrap.dedent("""\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index 69009a44..369480be 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -13,6 +13,7 @@ from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import templater
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -66,7 +67,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
@@ -100,6 +101,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
cfg = {'apt_mirror_search': mirror}
else:
cfg = {'apt_mirror': mirror}
+
mycloud = self._get_cloud(distro)
with mock.patch.object(util, 'write_file') as mockwf:
@@ -107,8 +109,9 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
return_value="faketmpl") as mocklf:
with mock.patch.object(os.path, 'isfile',
return_value=True) as mockisfile:
- with mock.patch.object(templater, 'render_string',
- return_value="fake") as mockrnd:
+ with mock.patch.object(
+ templater, 'render_string',
+ return_value='fake') as mockrnd:
with mock.patch.object(util, 'rename'):
cc_apt_configure.handle("test", cfg, mycloud,
LOG, None)
@@ -176,7 +179,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(subp, 'subp', self.subp):
with mock.patch.object(Distro, 'get_primary_arch',
return_value='amd64'):
cc_apt_configure.handle("notimportant", cfg, mycloud,
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
index 0aa3d51a..b96fd4d4 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
@@ -13,6 +13,7 @@ from unittest.mock import call
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -94,7 +95,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList - Class to test sources list rendering"""
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
@@ -222,7 +223,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(util, 'subp', self.subp):
+ with mock.patch.object(subp, 'subp', self.subp):
with mock.patch.object(Distro, 'get_primary_arch',
return_value='amd64'):
cc_apt_configure.handle("notimportant", cfg, mycloud,
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index 866752ef..367971cb 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -14,6 +14,7 @@ from unittest.mock import call
from cloudinit.config import cc_apt_configure
from cloudinit import gpg
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import TestCase
@@ -42,10 +43,17 @@ class FakeDistro(object):
return
+class FakeDatasource:
+ """Fake Datasource helper object"""
+ def __init__(self):
+ self.region = 'region'
+
+
class FakeCloud(object):
"""Fake Cloud helper object"""
def __init__(self):
self.distro = FakeDistro()
+ self.datasource = FakeDatasource()
class TestAptSourceConfig(TestCase):
@@ -271,7 +279,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1234', '')) as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
@@ -356,7 +364,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_with(['apt-key', 'add', '-'],
@@ -398,7 +406,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(['apt-key', 'add', '-'],
@@ -413,7 +421,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1212', '')) as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
@@ -476,7 +484,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(['add-apt-repository',
'ppa:smoser/cloud-init-test'],
@@ -495,7 +503,7 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile3}
cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud,
None, None)
calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index 90949b6d..ac847238 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -18,6 +18,7 @@ from cloudinit import cloud
from cloudinit import distros
from cloudinit import gpg
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
@@ -48,6 +49,18 @@ MOCK_LSB_RELEASE_DATA = {
'release': '18.04', 'codename': 'bionic'}
+class FakeDatasource:
+ """Fake Datasource helper object"""
+ def __init__(self):
+ self.region = 'region'
+
+
+class FakeCloud:
+ """Fake Cloud helper object"""
+ def __init__(self):
+ self.datasource = FakeDatasource()
+
+
class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfig
Main Class to test apt configs
@@ -221,7 +234,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""
params = self._get_default_params()
- with mock.patch("cloudinit.util.subp",
+ with mock.patch("cloudinit.subp.subp",
return_value=('fakekey 1234', '')) as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -296,7 +309,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
' xenial main'),
'key': "fakekey 4321"}}
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -318,7 +331,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -333,7 +346,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'keyid': "03683F77"}}
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('fakekey 1212', '')) as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
@@ -416,7 +429,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
- with mock.patch("cloudinit.util.subp") as mockobj:
+ with mock.patch("cloudinit.subp.subp") as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
mockobj.assert_any_call(['add-apt-repository',
@@ -432,7 +445,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
- with mock.patch("cloudinit.util.subp") as mockobj:
+ with mock.patch("cloudinit.subp.subp") as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
@@ -470,7 +483,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
fromfn = ("%s/%s_%s" % (pre, archive, post))
tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
self.assertEqual(mirrors['MIRROR'],
"http://test.ubuntu.com/%s/" % component)
@@ -558,7 +571,8 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"],
"uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), 'amd64')
self.assertEqual(mirrors['MIRROR'],
pmir)
@@ -593,7 +607,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"], "uri": "nothis-security"},
{'arches': [arch], "uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
self.assertEqual(mirrors['PRIMARY'], pmir)
self.assertEqual(mirrors['MIRROR'], pmir)
@@ -612,7 +626,8 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
{'arches': ["default"],
"uri": smir}]}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, 'amd64')
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), 'amd64')
self.assertEqual(mirrors['MIRROR'],
pmir)
@@ -670,9 +685,9 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"],
"search": ["sfailme", smir]}]}
- with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
side_effect=[pmir, smir]) as mocksearch:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None,
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(),
'amd64')
calls = [call(["pfailme", pmir]),
@@ -709,9 +724,10 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
mockgm.assert_has_calls(calls)
# should not be called, since primary is specified
- with mock.patch.object(cc_apt_configure,
+ with mock.patch.object(cc_apt_configure.util,
'search_for_mirror') as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), arch)
mockse.assert_not_called()
self.assertEqual(mirrors['MIRROR'],
@@ -974,7 +990,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
mocksdns.assert_has_calls(calls)
# first return is for the non-dns call before
- with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
side_effect=[None, pmir, None, smir]) as mockse:
mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
@@ -996,7 +1012,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
class TestDebconfSelections(TestCase):
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_set_sel_appends_newline_if_absent(self, m_subp):
"""Automatically append a newline to debconf-set-selections config."""
selections = b'some/setting boolean true'
@@ -1033,7 +1049,9 @@ class TestDebconfSelections(TestCase):
# assumes called with *args value.
selections = m_set_sel.call_args_list[0][0][0].decode()
- missing = [l for l in lines if l not in selections.splitlines()]
+ missing = [
+ line for line in lines if line not in selections.splitlines()
+ ]
self.assertEqual([], missing)
@mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
@@ -1079,7 +1097,7 @@ class TestDebconfSelections(TestCase):
self.assertTrue(m_get_inst.called)
self.assertEqual(m_dpkg_r.call_count, 0)
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
target = "/foo-target"
@@ -1102,12 +1120,12 @@ class TestDebconfSelections(TestCase):
'cloud-init']
self.assertEqual(expected, found)
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
cc_apt_configure.dpkg_reconfigure([])
m_subp.assert_not_called()
- @mock.patch("cloudinit.config.cc_apt_configure.util.subp")
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
m_subp.assert_not_called()
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index a76760fa..b53d60d4 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -2,7 +2,7 @@
from cloudinit.config.cc_bootcmd import handle, schema
from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
+from cloudinit import (distros, helpers, cloud, subp, util)
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -36,7 +36,7 @@ class TestBootcmd(CiTestCase):
def setUp(self):
super(TestBootcmd, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = self.tmp_dir()
def _get_cloud(self, distro):
@@ -130,7 +130,7 @@ class TestBootcmd(CiTestCase):
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
with self.allow_subp(['/bin/sh']):
- with self.assertRaises(util.ProcessExecutionError) as ctxt:
+ with self.assertRaises(subp.ProcessExecutionError) as ctxt:
handle('does-not-matter', valid_config, cc, LOG, [])
self.assertIn(
'Unexpected error while running command.\n'
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 5b4105dd..e74a0a08 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,8 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import cloud
+from cloudinit import distros
from cloudinit.config import cc_ca_certs
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.tests.helpers import TestCase
@@ -11,13 +13,9 @@ import logging
import shutil
import tempfile
import unittest
+from contextlib import ExitStack
from unittest import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
class TestNoConfig(unittest.TestCase):
def setUp(self):
@@ -49,8 +47,9 @@ class TestConfig(TestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.name = "ca-certs"
+ distro = self._fetch_distro('ubuntu')
self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.cloud = cloud.Cloud(None, self.paths, None, distro, None)
self.log = logging.getLogger("TestNoConfig")
self.args = []
@@ -65,6 +64,11 @@ class TestConfig(TestCase):
self.mock_remove = self.mocks.enter_context(
mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
def test_no_trusted_list(self):
"""
Test that no certificates are written if the 'trusted' key is not
@@ -204,6 +208,28 @@ class TestAddCaCerts(TestCase):
mock_load.assert_called_once_with("/etc/ca-certificates.conf")
+ def test_single_cert_to_empty_existing_ca_file(self):
+ """Test adding a single certificate to the trusted CAs
+ when existing ca-certificates.conf is empty"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ expected = "cloud-init-ca-certs.crt\n"
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, 'write_file', autospec=True))
+ mock_stat = mocks.enter_context(
+ mock.patch("cloudinit.config.cc_ca_certs.os.stat")
+ )
+ mock_stat.return_value.st_size = 0
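+            # An empty conf file gets only the cert filename appended.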
+
+ cc_ca_certs.add_ca_certs([cert])
+
+ mock_write.assert_has_calls([
+ mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
+ cert, mode=0o644),
+ mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
+
def test_multiple_certs(self):
"""Test adding multiple certificates to the trusted CAs."""
certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
@@ -232,7 +258,7 @@ class TestAddCaCerts(TestCase):
class TestUpdateCaCerts(unittest.TestCase):
def test_commands(self):
- with mock.patch.object(util, 'subp') as mockobj:
+ with mock.patch.object(subp, 'subp') as mockobj:
cc_ca_certs.update_ca_certs()
mockobj.assert_called_once_with(
["update-ca-certificates"], capture=False)
@@ -254,9 +280,9 @@ class TestRemoveDefaultCaCerts(TestCase):
mock.patch.object(util, 'delete_dir_contents'))
mock_write = mocks.enter_context(
mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(util, 'subp'))
+ mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp'))
- cc_ca_certs.remove_default_ca_certs()
+ cc_ca_certs.remove_default_ca_certs('ubuntu')
mock_delete.assert_has_calls([
mock.call("/usr/share/ca-certificates/"),
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index 2dab3a54..7918c609 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -41,7 +41,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200)
ret = (None, None) # stdout, stderr but capture=False
- with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile",
+ with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile",
return_value=ret) as m_subp_blob:
cc_chef.install_chef_from_omnibus()
# admittedly whitebox, but assuming subp_blob_in_tempfile works
@@ -52,7 +52,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
m_subp_blob.call_args_list)
@mock.patch('cloudinit.config.cc_chef.url_helper.readurl')
- @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
+ @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
"""install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
@@ -65,23 +65,23 @@ class TestInstallChefOmnibus(HttprettyTestCase):
cc_chef.install_chef_from_omnibus()
expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES,
'url': cc_chef.OMNIBUS_URL}
- self.assertItemsEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
cc_chef.install_chef_from_omnibus(retries=10)
expected_kwargs = {'retries': 10,
'url': cc_chef.OMNIBUS_URL}
- self.assertItemsEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
expected_subp_kwargs = {
'args': ['-v', '2.0'],
'basename': 'chef-omnibus-install',
'blob': m_rdurl.return_value.contents,
'capture': False
}
- self.assertItemsEqual(
+ self.assertCountEqual(
expected_subp_kwargs,
m_subp_blob.call_args_list[0][1])
@mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
+ @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
"""install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
chef_outfile = self.tmp_path('chef.out', self.new_root)
@@ -97,7 +97,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
'blob': response,
'capture': False
}
- self.assertItemsEqual(expected_kwargs, called_kwargs)
+ self.assertCountEqual(expected_kwargs, called_kwargs)
class TestChef(FilesystemMockingTestCase):
@@ -130,6 +130,7 @@ class TestChef(FilesystemMockingTestCase):
# This should create a file of the format...
# Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
+ chef_license "accept"
log_level :info
ssl_verify_mode :verify_none
log_location "/var/log/chef/client.log"
@@ -153,6 +154,7 @@ class TestChef(FilesystemMockingTestCase):
util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
cfg = {
'chef': {
+ 'chef_license': "accept",
'server_url': 'localhost',
'validation_name': 'bob',
'validation_key': "/etc/chef/vkey.pem",
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 0e51f17a..4f4a57fa 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -44,7 +44,7 @@ class TestGetMbrHddSize(TestCase):
super(TestGetMbrHddSize, self).setUp()
self.patches = ExitStack()
self.subp = self.patches.enter_context(
- mock.patch.object(cc_disk_setup.util, 'subp'))
+ mock.patch.object(cc_disk_setup.subp, 'subp'))
def tearDown(self):
super(TestGetMbrHddSize, self).tearDown()
@@ -173,7 +173,7 @@ class TestUpdateFsSetupDevices(TestCase):
@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
return_value=('/dev/xdb1', False))
@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', ''))
+@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
class TestMkfsCommandHandling(CiTestCase):
with_logs = True
@@ -204,7 +204,7 @@ class TestMkfsCommandHandling(CiTestCase):
subp.assert_called_once_with(
'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
- @mock.patch('cloudinit.config.cc_disk_setup.util.which')
+ @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
"""mkfs observes extra_opts and overwrite settings when cmd is not
present."""
@@ -222,7 +222,7 @@ class TestMkfsCommandHandling(CiTestCase):
'-L', 'without_cmd', '-F', 'are', 'added'],
shell=False)
- @mock.patch('cloudinit.config.cc_disk_setup.util.which')
+ @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
def test_mkswap(self, m_which, subp, *args):
"""mkfs observes extra_opts and overwrite settings when cmd is not
present."""
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
index d854afcb..e3778b11 100644
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ b/tests/unittests/test_handler/test_handler_etc_hosts.py
@@ -44,8 +44,8 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.0.1\tcloud-init.test.us\tcloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
if '192.168.1.1\tblah.blah.us\tblah' not in contents:
self.assertIsNone('Default etc/hosts content modified')
@@ -64,7 +64,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.0.1 cloud-init.test.us cloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '127.0.1.1 cloud-init.test.us cloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
if '::1 cloud-init.test.us cloud-init' not in contents:
self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 43b53745..7f039b79 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -2,7 +2,7 @@
from cloudinit import cloud
from cloudinit.config import cc_growpart
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import TestCase
@@ -11,13 +11,9 @@ import logging
import os
import re
import unittest
+from contextlib import ExitStack
from unittest import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
-
# growpart:
# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
@@ -99,7 +95,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_auto_is_fine(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
config = {'growpart': {'mode': 'auto'}}
@@ -113,7 +109,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
config = {'growpart': {'mode': "growpart"}}
self.assertRaises(
@@ -126,7 +122,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_prefers_growpart(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=(HELP_GROWPART_RESIZE, "")) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
@@ -137,7 +133,7 @@ class TestConfig(TestCase):
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_falls_back_to_gpart(self):
with mock.patch.object(
- util, 'subp',
+ subp, 'subp',
return_value=("", HELP_GPART)) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGpart)
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
index db92a7e2..7d165687 100644
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ b/tests/unittests/test_handler/test_handler_landscape.py
@@ -49,8 +49,8 @@ class TestLandscape(FilesystemMockingTestCase):
"'landscape' key existed in config, but not a dict",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_landscape.util')
- def test_handler_restarts_landscape_client(self, m_util):
+ @mock.patch('cloudinit.config.cc_landscape.subp')
+ def test_handler_restarts_landscape_client(self, m_subp):
"""handler restarts lansdscape-client after install."""
mycloud = self._get_cloud('ubuntu')
cfg = {'landscape': {'client': {}}}
@@ -60,7 +60,7 @@ class TestLandscape(FilesystemMockingTestCase):
cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
self.assertEqual(
[mock.call(['service', 'landscape-client', 'restart'])],
- m_util.subp.call_args_list)
+ m_subp.subp.call_args_list)
def test_handler_installs_client_and_creates_config_file(self):
"""Write landscape client.conf and install landscape-client."""
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index 2b22559f..47e7d804 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -29,8 +29,6 @@ LOG = logging.getLogger(__name__)
class TestLocale(t_help.FilesystemMockingTestCase):
- with_logs = True
-
def setUp(self):
super(TestLocale, self).setUp()
self.new_root = tempfile.mkdtemp()
@@ -86,7 +84,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
cfg = {'locale': 'C.UTF-8'}
cc = self._get_cloud('ubuntu')
- with mock.patch('cloudinit.distros.debian.util.subp') as m_subp:
+ with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
locale_conf):
cc_locale.handle('cc_locale', cfg, cc, LOG, [])
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 40b521e5..21011204 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -31,13 +31,13 @@ class TestLxd(t_help.CiTestCase):
return cc
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_init(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_init(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
- mock_util.which.return_value = True
+ mock_subp.which.return_value = True
m_maybe_clean.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertTrue(mock_util.which.called)
+ self.assertTrue(mock_subp.which.called)
# no bridge config, so maybe_cleanup should not be called.
self.assertFalse(m_maybe_clean.called)
self.assertEqual(
@@ -45,14 +45,14 @@ class TestLxd(t_help.CiTestCase):
mock.call(
['lxd', 'init', '--auto', '--network-address=0.0.0.0',
'--storage-backend=zfs', '--storage-pool=poolname'])],
- mock_util.subp.call_args_list)
+ mock_subp.subp.call_args_list)
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_install(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_install(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
- mock_util.which.return_value = None
+ mock_subp.which.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
self.assertNotIn('WARN', self.logs.getvalue())
self.assertTrue(cc.distro.install_packages.called)
@@ -62,23 +62,23 @@ class TestLxd(t_help.CiTestCase):
self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux'])
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_init_does_nothing(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
+ self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean):
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_util.subp.called)
+ self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
def test_lxd_debconf_new_full(self):
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index c013a538..6891e15f 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -136,8 +136,9 @@ class TestHandler(t_help.TestCase):
cc = cloud.Cloud(ds, paths, {}, d, None)
return cc
+ @t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
@t_help.mock.patch("cloudinit.config.cc_mcollective.util")
- def test_mcollective_install(self, mock_util):
+ def test_mcollective_install(self, mock_util, mock_subp):
cc = self._get_cloud('ubuntu')
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
@@ -147,8 +148,8 @@ class TestHandler(t_help.TestCase):
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(install_pkg, ('mcollective',))
- self.assertTrue(mock_util.subp.called)
- self.assertEqual(mock_util.subp.call_args_list[0][0][0],
+ self.assertTrue(mock_subp.subp.called)
+ self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
['service', 'mcollective', 'restart'])
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index 05ac183e..e87069f6 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -127,6 +127,119 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
cc_mounts.sanitize_devname(
'ephemeral0.1', lambda x: disk_path, mock.Mock()))
+ def test_network_device_returns_network_device(self):
+ disk_path = 'netdevice:/path'
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
+
+
+class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
+
+ def setUp(self):
+ super(TestSwapFileCreation, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
+ self.swap_path = os.path.join(self.new_root, 'swap.img')
+ self._makedirs('/etc')
+
+ self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
+ 'mock_fstab_path',
+ self.fstab_path,
+ autospec=False)
+
+ self.add_patch('cloudinit.config.cc_mounts.subp.subp',
+ 'm_subp_subp')
+
+ self.add_patch('cloudinit.config.cc_mounts.util.mounts',
+ 'mock_util_mounts',
+ return_value={
+ '/dev/sda1': {'fstype': 'ext4',
+ 'mountpoint': '/',
+ 'opts': 'rw,relatime,discard'
+ }})
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ self.cc = {
+ 'swap': {
+ 'filename': self.swap_path,
+ 'size': '512',
+ 'maxsize': '512'}}
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip('/'))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def device_name_to_device(self, path):
+ if path == 'swap':
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch('cloudinit.util.kernel_version')
+ def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version,
+ m_get_mount_info):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls([
+ mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
+ mock.call(['mkswap', self.swap_path]),
+ mock.call(['swapon', '-a'])])
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch('cloudinit.util.kernel_version')
+ def test_swap_creation_method_xfs(self, m_kernel_version,
+ m_get_mount_info):
+ m_kernel_version.return_value = (3, 18)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls([
+ mock.call(['dd', 'if=/dev/zero',
+ 'of=' + self.swap_path,
+ 'bs=1M', 'count=0'], capture=True),
+ mock.call(['mkswap', self.swap_path]),
+ mock.call(['swapon', '-a'])])
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch('cloudinit.util.kernel_version')
+ def test_swap_creation_method_btrfs(self, m_kernel_version,
+ m_get_mount_info):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "btrfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls([
+ mock.call(['dd', 'if=/dev/zero',
+ 'of=' + self.swap_path,
+ 'bs=1M', 'count=0'], capture=True),
+ mock.call(['mkswap', self.swap_path]),
+ mock.call(['swapon', '-a'])])
+
+ @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch('cloudinit.util.kernel_version')
+ def test_swap_creation_method_ext4(self, m_kernel_version,
+ m_get_mount_info):
+ m_kernel_version.return_value = (5, 14)
+ m_get_mount_info.return_value = ["", "ext4"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls([
+ mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
+ mock.call(['mkswap', self.swap_path]),
+ mock.call(['swapon', '-a'])])
+
class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
@@ -149,8 +262,8 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
'mock_is_block_device',
return_value=True)
- self.add_patch('cloudinit.config.cc_mounts.util.subp',
- 'm_util_subp')
+ self.add_patch('cloudinit.config.cc_mounts.subp.subp',
+ 'm_subp_subp')
self.add_patch('cloudinit.config.cc_mounts.util.mounts',
'mock_util_mounts',
@@ -177,6 +290,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
return dev
+ def test_no_fstab(self):
+ """Handle images which do not include an fstab."""
+ self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
+ fstab_expected_content = (
+ '%s\tnone\tswap\tsw,comment=cloudconfig\t'
+ '0\t0\n' % (self.swap_path,)
+ )
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ with open(cc_mounts.FSTAB_PATH, 'r') as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
def test_swap_integrity(self):
'''Ensure that the swap file is correctly created and can
swapon successfully. Fixing the corner case of:
@@ -254,15 +379,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
'/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
)
fstab_expected_content = fstab_original_content
- cc = {'mounts': [
- ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]}
+ cc = {
+ 'mounts': [
+ ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
+ ]
+ }
with open(cc_mounts.FSTAB_PATH, 'w') as fd:
fd.write(fstab_original_content)
with open(cc_mounts.FSTAB_PATH, 'r') as fd:
fstab_new_content = fd.read()
self.assertEqual(fstab_expected_content, fstab_new_content)
cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
- self.m_util_subp.assert_has_calls([
+ self.m_subp_subp.assert_has_calls([
mock.call(['mount', '-a']),
mock.call(['systemctl', 'daemon-reload'])])
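
The new TestSwapFileCreation cases pin down how cc_mounts picks its swap-file creation command: dd on btrfs, dd on xfs with older kernels, fallocate otherwise. A hedged sketch of that decision (the 4.18 cutoff for xfs is inferred from the (3, 18) vs (4, 20) kernels the tests use; the authoritative threshold lives in cc_mounts):

    def choose_swap_cmd(fstype, kernel, path, size_mb):
        # dd is the safe fallback: fallocate-created files on btrfs, or on
        # xfs with older kernels, can be rejected by swapon.
        if fstype == "btrfs" or (fstype == "xfs" and kernel < (4, 18)):
            return ["dd", "if=/dev/zero", "of=" + path,
                    "bs=1M", "count=%d" % size_mb]
        return ["fallocate", "-l", "%dM" % size_mb, path]

    assert choose_swap_cmd("xfs", (3, 18), "/swap.img", 512)[0] == "dd"
    assert choose_swap_cmd("btrfs", (4, 20), "/swap.img", 512)[0] == "dd"
    assert choose_swap_cmd("xfs", (4, 20), "/swap.img", 512)[0] == "fallocate"
    assert choose_swap_cmd("ext4", (5, 14), "/swap.img", 512)[0] == "fallocate"
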
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 463d892a..6b9c8377 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -83,50 +83,50 @@ class TestNtp(FilesystemMockingTestCase):
ntpconfig['template_name'] = os.path.basename(confpath)
return ntpconfig
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install(self, mock_subp):
"""ntp_install_client runs install_func when check_exe is absent."""
- mock_util.which.return_value = None # check_exe not found.
+ mock_subp.which.return_value = None # check_exe not found.
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func,
packages=['ntpx'], check_exe='ntpdx')
- mock_util.which.assert_called_with('ntpdx')
+ mock_subp.which.assert_called_with('ntpdx')
install_func.assert_called_once_with(['ntpx'])
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install_not_needed(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install_not_needed(self, mock_subp):
"""ntp_install_client doesn't install when check_exe is found."""
client = 'chrony'
- mock_util.which.return_value = [client] # check_exe found.
+ mock_subp.which.return_value = [client] # check_exe found.
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func, packages=[client],
check_exe=client)
install_func.assert_not_called()
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_ntp_install_no_op_with_empty_pkg_list(self, mock_subp):
"""ntp_install_client runs install_func with empty list"""
- mock_util.which.return_value = None # check_exe not found
+ mock_subp.which.return_value = None # check_exe not found
install_func = mock.MagicMock()
cc_ntp.install_ntp_client(install_func, packages=[],
check_exe='timesyncd')
install_func.assert_called_once_with([])
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_reload_ntp_defaults(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_reload_ntp_defaults(self, mock_subp):
"""Test service is restarted/reloaded (defaults)"""
service = 'ntp_service_name'
cmd = ['service', service, 'restart']
cc_ntp.reload_ntp(service)
- mock_util.subp.assert_called_with(cmd, capture=True)
+ mock_subp.subp.assert_called_with(cmd, capture=True)
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_reload_ntp_systemd(self, mock_util):
+ @mock.patch("cloudinit.config.cc_ntp.subp")
+ def test_reload_ntp_systemd(self, mock_subp):
"""Test service is restarted/reloaded (systemd)"""
service = 'ntp_service_name'
cc_ntp.reload_ntp(service, systemd=True)
cmd = ['systemctl', 'reload-or-restart', service]
- mock_util.subp.assert_called_with(cmd, capture=True)
+ mock_subp.subp.assert_called_with(cmd, capture=True)
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
@@ -239,6 +239,35 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(delta[distro][client][key],
result[client][key])
+ def _get_expected_pools(self, pools, distro, client):
+ if client in ['ntp', 'chrony']:
+ if client == 'ntp' and distro == 'alpine':
+ # NTP for Alpine Linux is Busybox's ntp which does not
+ # support 'pool' lines in its configuration file.
+ expected_pools = []
+ else:
+ expected_pools = [
+ 'pool {0} iburst'.format(pool) for pool in pools]
+ elif client == 'systemd-timesyncd':
+ expected_pools = " ".join(pools)
+
+ return expected_pools
+
+ def _get_expected_servers(self, servers, distro, client):
+ if client in ['ntp', 'chrony']:
+ if client == 'ntp' and distro == 'alpine':
+ # NTP for Alpine Linux is Busybox's ntp which only supports
+ # 'server' lines without iburst option.
+ expected_servers = [
+ 'server {0}'.format(srv) for srv in servers]
+ else:
+ expected_servers = [
+ 'server {0} iburst'.format(srv) for srv in servers]
+ elif client == 'systemd-timesyncd':
+ expected_servers = " ".join(servers)
+
+ return expected_servers
+
def test_ntp_handler_real_distro_ntp_templates(self):
"""Test ntp handler renders the shipped distro ntp client templates."""
pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
@@ -269,27 +298,35 @@ class TestNtp(FilesystemMockingTestCase):
content = util.load_file(confpath)
if client in ['ntp', 'chrony']:
content_lines = content.splitlines()
- expected_servers = [
- 'server {0} iburst'.format(srv) for srv in servers]
+ expected_servers = self._get_expected_servers(servers,
+ distro,
+ client)
print('distro=%s client=%s' % (distro, client))
for sline in expected_servers:
self.assertIn(sline, content_lines,
('failed to render {0} conf'
' for distro:{1}'.format(client,
distro)))
- expected_pools = [
- 'pool {0} iburst'.format(pool) for pool in pools]
- for pline in expected_pools:
- self.assertIn(pline, content_lines,
- ('failed to render {0} conf'
- ' for distro:{1}'.format(client,
- distro)))
+ expected_pools = self._get_expected_pools(pools, distro,
+ client)
+ if expected_pools != []:
+ for pline in expected_pools:
+ self.assertIn(pline, content_lines,
+ ('failed to render {0} conf'
+ ' for distro:{1}'.format(client,
+ distro)))
elif client == 'systemd-timesyncd':
+ expected_servers = self._get_expected_servers(servers,
+ distro,
+ client)
+ expected_pools = self._get_expected_pools(pools,
+ distro,
+ client)
expected_content = (
"# cloud-init generated file\n" +
"# See timesyncd.conf(5) for details.\n\n" +
- "[Time]\nNTP=%s %s \n" % (" ".join(servers),
- " ".join(pools)))
+ "[Time]\nNTP=%s %s \n" % (expected_servers,
+ expected_pools))
self.assertEqual(expected_content, content)
def test_no_ntpcfg_does_nothing(self):
@@ -312,10 +349,20 @@ class TestNtp(FilesystemMockingTestCase):
confpath = ntpconfig['confpath']
m_select.return_value = ntpconfig
cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
- pools = cc_ntp.generate_server_names(mycloud.distro.name)
- self.assertEqual(
- "servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
+ if distro == 'alpine':
+ # _mock_ntp_client_config call above did not specify a
+ # client value and so it defaults to "ntp" which on
+ # Alpine Linux only supports servers and not pools.
+
+ servers = cc_ntp.generate_server_names(mycloud.distro.name)
+ self.assertEqual(
+ "servers {0}\npools []\n".format(servers),
+ util.load_file(confpath))
+ else:
+ pools = cc_ntp.generate_server_names(mycloud.distro.name)
+ self.assertEqual(
+ "servers []\npools {0}\n".format(pools),
+ util.load_file(confpath))
self.assertNotIn('Invalid config:', self.logs.getvalue())
@skipUnlessJsonSchema()
@@ -374,18 +421,19 @@ class TestNtp(FilesystemMockingTestCase):
invalid_config = {
'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
for distro in cc_ntp.distros:
- mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
- m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
- self.assertIn(
- "Invalid config:\nntp: Additional properties are not allowed "
- "('invalidkey' was unexpected)",
- self.logs.getvalue())
- self.assertEqual(
- "servers []\npools ['0.mycompany.pool.ntp.org']\n",
- util.load_file(confpath))
+ if distro != 'alpine':
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ self.assertIn(
+ "Invalid config:\nntp: Additional properties are not "
+ "allowed ('invalidkey' was unexpected)",
+ self.logs.getvalue())
+ self.assertEqual(
+ "servers []\npools ['0.mycompany.pool.ntp.org']\n",
+ util.load_file(confpath))
@skipUnlessJsonSchema()
@mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
@@ -440,9 +488,10 @@ class TestNtp(FilesystemMockingTestCase):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
+ @mock.patch("cloudinit.config.cc_ntp.subp")
@mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select):
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp):
"""Test enabled config renders template, and restarts service """
cfg = {'ntp': {'enabled': True}}
for distro in cc_ntp.distros:
@@ -451,24 +500,35 @@ class TestNtp(FilesystemMockingTestCase):
confpath = ntpconfig['confpath']
service_name = ntpconfig['service_name']
m_select.return_value = ntpconfig
- pools = cc_ntp.generate_server_names(mycloud.distro.name)
- # force uses systemd path
- m_sysd.return_value = True
+
+ hosts = cc_ntp.generate_server_names(mycloud.distro.name)
+ uses_systemd = True
+ expected_service_call = ['systemctl', 'reload-or-restart',
+ service_name]
+ expected_content = "servers []\npools {0}\n".format(hosts)
+
+ if distro == 'alpine':
+ uses_systemd = False
+ expected_service_call = ['service', service_name, 'restart']
+ # _mock_ntp_client_config call above did not specify a client
+ # value and so it defaults to "ntp" which on Alpine Linux only
+ # supports servers and not pools.
+ expected_content = "servers {0}\npools []\n".format(hosts)
+
+ m_sysd.return_value = uses_systemd
with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
# allow use of util.mergemanydict
m_util.mergemanydict.side_effect = util.mergemanydict
# default client is present
- m_util.which.return_value = True
+ m_subp.which.return_value = True
# use the config 'enabled' value
m_util.is_false.return_value = util.is_false(
cfg['ntp']['enabled'])
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
- m_util.subp.assert_called_with(
- ['systemctl', 'reload-or-restart',
- service_name], capture=True)
- self.assertEqual(
- "servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
+ m_subp.subp.assert_called_with(
+ expected_service_call, capture=True)
+
+ self.assertEqual(expected_content, util.load_file(confpath))
def test_opensuse_picks_chrony(self):
"""Test opensuse picks chrony or ntp on certain distro versions"""
@@ -503,7 +563,7 @@ class TestNtp(FilesystemMockingTestCase):
expected_client = mycloud.distro.preferred_ntp_clients[0]
self.assertEqual('ntp', expected_client)
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_snappy_system_picks_timesyncd(self, m_which):
"""Test snappy systems prefer installed clients"""
@@ -528,7 +588,7 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(cfg))
self.assertEqual(sorted(expected_cfg), sorted(result))
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_distro_searches_all_preferred_clients(self, m_which):
"""Test select_ntp_client search all distro perferred clients """
# nothing is installed
@@ -546,7 +606,7 @@ class TestNtp(FilesystemMockingTestCase):
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
"""Test user_cfg.ntp_client='auto' defaults to distro search"""
# nothing is installed
@@ -566,7 +626,7 @@ class TestNtp(FilesystemMockingTestCase):
@mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
@mock.patch('cloudinit.cloud.Cloud.get_template_filename')
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_custom_client_overrides_installed_clients(self, m_which,
m_tmpfn, m_write):
"""Test user client is installed despite other clients present """
@@ -582,7 +642,7 @@ class TestNtp(FilesystemMockingTestCase):
m_install.assert_called_with([client])
m_which.assert_called_with(client)
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
"""Test distro system_config overrides builtin preferred ntp clients"""
system_client = 'chrony'
@@ -597,7 +657,7 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.util.which')
+ @mock.patch('cloudinit.config.cc_ntp.subp.which')
def test_ntp_user_config_overrides_system_cfg(self, m_which):
"""Test user-data overrides system_config ntp_client"""
system_client = 'chrony'
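
The Alpine branches added throughout this file stem from one fact the comments state: Alpine ships BusyBox ntpd, which accepts only plain 'server' lines, so pools render empty and the iburst option is dropped. The two helper methods condensed into a runnable sketch (mirrors the test helpers, not the template code itself):

    def expected_lines(distro, client, servers, pools):
        if client == "ntp" and distro == "alpine":
            return ["server %s" % s for s in servers]  # no iburst, no pools
        return (["server %s iburst" % s for s in servers] +
                ["pool %s iburst" % p for p in pools])

    assert expected_lines("alpine", "ntp", ["a"], ["p"]) == ["server a"]
    assert "pool p iburst" in expected_lines("ubuntu", "ntp", ["a"], ["p"])
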
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 0d8d17b9..93b24fdc 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -11,62 +11,63 @@ from cloudinit.tests.helpers import mock
class TestLoadPowerState(t_help.TestCase):
def test_no_config(self):
# completely empty config should mean do nothing
- (cmd, _timeout, _condition) = psc.load_power_state({})
+ (cmd, _timeout, _condition) = psc.load_power_state({}, 'ubuntu')
self.assertIsNone(cmd)
def test_irrelevant_config(self):
# no power_state field in config should return None for cmd
- (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'})
+ (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
+ 'ubuntu')
self.assertIsNone(cmd)
def test_invalid_mode(self):
cfg = {'power_state': {'mode': 'gibberish'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
+ self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
cfg = {'power_state': {'mode': ''}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
+ self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
def test_empty_mode(self):
cfg = {'power_state': {'message': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
+ self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
def test_valid_modes(self):
cfg = {'power_state': {}}
for mode in ('halt', 'poweroff', 'reboot'):
cfg['power_state']['mode'] = mode
- check_lps_ret(psc.load_power_state(cfg), mode=mode)
+ check_lps_ret(psc.load_power_state(cfg, 'ubuntu'), mode=mode)
def test_invalid_delay(self):
cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
+ self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
def test_valid_delay(self):
cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
for delay in ("now", "+1", "+30"):
cfg['power_state']['delay'] = delay
- check_lps_ret(psc.load_power_state(cfg))
+ check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
def test_message_present(self):
cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
- ret = psc.load_power_state(cfg)
- check_lps_ret(psc.load_power_state(cfg))
+ ret = psc.load_power_state(cfg, 'ubuntu')
+ check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
self.assertIn(cfg['power_state']['message'], ret[0])
def test_no_message(self):
# if message is not present, then no argument should be passed for it
cfg = {'power_state': {'mode': 'poweroff'}}
- (cmd, _timeout, _condition) = psc.load_power_state(cfg)
+ (cmd, _timeout, _condition) = psc.load_power_state(cfg, 'ubuntu')
self.assertNotIn("", cmd)
- check_lps_ret(psc.load_power_state(cfg))
+ check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
self.assertTrue(len(cmd) == 3)
def test_condition_null_raises(self):
cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
- self.assertRaises(TypeError, psc.load_power_state, cfg)
+ self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
def test_condition_default_is_true(self):
cfg = {'power_state': {'mode': 'poweroff'}}
- _cmd, _timeout, cond = psc.load_power_state(cfg)
+ _cmd, _timeout, cond = psc.load_power_state(cfg, 'ubuntu')
self.assertEqual(cond, True)
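
Every load_power_state() call in this file gains a distro-name argument; presumably the handler uses it to build a distro-appropriate shutdown command (the rest of the commit adds Alpine support, whose BusyBox tooling differs from GNU shutdown). A stand-in showing only the new call shape; the body is illustrative, not cc_power_state_change's actual logic:

    def load_power_state(cfg, distro_name):
        # distro_name is where per-distro command selection would hook in.
        ps = cfg.get('power_state')
        if ps is None:
            return (None, None, None)
        return (['shutdown', '-P', ps.get('delay', 'now')],
                ps.get('timeout', 30), ps.get('condition', True))

    cmd, _timeout, cond = load_power_state(
        {'power_state': {'mode': 'poweroff', 'delay': 'now'}}, 'ubuntu')
    assert cmd[0] == 'shutdown' and cond is True
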
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index 1494177d..62388ac6 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -12,13 +12,11 @@ import textwrap
LOG = logging.getLogger(__name__)
-@mock.patch('cloudinit.config.cc_puppet.util')
+@mock.patch('cloudinit.config.cc_puppet.subp.subp')
@mock.patch('cloudinit.config.cc_puppet.os')
class TestAutostartPuppet(CiTestCase):
- with_logs = True
-
- def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_util):
+ def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
"""Update /etc/default/puppet to autostart if it exists."""
def _fake_exists(path):
@@ -29,9 +27,9 @@ class TestAutostartPuppet(CiTestCase):
self.assertEqual(
[mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)],
- m_util.subp.call_args_list)
+ m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_util):
+ def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp):
"""If systemctl is present, enable puppet via systemctl."""
def _fake_exists(path):
@@ -41,9 +39,9 @@ class TestAutostartPuppet(CiTestCase):
cc_puppet._autostart_puppet(LOG)
expected_calls = [mock.call(
['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
- self.assertEqual(expected_calls, m_util.subp.call_args_list)
+ self.assertEqual(expected_calls, m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_util):
+ def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp):
"""If chkconfig is present, enable puppet via checkcfg."""
def _fake_exists(path):
@@ -53,7 +51,7 @@ class TestAutostartPuppet(CiTestCase):
cc_puppet._autostart_puppet(LOG)
expected_calls = [mock.call(
['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
- self.assertEqual(expected_calls, m_util.subp.call_args_list)
+ self.assertEqual(expected_calls, m_subp.call_args_list)
@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
@@ -83,7 +81,7 @@ class TestPuppetHandle(CiTestCase):
"no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_auto.call_count)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
"""Cloud-config 'puppet' configuration starts puppet."""
mycloud = self._get_cloud('ubuntu')
@@ -94,7 +92,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(['service', 'puppet', 'start'], capture=False)],
m_subp.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
mycloud = self._get_cloud('ubuntu')
@@ -105,7 +103,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
"""Cloud-config with 'puppet' key installs when 'install' is True."""
mycloud = self._get_cloud('ubuntu')
@@ -116,7 +114,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', None))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
"""Cloud-config 'puppet' configuration can specify a version."""
mycloud = self._get_cloud('ubuntu')
@@ -127,7 +125,7 @@ class TestPuppetHandle(CiTestCase):
[mock.call(('puppet', '3.8'))],
mycloud.distro.install_packages.call_args_list)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
mycloud = self._get_cloud('ubuntu')
@@ -143,7 +141,7 @@ class TestPuppetHandle(CiTestCase):
expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
self.assertEqual(expected, content)
- @mock.patch('cloudinit.config.cc_puppet.util.subp')
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp')
def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
@@ -151,15 +149,20 @@ class TestPuppetHandle(CiTestCase):
mycloud.distro = mock.MagicMock()
cfg = {
'puppet': {
- 'csr_attributes': {
- 'custom_attributes': {
- '1.2.840.113549.1.9.7': '342thbjkt82094y0ut'
- 'hhor289jnqthpc2290'},
- 'extension_requests': {
- 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
- 'pp_image_name': 'my_ami_image',
- 'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290'}
- }}}
+ 'csr_attributes': {
+ 'custom_attributes': {
+ '1.2.840.113549.1.9.7':
+ '342thbjkt82094y0uthhor289jnqthpc2290'
+ },
+ 'extension_requests': {
+ 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
+ 'pp_image_name': 'my_ami_image',
+ 'pp_preshared_key':
+ '342thbjkt82094y0uthhor289jnqthpc2290'
+ }
+ }
+ }
+ }
csr_attributes = 'cloudinit.config.cc_puppet.' \
'PUPPET_CSR_ATTRIBUTES_PATH'
with mock.patch(csr_attributes, self.csr_attributes_path):
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
index 9ce334ac..73237d68 100644
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ b/tests/unittests/test_handler/test_handler_runcmd.py
@@ -2,7 +2,7 @@
from cloudinit.config.cc_runcmd import handle, schema
from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
+from cloudinit import (distros, helpers, cloud, subp, util)
from cloudinit.tests.helpers import (
CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
skipUnlessJsonSchema)
@@ -20,7 +20,7 @@ class TestRuncmd(FilesystemMockingTestCase):
def setUp(self):
super(TestRuncmd, self).setUp()
- self.subp = util.subp
+ self.subp = subp.subp
self.new_root = self.tmp_dir()
def _get_cloud(self, distro):
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index abecc53b..85167f19 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -17,6 +17,7 @@ from io import BytesIO
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources import DataSourceNone
@@ -35,8 +36,8 @@ class TestRandomSeed(t_help.TestCase):
self.unapply = []
# by default 'which' has nothing in its path
- self.apply_patches([(util, 'which', self._which)])
- self.apply_patches([(util, 'subp', self._subp)])
+ self.apply_patches([(subp, 'which', self._which)])
+ self.apply_patches([(subp, 'subp', self._subp)])
self.subp_called = []
self.whichdata = {}
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py
index 410e6f77..26f7648f 100644
--- a/tests/unittests/test_handler/test_handler_spacewalk.py
+++ b/tests/unittests/test_handler/test_handler_spacewalk.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.config import cc_spacewalk
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests import helpers
@@ -19,20 +19,20 @@ class TestSpacewalk(helpers.TestCase):
}
}
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_not_is_registered(self, mock_util_subp):
- mock_util_subp.side_effect = util.ProcessExecutionError(exit_code=1)
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_not_is_registered(self, mock_subp):
+ mock_subp.side_effect = subp.ProcessExecutionError(exit_code=1)
self.assertFalse(cc_spacewalk.is_registered())
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_is_registered(self, mock_util_subp):
- mock_util_subp.side_effect = None
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_is_registered(self, mock_subp):
+ mock_subp.side_effect = None
self.assertTrue(cc_spacewalk.is_registered())
- @mock.patch("cloudinit.config.cc_spacewalk.util.subp")
- def test_do_register(self, mock_util_subp):
+ @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+ def test_do_register(self, mock_subp):
cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
- mock_util_subp.assert_called_with([
+ mock_subp.assert_called_with([
'rhnreg_ks',
'--serverUrl', 'https://localhost/XMLRPC',
'--profilename', 'test',
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py
index ed0a4da2..727681d3 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/test_handler/test_handler_write_files.py
@@ -1,15 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
+import copy
import gzip
import io
import shutil
import tempfile
+from cloudinit.config.cc_write_files import (
+ handle, decode_perms, write_files)
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.config.cc_write_files import write_files, decode_perms
-from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase
+
+from cloudinit.tests.helpers import (
+ CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
LOG = logging.getLogger(__name__)
@@ -36,13 +40,90 @@ YAML_CONTENT_EXPECTED = {
'/tmp/message': "hi mom line 1\nhi mom line 2\n",
}
+VALID_SCHEMA = {
+ 'write_files': [
+ {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
+ 'path': '/some', 'permissions': '0777'}
+ ]
+}
+
+INVALID_SCHEMA = { # Dropped required path key
+ 'write_files': [
+ {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
+ 'permissions': '0777'}
+ ]
+}
+
+
+@skipUnlessJsonSchema()
+@mock.patch('cloudinit.config.cc_write_files.write_files')
+class TestWriteFilesSchema(CiTestCase):
+
+ with_logs = True
+
+ def test_schema_validation_warns_missing_path(self, m_write_files):
+ """The only required file item property is 'path'."""
+ cc = self.tmp_cloud('ubuntu')
+ valid_config = {'write_files': [{'path': '/some/path'}]}
+ handle('cc_write_file', valid_config, cc, LOG, [])
+ self.assertNotIn('Invalid config:', self.logs.getvalue())
+ handle('cc_write_file', INVALID_SCHEMA, cc, LOG, [])
+ self.assertIn('Invalid config:', self.logs.getvalue())
+ self.assertIn("'path' is a required property", self.logs.getvalue())
+
+ def test_schema_validation_warns_non_string_type_for_files(
+ self, m_write_files):
+ """Schema validation warns of non-string values for each file item."""
+ cc = self.tmp_cloud('ubuntu')
+ for key in VALID_SCHEMA['write_files'][0].keys():
+ if key == 'append':
+ key_type = 'boolean'
+ else:
+ key_type = 'string'
+ invalid_config = copy.deepcopy(VALID_SCHEMA)
+ invalid_config['write_files'][0][key] = 1
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ mock.call('cc_write_file', invalid_config['write_files']),
+ m_write_files.call_args_list)
+ self.assertIn(
+ 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type),
+ self.logs.getvalue())
+ self.assertIn('Invalid config:', self.logs.getvalue())
+
+ def test_schema_validation_warns_on_additional_undefined_propertes(
+ self, m_write_files):
+ """Schema validation warns on additional undefined file properties."""
+ cc = self.tmp_cloud('ubuntu')
+ invalid_config = copy.deepcopy(VALID_SCHEMA)
+ invalid_config['write_files'][0]['bogus'] = 'value'
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Invalid config:\nwrite_files.0: Additional properties"
+ " are not allowed ('bogus' was unexpected)",
+ self.logs.getvalue())
+
class TestWriteFiles(FilesystemMockingTestCase):
+
+ with_logs = True
+
def setUp(self):
super(TestWriteFiles, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_type(self):
+ """Schema validation warns of non-array value."""
+ invalid_config = {'write_files': 1}
+ cc = self.tmp_cloud('ubuntu')
+ with self.assertRaises(TypeError):
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ 'Invalid config:\nwrite_files: 1 is not of type \'array\'',
+ self.logs.getvalue())
+
def test_simple(self):
self.patchUtils(self.tmp)
expected = "hello world\n"
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 0675bd8f..7c61bbf9 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -1,14 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_yum_add_repo
-from cloudinit import util
-
-from cloudinit.tests import helpers
-
+import configparser
import logging
import shutil
import tempfile
-from io import StringIO
+
+from cloudinit import util
+from cloudinit.config import cc_yum_add_repo
+from cloudinit.tests import helpers
LOG = logging.getLogger(__name__)
@@ -54,7 +53,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'epel_testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
@@ -90,7 +90,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'puppetlabs_products': {
'name': 'Puppet Labs Products El 6 - $basearch',
diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
index 9685ff28..0fb1de1a 100644
--- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
@@ -1,17 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import configparser
import glob
+import logging
import os
-from io import StringIO
-from cloudinit.config import cc_zypper_add_repo
from cloudinit import util
-
+from cloudinit.config import cc_zypper_add_repo
from cloudinit.tests import helpers
from cloudinit.tests.helpers import mock
-import logging
-
LOG = logging.getLogger(__name__)
@@ -66,7 +64,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
root_d = self.tmp_dir()
cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
contents = util.load_file("%s/testing-foo.repo" % root_d)
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'testing-foo': {
'name': 'test-foo',
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 987a89c9..44292571 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import cloudinit
from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
get_schema_doc, get_schema, validate_cloudconfig_file,
@@ -10,7 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from copy import copy
import os
+import pytest
from io import StringIO
+from pathlib import Path
from textwrap import dedent
from yaml import safe_load
@@ -20,16 +22,21 @@ class GetSchemaTest(CiTestCase):
def test_get_schema_coalesces_known_schema(self):
"""Every cloudconfig module with schema is listed in allOf keyword."""
schema = get_schema()
- self.assertItemsEqual(
+ self.assertCountEqual(
[
+ 'cc_apk_configure',
+ 'cc_apt_configure',
'cc_bootcmd',
+ 'cc_locale',
'cc_ntp',
'cc_resizefs',
'cc_runcmd',
'cc_snap',
'cc_ubuntu_advantage',
'cc_ubuntu_drivers',
- 'cc_zypper_add_repo'
+ 'cc_write_files',
+ 'cc_zypper_add_repo',
+ 'cc_chef'
],
[subschema['id'] for subschema in schema['allOf']])
self.assertEqual('cloud-config-schema', schema['id'])
@@ -38,7 +45,7 @@ class GetSchemaTest(CiTestCase):
schema['$schema'])
# FULL_SCHEMA is updated by the get_schema call
from cloudinit.config.schema import FULL_SCHEMA
- self.assertItemsEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
+ self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
def test_get_schema_returns_global_when_set(self):
"""When FULL_SCHEMA global is already set, get_schema returns it."""
@@ -110,6 +117,23 @@ class ValidateCloudConfigSchemaTest(CiTestCase):
str(context_mgr.exception))
+class TestCloudConfigExamples:
+ schema = get_schema()
+ params = [
+ (schema["id"], example)
+ for schema in schema["allOf"] for example in schema["examples"]]
+
+ @pytest.mark.parametrize("schema_id,example", params)
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_of_example(self, schema_id, example):
+ """Test that each example in a config module is valid according
+ to the unified schema of all config modules.
+ """
+ config_load = safe_load(example)
+ validate_cloudconfig_schema(
+ config_load, self.schema, strict=True)
+
+
class ValidateCloudConfigFileTest(CiTestCase):
"""Tests for validate_cloudconfig_file."""
@@ -268,6 +292,41 @@ class GetSchemaDocTest(CiTestCase):
"""),
get_schema_doc(full_schema))
+ def test_get_schema_doc_properly_parse_description(self):
+ """get_schema_doc description properly formatted"""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {'properties': {
+ 'p1': {
+ 'type': 'string',
+ 'description': dedent("""\
+ This item
+ has the
+ following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is
+ option1""")
+ }
+ }}
+ )
+
+ self.assertIn(
+ dedent("""
+ **Config schema**:
+ **p1:** (string) This item has the following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is option1
+ """),
+ get_schema_doc(full_schema))
+
def test_get_schema_doc_raises_key_errors(self):
"""get_schema_doc raises KeyErrors on missing keys."""
for key in self.required_schema:
@@ -345,34 +404,30 @@ class MainTest(CiTestCase):
def test_main_missing_args(self):
"""Main exits non-zero and reports an error on missing parameters."""
- with mock.patch('sys.exit', side_effect=self.sys_exit):
- with mock.patch('sys.argv', ['mycmd']):
- with mock.patch('sys.stderr', new_callable=StringIO) as \
- m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
+ with mock.patch('sys.argv', ['mycmd']):
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with self.assertRaises(SystemExit) as context_manager:
+ main()
self.assertEqual(1, context_manager.exception.code)
self.assertEqual(
- 'Expected either --config-file argument or --doc\n',
+ 'Expected either --config-file argument or --docs\n',
m_stderr.getvalue())
def test_main_absent_config_file(self):
"""Main exits non-zero when config file is absent."""
myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
- with mock.patch('sys.exit', side_effect=self.sys_exit):
- with mock.patch('sys.argv', myargs):
- with mock.patch('sys.stderr', new_callable=StringIO) as \
- m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
+ with mock.patch('sys.argv', myargs):
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with self.assertRaises(SystemExit) as context_manager:
+ main()
self.assertEqual(1, context_manager.exception.code)
self.assertEqual(
'Configfile NOT_A_FILE does not exist\n',
m_stderr.getvalue())
def test_main_prints_docs(self):
- """When --doc parameter is provided, main generates documentation."""
- myargs = ['mycmd', '--doc']
+ """When --docs parameter is provided, main generates documentation."""
+ myargs = ['mycmd', '--docs', 'all']
with mock.patch('sys.argv', myargs):
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
self.assertEqual(0, main(), 'Expected 0 exit code')
@@ -430,4 +485,23 @@ class CloudTestsIntegrationTest(CiTestCase):
if errors:
raise AssertionError(', '.join(errors))
+
+def _get_schema_doc_examples():
+ examples_dir = Path(
+ cloudinit.__file__).parent.parent / 'doc' / 'examples'
+ assert examples_dir.is_dir()
+
+ all_text_files = (f for f in examples_dir.glob('cloud-config*.txt')
+ if not f.name.startswith('cloud-config-archive'))
+ return all_text_files
+
+
+class TestSchemaDocExamples:
+ schema = get_schema()
+
+ @pytest.mark.parametrize("example_path", _get_schema_doc_examples())
+ @skipUnlessJsonSchema()
+ def test_schema_doc_examples(self, example_path):
+ validate_cloudconfig_file(str(example_path), self.schema)
+
# vi: ts=4 expandtab syntax=python
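
test_schema.py picks up two plain pytest classes (no CiTestCase) that parametrize over every example embedded in the module schemas and over each doc/examples/cloud-config*.txt file. The collection idiom, reduced to a self-contained example with stand-in data:

    import pytest
    import yaml

    EXAMPLES = [  # stand-in for the (schema_id, example) pairs from get_schema()
        ("cc_ntp", "ntp:\n  pools: ['0.pool.ntp.org']"),
        ("cc_runcmd", "runcmd:\n - [ls, -l]"),
    ]

    @pytest.mark.parametrize("schema_id,example", EXAMPLES)
    def test_example_parses(schema_id, example):
        # The real tests feed safe_load(example) into
        # validate_cloudconfig_schema(..., strict=True).
        assert yaml.safe_load(example)
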
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index bedd05fe..54cc8469 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -8,6 +8,7 @@ from cloudinit.net import (
renderers, sysconfig)
from cloudinit.sources.helpers import openstack
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit import safeyaml as yaml
@@ -24,6 +25,7 @@ import re
import textwrap
from yaml.serializer import Serializer
+import pytest
DHCP_CONTENT_1 = """
DEVICE='eth0'
@@ -424,6 +426,11 @@ network:
mtu: 9000
parameters:
gratuitous-arp: 2
+ bond2:
+ interfaces:
+ - ens5
+ macaddress: 68:05:ca:64:d3:6e
+ mtu: 9000
ethernets:
ens3:
dhcp4: false
@@ -435,6 +442,11 @@ network:
dhcp6: false
match:
macaddress: 52:54:00:11:22:ff
+ ens5:
+ dhcp4: false
+ dhcp6: false
+ match:
+ macaddress: 52:54:00:99:11:99
version: 2
"""
@@ -943,7 +955,7 @@ NETWORK_CONFIGS = {
dhcp6: true
""").rstrip(' '),
'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ 'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
STARTMODE=auto""")
@@ -1027,7 +1039,7 @@ NETWORK_CONFIGS = {
},
'v6_and_v4': {
'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ 'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
STARTMODE=auto""")
@@ -3191,7 +3203,7 @@ USERCTL=no
def test_check_ifcfg_rh(self):
"""ifcfg-rh plugin is added NetworkManager.conf if conf present."""
render_dir = self.tmp_dir()
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a list here
@@ -3214,7 +3226,7 @@ USERCTL=no
"""ifcfg-rh plugin is append when plugins is a string."""
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a value here
@@ -3239,7 +3251,7 @@ USERCTL=no
"""enable_ifcfg_plugin creates plugins value if missing."""
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file)
+ nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is missing
@@ -3331,7 +3343,7 @@ USERCTL=no
USERCTL=no
VLAN=yes
""")
- }
+ }
self._compare_files_to_expected(
expected, self._render_and_read(network_config=v2data))
@@ -3405,7 +3417,7 @@ USERCTL=no
TYPE=Ethernet
USERCTL=no
"""),
- }
+ }
for dhcp_ver in ('dhcp4', 'dhcp6'):
v2data = copy.deepcopy(v2base)
if dhcp_ver == 'dhcp6':
@@ -3919,7 +3931,7 @@ class TestNetplanCleanDefault(CiTestCase):
files = sorted(populate_dir(tmpd, content))
netplan._clean_default(target=tmpd)
found = [t for t in files if os.path.exists(t)]
- expected = [util.target_path(tmpd, f) for f in (astamp, anet, ayaml)]
+ expected = [subp.target_path(tmpd, f) for f in (astamp, anet, ayaml)]
self.assertEqual(sorted(expected), found)
@@ -3932,7 +3944,7 @@ class TestNetplanPostcommands(CiTestCase):
@mock.patch.object(netplan.Renderer, '_netplan_generate')
@mock.patch.object(netplan.Renderer, '_net_setup_link')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_netplan_render_calls_postcmds(self, mock_subp,
mock_netplan_generate,
mock_net_setup_link):
@@ -3946,7 +3958,7 @@ class TestNetplanPostcommands(CiTestCase):
render_target = 'netplan.yaml'
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': True})
- mock_subp.side_effect = iter([util.ProcessExecutionError])
+ mock_subp.side_effect = iter([subp.ProcessExecutionError])
renderer.render_network_state(ns, target=render_dir)
mock_netplan_generate.assert_called_with(run=True)
@@ -3954,7 +3966,7 @@ class TestNetplanPostcommands(CiTestCase):
@mock.patch('cloudinit.util.SeLinuxGuard')
@mock.patch.object(netplan, "get_devicelist")
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel):
mock_sel.__enter__ = mock.Mock(return_value=False)
mock_sel.__exit__ = mock.Mock()
@@ -3970,7 +3982,7 @@ class TestNetplanPostcommands(CiTestCase):
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': True})
mock_subp.side_effect = iter([
- util.ProcessExecutionError,
+ subp.ProcessExecutionError,
('', ''),
('', ''),
])
@@ -4017,6 +4029,8 @@ class TestEniNetworkStateToEni(CiTestCase):
class TestCmdlineConfigParsing(CiTestCase):
+ with_logs = True
+
simple_cfg = {
'config': [{"type": "physical", "name": "eth0",
"mac_address": "c0:d6:9f:2c:e8:80",
@@ -4066,6 +4080,21 @@ class TestCmdlineConfigParsing(CiTestCase):
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
+ def test_cmdline_with_net_config_disabled(self):
+ raw_cmdline = 'ro network-config=disabled root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
+ self.assertEqual(found, {'config': 'disabled'})
+
+ def test_cmdline_with_net_config_unencoded_logs_error(self):
+ """network-config must be base64 encoded unless set to 'disabled'."""
+ raw_cmdline = 'ro network-config={config:disabled} root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
+ self.assertIsNone(found)
+ expected_log = (
+ 'ERROR: Expected base64 encoded kernel commandline parameter'
+ ' network-config. Ignoring network-config={config:disabled}.')
+ self.assertIn(expected_log, self.logs.getvalue())
+
def test_cmdline_with_b64_gz(self):
data = _gzip_data(json.dumps(self.simple_cfg).encode())
encoded_text = base64.b64encode(data).decode()
@@ -4242,7 +4271,7 @@ class TestNetplanRoundTrip(CiTestCase):
def setUp(self):
super(TestNetplanRoundTrip, self).setUp()
- self.add_patch('cloudinit.net.netplan.util.subp', 'm_subp')
+ self.add_patch('cloudinit.net.netplan.subp.subp', 'm_subp')
self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '')
def _render_and_read(self, network_config=None, state=None,
@@ -4654,6 +4683,51 @@ class TestEniRoundTrip(CiTestCase):
files['/etc/network/interfaces'].splitlines())
+class TestRenderersSelect:
+
+ @pytest.mark.parametrize(
+ 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ # -netplan -ifupdown -nm -scfg -sys raises error
+ (net.RendererNotFoundError, False, False, False, False, False),
+ # -netplan +ifupdown -nm -scfg -sys selects eni
+ ('eni', False, True, False, False, False),
+ # +netplan +ifupdown -nm -scfg -sys selects eni
+ ('eni', True, True, False, False, False),
+ # +netplan -ifupdown -nm -scfg -sys selects netplan
+ ('netplan', True, False, False, False, False),
+ # Ubuntu with Network-Manager installed
+ # +netplan -ifupdown +nm -scfg -sys selects netplan
+ ('netplan', True, False, True, False, False),
+ # Centos/OpenSuse with Network-Manager installed selects sysconfig
+ # -netplan -ifupdown +nm -scfg +sys selects sysconfig
+ ('sysconfig', False, False, True, False, True),
+ ),
+ )
+ @mock.patch("cloudinit.net.renderers.netplan.available")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_valid_renderer_from_defaults_depending_on_availability(
+ self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
+ m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ ):
+ """Assert proper renderer per DEFAULT_PRIORITY given availability."""
+ m_eni_avail.return_value = eni # ifupdown pkg presence
+ m_nm_avail.return_value = nm # network-manager presence
+ m_scfg_avail.return_value = scfg # sysconfig/ifup/ifdown script presence
+ m_sys_avail.return_value = sys # sysconfig renderer availability
+ m_netplan_avail.return_value = netplan # netplan presence
+ if isinstance(renderer_selected, str):
+ (renderer_name, _rnd_class) = renderers.select(
+ priority=renderers.DEFAULT_PRIORITY
+ )
+ assert renderer_selected == renderer_name
+ else:
+ with pytest.raises(renderer_selected):
+ renderers.select(priority=renderers.DEFAULT_PRIORITY)
+
+
class TestNetRenderers(CiTestCase):
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
@@ -4697,58 +4771,18 @@ class TestNetRenderers(CiTestCase):
self.assertRaises(net.RendererNotFoundError, renderers.select,
priority=['sysconfig', 'eni'])
- @mock.patch("cloudinit.net.renderers.netplan.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
- @mock.patch("cloudinit.net.renderers.eni.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.util.get_linux_distro")
- def test_sysconfig_selected_on_sysconfig_enabled_distros(self, m_distro,
- m_eni, m_sys_nm,
- m_sys_scfg,
- m_sys_avail,
- m_netplan):
- """sysconfig only selected on specific distros (rhel/sles)."""
-
- # Ubuntu with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = True # netplan is installed
- m_sys_avail.return_value = False # no sysconfig on Ubuntu
- m_distro.return_value = ('ubuntu', None, None)
- self.assertEqual('netplan', renderers.select(priority=None)[0])
-
- # Centos with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = False # netplan is not installed
- m_sys_avail.return_value = True # sysconfig is available on centos
- m_distro.return_value = ('centos', None, None)
- self.assertEqual('sysconfig', renderers.select(priority=None)[0])
-
- # OpenSuse with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = False # netplan is not installed
- m_sys_avail.return_value = True # sysconfig is available on opensuse
- m_distro.return_value = ('opensuse', None, None)
- self.assertEqual('sysconfig', renderers.select(priority=None)[0])
-
@mock.patch("cloudinit.net.sysconfig.available_sysconfig")
@mock.patch("cloudinit.util.get_linux_distro")
def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail):
m_avail.return_value = True
distro_values = [
- ('opensuse', '', ''),
- ('opensuse-leap', '', ''),
- ('opensuse-tumbleweed', '', ''),
- ('sles', '', ''),
- ('centos', '', ''),
- ('fedora', '', ''),
- ('redhat', '', ''),
+ ('opensuse', '', ''),
+ ('opensuse-leap', '', ''),
+ ('opensuse-tumbleweed', '', ''),
+ ('sles', '', ''),
+ ('centos', '', ''),
+ ('fedora', '', ''),
+ ('redhat', '', ''),
]
for (distro_name, distro_version, flavor) in distro_values:
m_distro.return_value = (distro_name, distro_version, flavor)
@@ -5134,7 +5168,7 @@ def _gzip_data(data):
class TestRenameInterfaces(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_all(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
@@ -5165,7 +5199,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_no_driver_no_device_id(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', None, None),
@@ -5196,7 +5230,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_all_bounce(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
@@ -5231,7 +5265,7 @@ class TestRenameInterfaces(CiTestCase):
mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True)
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_duplicate_macs(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
@@ -5260,7 +5294,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_duplicate_macs_driver_no_devid(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None),
@@ -5289,7 +5323,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_multi_mac_dups(self, mock_subp):
renames = [
('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
@@ -5328,7 +5362,7 @@ class TestRenameInterfaces(CiTestCase):
capture=True),
])
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_rename_macs_case_insensitive(self, mock_subp):
"""_rename_interfaces must support upper or lower case macs."""
renames = [
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 48296c30..414b4830 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -7,7 +7,7 @@ SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
class TestInterfacesByMac(CiTestCase):
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
@mock.patch('cloudinit.util.is_FreeBSD')
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
new file mode 100644
index 00000000..495e2669
--- /dev/null
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -0,0 +1,59 @@
+"""Tests for tools/render-cloudcfg"""
+
+import os
+import sys
+
+import pytest
+
+from cloudinit import subp
+from cloudinit import util
+
+# TODO: align with tools/render-cloudcfg or cloudinit.distros.OSFAMILIES
+DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
+ "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+
+
+@pytest.mark.allow_subp_for(sys.executable)
+class TestRenderCloudCfg:
+
+ cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
+ tmpl_path = os.path.realpath('config/cloud.cfg.tmpl')
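+ # Each test shells out to the real render-cloudcfg tool (hence the
+ # allow_subp_for marker above) and parses the YAML it renders.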
+
+ @pytest.mark.parametrize('variant', DISTRO_VARIANTS)
+ def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
+ outfile = tmpdir.join('outcfg').strpath
+ subp.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+ if variant == 'unknown':
+ variant = 'ubuntu' # 'unknown' defaults to ubuntu
+ assert system_cfg['system_info']['distro'] == variant
+
+ @pytest.mark.parametrize('variant', DISTRO_VARIANTS)
+ def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir):
+ outfile = tmpdir.join('outcfg').strpath
+ subp.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+
+ default_user_exceptions = {
+ 'amazon': 'ec2-user', 'debian': 'ubuntu', 'unknown': 'ubuntu'}
+ default_user = system_cfg['system_info']['default_user']['name']
+ assert default_user == default_user_exceptions.get(variant, variant)
+
+ @pytest.mark.parametrize('variant,renderers', (
+ ('freebsd', ['freebsd']), ('netbsd', ['netbsd']),
+ ('openbsd', ['openbsd']), ('ubuntu', ['netplan', 'eni', 'sysconfig']))
+ )
+ def test_variant_sets_network_renderer_priority_in_cloud_cfg(
+ self, variant, renderers, tmpdir
+ ):
+ outfile = tmpdir.join('outcfg').strpath
+ subp.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+
+ assert renderers == system_cfg['system_info']['network']['renderers']
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 6814030e..9f11fd5c 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -349,7 +349,6 @@ class TestReportingEventStack(TestCase):
with parent:
with child:
pass
- pass
self.assertEqual(report_start.call_count, 0)
self.assertEqual(report_finish.call_count, 0)
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index b3e083c6..47ede670 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -1,7 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import base64
+import zlib
-from cloudinit.reporting import events
-from cloudinit.reporting.handlers import HyperVKvpReportingHandler
+from cloudinit.reporting import events, instantiated_handler_registry
+from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler
import json
import os
@@ -72,7 +74,7 @@ class TextKvpReporter(CiTestCase):
def test_event_very_long(self):
reporter = HyperVKvpReportingHandler(
kvp_file_path=self.tmp_file_path)
- description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+ description = 'ab' * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE
long_event = events.FinishReportingEvent(
'event_name',
description,
@@ -93,10 +95,15 @@ class TextKvpReporter(CiTestCase):
def test_not_truncate_kvp_file_modified_after_boot(self):
with open(self.tmp_file_path, "wb+") as f:
kvp = {'key': 'key1', 'value': 'value1'}
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ kvp["key"].encode("utf-8"),
+ kvp["value"].encode("utf-8"),
+ )
f.write(data)
cur_time = time.time()
os.utime(self.tmp_file_path, (cur_time, cur_time))
@@ -131,11 +138,13 @@ class TextKvpReporter(CiTestCase):
self.assertEqual(0, len(kvps))
@mock.patch('cloudinit.distros.uses_systemd')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_boot_telemetry(self, m_subp, m_sysd):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
- datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]"
- r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)"
+ datetime_pattern = (
+ r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]"
+ r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)"
+ )
# get_boot_telemetry makes two subp calls to systemctl. We provide
# a list of values that the subp calls should return
@@ -192,6 +201,72 @@ class TextKvpReporter(CiTestCase):
if "test_diagnostic" not in evt_msg:
raise AssertionError("missing expected diagnostic message")
+ def test_report_compressed_event(self):
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ try:
+ instantiated_handler_registry.register_item("telemetry", reporter)
+ event_desc = b'test_compressed'
+ azure.report_compressed_event(
+ "compressed event", event_desc)
+
+ self.validate_compressed_kvps(reporter, 1, [event_desc])
+ finally:
+ instantiated_handler_registry.unregister_item("telemetry",
+ force=False)
+
+ @mock.patch.object(LogHandler, 'publish_event')
+ def test_push_log_to_kvp(self, publish_event):
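+ # push_log_to_kvp should publish only the log tail (MAX_LOG_TO_KVP_LENGTH
+ # bytes) as a compressed KVP event; LogHandler events stay uncompressed.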
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ try:
+ instantiated_handler_registry.register_item("telemetry", reporter)
+ log_file = self.tmp_path("cloud-init.log")
+ azure.MAX_LOG_TO_KVP_LENGTH = 100
+ azure.LOG_PUSHED_TO_KVP_MARKER_FILE = self.tmp_path(
+ 'log_pushed_to_kvp')
+ with open(log_file, "w") as f:
+ log_content = "A" * 50 + "B" * 100
+ f.write(log_content)
+ azure.push_log_to_kvp(log_file)
+
+ with open(log_file, "a") as f:
+ extra_content = "C" * 10
+ f.write(extra_content)
+ azure.push_log_to_kvp(log_file)
+
+ for call_arg in publish_event.call_args_list:
+ event = call_arg[0][0]
+ self.assertNotEqual(
+ event.event_type, azure.COMPRESSED_EVENT_TYPE)
+ self.validate_compressed_kvps(
+ reporter, 1,
+ [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode()])
+ finally:
+ instantiated_handler_registry.unregister_item("telemetry",
+ force=False)
+
+ def validate_compressed_kvps(self, reporter, count, values):
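+ # Drain the reporter queue, then verify every compressed KVP decodes
+ # (base64 + zlib, encoding "gz+b64") back to the expected payload bytes.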
+ reporter.q.join()
+ kvps = list(reporter._iterate_kvps(0))
+ compressed_count = 0
+ for kvp in kvps:
+ kvp_value = kvp['value']
+ kvp_value_json = json.loads(kvp_value)
+ evt_msg = kvp_value_json["msg"]
+ evt_type = kvp_value_json["type"]
+ if evt_type != azure.COMPRESSED_EVENT_TYPE:
+ continue
+ evt_msg_json = json.loads(evt_msg)
+ evt_encoding = evt_msg_json["encoding"]
+ evt_data = zlib.decompress(
+ base64.decodebytes(evt_msg_json["data"].encode("ascii")))
+
+ self.assertLess(compressed_count, len(values))
+ self.assertEqual(evt_data, values[compressed_count])
+ self.assertEqual(evt_encoding, "gz+b64")
+ compressed_count += 1
+ self.assertEqual(compressed_count, count)
+
def test_unique_kvp_key(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
evt1 = events.ReportingEvent(
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index 4cd27eed..53d3cd5a 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -6,7 +6,7 @@ import copy
import logging
from cloudinit.config import cc_rh_subscription
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase, mock
@@ -56,7 +56,7 @@ class GoodTests(CiTestCase):
'''
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')]
+ m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')]
self.handle(self.name, self.config, self.cloud_init,
self.log, self.args)
self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
@@ -93,7 +93,7 @@ class GoodTests(CiTestCase):
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
m_sman_cli.side_effect = [
- util.ProcessExecutionError,
+ subp.ProcessExecutionError,
(reg, 'bar'),
('Service level set to: self-support', ''),
('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
@@ -161,7 +161,7 @@ class TestBadInput(CiTestCase):
def test_no_password(self, m_sman_cli):
'''Attempt to register without the password key/value.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_no_password, self.cloud_init,
self.log, self.args)
@@ -169,7 +169,7 @@ class TestBadInput(CiTestCase):
def test_no_org(self, m_sman_cli):
'''Attempt to register without the org key/value.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError]
+ m_sman_cli.side_effect = [subp.ProcessExecutionError]
self.handle(self.name, self.config_no_key, self.cloud_init,
self.log, self.args)
m_sman_cli.assert_called_with(['identity'])
@@ -182,7 +182,7 @@ class TestBadInput(CiTestCase):
def test_service_level_without_auto(self, m_sman_cli):
'''Attempt to register using service-level without auto-attach key.'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_service, self.cloud_init,
self.log, self.args)
@@ -195,7 +195,7 @@ class TestBadInput(CiTestCase):
'''
Register with pools that are not in the format of a list
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badpool, self.cloud_init,
self.log, self.args)
@@ -208,7 +208,7 @@ class TestBadInput(CiTestCase):
'''
Register with repos that are not in the format of a list
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badrepo, self.cloud_init,
self.log, self.args)
@@ -222,7 +222,7 @@ class TestBadInput(CiTestCase):
'''
Attempt to register with a key that we don't know
'''
- m_sman_cli.side_effect = [util.ProcessExecutionError,
+ m_sman_cli.side_effect = [subp.ProcessExecutionError,
(self.reg, 'bar')]
self.handle(self.name, self.config_badkey, self.cloud_init,
self.log, self.args)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 0be41924..fd1d1bac 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -15,6 +15,9 @@ FakePwEnt.__new__.__defaults__ = tuple(
"UNSET_%s" % n for n in FakePwEnt._fields)
+# Do not reuse these public keys: most are taken from the OpenSSH test
+# data, and the matching private keys are publicly available at
+# https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata
VALID_CONTENT = {
'dsa': (
"AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
@@ -41,24 +44,238 @@ VALID_CONTENT = {
"YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
"/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
),
+ 'ed25519': (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb"
+ "8lnDd"
+ ),
+ 'ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "AAAAKGVjZHNhLXNoYTItbmlzdHAyNTYtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
+ "gQIfwT/+UX68/hlKsdKuaOuAVB6ftTg03SlP/uH4OBEwAAAAIbmlzdHAyNTYAAA"
+ "BBBEjA0gjJmPM6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXw"
+ "HvNxplYBwdPlk2zEecvf9Cs2BMAAAAAAAAAAAAAAAEAAAAYa2V5cy9lY2RzYS1z"
+ "aGEyLW5pc3RwMjU2AAAAAAAAAAAAAAAA//////////8AAAAAAAAAggAAABVwZXJ"
+ "taXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW"
+ "5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtc"
+ "HR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAAAaAAAABNlY2RzYS1z"
+ "aGEyLW5pc3RwMjU2AAAACG5pc3RwMjU2AAAAQQRH6Y9Q1+ocQ8ETKW3LjQqtxg7"
+ "OuSSDacxmmQatQVaIawwjCbmntyEAqmVj3v9ElDSXnO5m7TyYMBQu4+vsh76RAA"
+ "AAZQAAABNlY2RzYS1zaGEyLW5pc3RwMjU2AAAASgAAACEA47Cl2MMhr+glPGuxx"
+ "2tM3QXkDcwdP0SxSEW5yy4XV5oAAAAhANNMm1cdVlAt3hmycQgdD82zPlg5YvVO"
+ "iN0SQTbgVD8i"
+ ),
'ecdsa-sha2-nistp256': (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMy/WuXq5MF"
- "r5hVQ9EEKKUTF7vUaOkgxUh6bNsCs9SFMVslIm1zM/WJYwUv52LdEePjtDYiV4A"
- "l2XthJ9/bs7Pc="
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEjA0gjJmPM"
+ "6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXwHvNxplYBwdPlk"
+ "2zEecvf9Cs2BM="
),
- 'ecdsa-sha2-nistp521': (
- "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBABOdNTkh9F"
- "McK4hZRLs5LTXBEXwNr0+Yg9uvJYRFcz2ZlnjYX9tM4Z3QQFjqogU4pU+zpKLqZ"
- "5VE4Jcnb1T608UywBIdXkSFZT8trGJqBv9nFWGgmTX3KP8kiBbihpuv1cGwglPl"
- "Hxs50A42iP0JiT7auGtEAGsu/uMql323GTGb4171Q=="
+ 'ecdsa-sha2-nistp384-cert-v01@openssh.com': (
+ "AAAAKGVjZHNhLXNoYTItbmlzdHAzODQtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
+ "grnSvDsK1EnCZndO1IyGWcGkVgVSkPWi/XO2ybPFyLVUAAAAIbmlzdHAzODQAAA"
+ "BhBAaYSQs+8TT0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaY"
+ "ByhXtAJiPOMqLU5h0eb3sCtM3ek4NvjXFTGTqPrrxJI6q0OsgrtkGE7UM9ZsfMm"
+ "7q6BOAAAAAAAAAAAAAAAAQAAABhrZXlzL2VjZHNhLXNoYTItbmlzdHAzODQAAAA"
+ "AAAAAAAAAAAD//////////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZG"
+ "luZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pd"
+ "C1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1p"
+ "dC11c2VyLXJjAAAAAAAAAAAAAACIAAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAA"
+ "IbmlzdHAzODQAAABhBLWbubcMzcWc7lMTCMGVXZlaVvUOHLjpr6SOOScFFrd8K9"
+ "Gl8nYELST5HZ1gym65m+MG6/tbrUWIY/flLWNIe+WtqxrdPPGdIhFruCwNw2peZ"
+ "SbQOa/o3AGnJ/vO6EKEGAAAAIQAAAATZWNkc2Etc2hhMi1uaXN0cDM4NAAAAGkA"
+ "AAAxAL10JHd5bvnbpD+fet/k1YE1BEIrqGXaoIIJ9ReE5H4nTK1uQJzMD7+wwGK"
+ "RVYqYQgAAADAiit0UCMDAUbjD+R2x4LvU3x/t8G3sdqDLRNfMRpjZpvcS8AwC+Y"
+ "VFVSQNn0AyzW0="
),
'ecdsa-sha2-nistp384': (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAnoqFU9Gnl"
- "LcsEuCJnobs/c6whzvjCgouaOO61kgXNtIxyF4Wkutg6xaGYgBBt/phb7a2TurI"
- "bcIBuzJ/mP22UyUAbNnBfStAEBmYbrTf1EfiMCYUAr1XnL0UdYmZ8HFg=="
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAaYSQs+8TT"
+ "0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaYByhXtAJiPOMqL"
+ "U5h0eb3sCtM3ek4NvjXFTGTqPrrxJI6q0OsgrtkGE7UM9ZsfMm7q6BOA=="
+ ),
+ 'ecdsa-sha2-nistp521-cert-v01@openssh.com': (
+ "AAAAKGVjZHNhLXNoYTItbmlzdHA1MjEtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
+ "gGmRzkkMvRFk1V5U3m3mQ2nfW20SJVXk1NKnT5iZGDcEAAAAIbmlzdHA1MjEAAA"
+ "CFBAHosAOHAI1ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94C"
+ "Q8yyNHcby87zFZHdv5BSKyZ/cyREAAeiAcSakop9VS3+bUfZpEIqwBZXarwUjnR"
+ "nxprkcQ0rfCCdagkGZr/OA7DemK2D8tKLTHsKoEEWNImo6/pXDkFxAAAAAAAAAA"
+ "AAAAAAQAAABhrZXlzL2VjZHNhLXNoYTItbmlzdHA1MjEAAAAAAAAAAAAAAAD///"
+ "///////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXc"
+ "GVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndh"
+ "cmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAA"
+ "AAAAAAAAAAACsAAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAA"
+ "CFBAC6hFVXM1XEg/7qKkp5sLZuANGQVW88b5pPn2ZcK0td9IQstLH6BwWuZ6MPE"
+ "ogiDlvx9HD1BaKGBBfkxgOY8NGFzQHbjU9eTWH3gt0RATDbZsij1pSkFPnAXdU9"
+ "SjfogYloI2xdHaTCgWp3zgsUV+BBQ0QGGv2MqqcOmrF0f5YEJeOffAAAAKcAAAA"
+ "TZWNkc2Etc2hhMi1uaXN0cDUyMQAAAIwAAABCAT+vSOYPuYVTDopDW08576d5Sb"
+ "edXQMOu1op4CQIm98VKtAXvu5dfioi5VYAqpte8M+UxEMOMiQWJp+U9exYf6LuA"
+ "AAAQgEzkIpX3yKXPaPcK17mNx40ujEDitm4ARmbhAge0sFhZtf7YIgI55b6vkI8"
+ "JvMJkzQCBF1cpNOaIpVh1nFZNBphMQ=="
+ ),
+ 'ecdsa-sha2-nistp521': (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAHosAOHAI1"
+ "ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94CQ8yyNHcby87zF"
+ "ZHdv5BSKyZ/cyREAAeiAcSakop9VS3+bUfZpEIqwBZXarwUjnRnxprkcQ0rfCCd"
+ "agkGZr/OA7DemK2D8tKLTHsKoEEWNImo6/pXDkFxA=="
+ ),
+ 'sk-ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
+ "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
+ "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
+ "DEAAAAFaG9zdDIAAAAANowB8AAAAABNHmBwAAAAAAAAAAAAAAAAAAAAMwAAAAtz"
+ "c2gtZWQyNTUxOQAAACBThupGO0X+FLQhbz8CoKPwc7V3JNsQuGtlsgN+F7SMGQA"
+ "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
+ "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
+ ),
+ 'sk-ecdsa-sha2-nistp256@openssh.com': (
+ "AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHA"
+ "yNTYAAABBBIELQJ2DgvaX1yQlKFokfWM2suuaCFI2qp0eJodHyg6O4ifxc3XpRK"
+ "d1OS8dNYQtE/YjdXSrA+AOnMF5ns2Nkx4AAAAEc3NoOg=="
+ ),
+ 'sk-ssh-ed25519-cert-v01@openssh.com': (
+ "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
+ "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
+ "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
+ "DEAAAAFaG9zdDIAAAAANowB8AAAAABNHmBwAAAAAAAAAAAAAAAAAAAAMwAAAAtz"
+ "c2gtZWQyNTUxOQAAACBThupGO0X+FLQhbz8CoKPwc7V3JNsQuGtlsgN+F7SMGQA"
+ "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
+ "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
+ ),
+ 'sk-ssh-ed25519@openssh.com': (
+ "AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAICFo/k5LU8863u66YC9"
+ "eUO2170QduohPURkQnbLa/dczAAAABHNzaDo="
+ ),
+ 'ssh-dss-cert-v01@openssh.com': (
+ "AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgdTlbNU9Hn9Qng3F"
+ "HxwH971bxCIoq1ern/QWFFDWXgmYAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0c"
+ "Fn1zYd/JGvtabKnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4"
+ "yLB+6vCtHcJF7rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DD"
+ "jMF0k5emWKCsa3ZfAAAAFQCjA/+dKkMu4/CWjJPtfl7YNaStNQAAAIEA7uX1BVV"
+ "tJKjLmWrpw62+l/xSXA5rr7MHBuWjiCYV3VHBfXJaQDyRDtGuEJKDwdzqYgacpG"
+ "ApGWL/cuBtJ9nShsUl6GRG0Ra03g+Hx9VR5LviJBsjAVB4qVgciU1NGga0Bt2Le"
+ "cd1X4EGQRBzVXeuOpiqGM6jP/I2yDMs0Pboet0AAACBAOdXpyfmobEBaOqZAuvg"
+ "j1P0uhjG2P31Ufurv22FWPBU3A9qrkxbOXwE0LwvjCvrsQV/lrYhJz/tiys40Ve"
+ "ahulWZE5SAHMXGIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1Uuy"
+ "QMcUtb34+I0u9Ycnyhp2mSFsQtAAAAAAAAAAYAAAACAAAABmp1bGl1cwAAABIAA"
+ "AAFaG9zdDEAAAAFaG9zdDIAAAAANowB8AAAAABNHmBwAAAAAAAAAAAAAAAAAAAA"
+ "MwAAAAtzc2gtZWQyNTUxOQAAACBThupGO0X+FLQhbz8CoKPwc7V3JNsQuGtlsgN"
+ "+F7SMGQAAAFMAAAALc3NoLWVkMjU1MTkAAABAh/z1LIdNL1b66tQ8t9DY9BTB3B"
+ "QKpTKmc7ezyFKLwl96yaIniZwD9Ticdbe/8i/Li3uCFE3EAt8NAIv9zff8Bg=="
+ ),
+ 'ssh-dss': (
+ "AAAAB3NzaC1kc3MAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0cFn1zYd/JGvtab"
+ "KnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4yLB+6vCtHcJF7"
+ "rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DDjMF0k5emWKCsa"
+ "3ZfAAAAFQCjA/+dKkMu4/CWjJPtfl7YNaStNQAAAIEA7uX1BVVtJKjLmWrpw62+"
+ "l/xSXA5rr7MHBuWjiCYV3VHBfXJaQDyRDtGuEJKDwdzqYgacpGApGWL/cuBtJ9n"
+ "ShsUl6GRG0Ra03g+Hx9VR5LviJBsjAVB4qVgciU1NGga0Bt2Lecd1X4EGQRBzVX"
+ "euOpiqGM6jP/I2yDMs0Pboet0AAACBAOdXpyfmobEBaOqZAuvgj1P0uhjG2P31U"
+ "furv22FWPBU3A9qrkxbOXwE0LwvjCvrsQV/lrYhJz/tiys40VeahulWZE5SAHMX"
+ "GIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1UuyQMcUtb34+I0u9"
+ "Ycnyhp2mSFsQt"
+ ),
+ 'ssh-ed25519-cert-v01@openssh.com': (
+ "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
+ "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
+ "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
+ "DEAAAAFaG9zdDIAAAAANowB8AAAAABNHmBwAAAAAAAAAAAAAAAAAAAAMwAAAAtz"
+ "c2gtZWQyNTUxOQAAACBThupGO0X+FLQhbz8CoKPwc7V3JNsQuGtlsgN+F7SMGQA"
+ "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
+ "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
+ ),
+ 'ssh-ed25519': (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34"
+ "XtIwZ"
+ ),
+ 'ssh-rsa-cert-v01@openssh.com': (
+ "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAg98LhS2EHxLOWCLo"
+ "pZPwHdg/RJXusnkOqQXSc9R7aITkAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGh"
+ "EZzpoojjEW5y8+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yM"
+ "rW6wb84gbq8C31Xoe9EORcIUuGSvDKdNSM1SjlhDquRblDFB8kToqXyx1lqrXec"
+ "XylxIUOL0jE+u0rU1967pDJx+wAAAAAAAAAFAAAAAgAAAAZqdWxpdXMAAAASAAA"
+ "ABWhvc3QxAAAABWhvc3QyAAAAADaMAfAAAAAATR5gcAAAAAAAAAAAAAAAAAAAAD"
+ "MAAAALc3NoLWVkMjU1MTkAAAAgU4bqRjtF/hS0IW8/AqCj8HO1dyTbELhrZbIDf"
+ "he0jBkAAABTAAAAC3NzaC1lZDI1NTE5AAAAQI3QGlUCzC07KorupxpDkkGy6tni"
+ "aZ8EvBflzvv+itXWNchGvfUeHmVT6aX0sRqehdz/lR+GmXRoZBhofwh0qAM="
+ ),
+ 'ssh-rsa': (
+ "AAAAB3NzaC1yc2EAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGhEZzpoojjEW5y8"
+ "+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yMrW6wb84gbq8C3"
+ "1Xoe9EORcIUuGSvDKdNSM1SjlhDquRblDFB8kToqXyx1lqrXecXylxIUOL0jE+u"
+ "0rU1967pDJx+w=="
+ ),
+ 'ssh-xmss-cert-v01@openssh.com': (
+ "AAAAHXNzaC14bXNzLWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIM2UD0IH+Igsekq"
+ "xjTO5f36exX4WGRMCtDGPjwfbXblxAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0gxMA"
+ "AAAEDI83/K5JMOy0BMJgQypRdz35ApAnoQinMJ8ZMoZPaEJF8Z4rANQlfzaAXum"
+ "N3RDU5CGIUGGw+WJ904G/wwEq9CAAAAAAAAAAAAAAABAAAACWtleXMveG1zcwAA"
+ "AAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJ"
+ "kaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybW"
+ "l0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVyb"
+ "Wl0LXVzZXItcmMAAAAAAAAAAAAAAHUAAAAUc3NoLXhtc3NAb3BlbnNzaC5jb20A"
+ "AAAVWE1TU19TSEEyLTI1Nl9XMTZfSDEwAAAAQA+irIyT2kaOd07YWZT/QItzNBZ"
+ "kUYwnqZJihQ7BxuyiDP4HEFbnfYnnIZXx9Asyi7vDyZRvi+AMSOzmMSq4JnkAAA"
+ "ngAAAAFHNzaC14bXNzQG9wZW5zc2guY29tAAAJxAAAAAAFjaKTDc+7Hu2uFGIab"
+ "3NAku8HbbGtrq/uGXOxmqxu4RaLqmwofl5iXk3nMwWEhQAb99vAc9D9ZFtfxJO4"
+ "STYUTjbj4BxToov/uvbYfE5VeO6sMvkGglgh9YHkCTAItsG8EmGT1SIPfKYzLlN"
+ "jvUlbcv0PaPFMJ0wzS9mNfuRf+KUhf3dxQ6zaMrBH3KEJ8Me2kNjhnh6rNPROeI"
+ "N+IcStSKsydYuiySGKS/orsH38XysuK5QqLizbHJY3cqLbkW9LsIijb+pfEJh4Y"
+ "bOoAbraWAv9ySnWCyRhvw2x8uJ0ZM+p5WSRiZfB3JxCpOhHgiKa9TdmdjnAtnED"
+ "zqKOj/gM7y9mesn5ydQI0bENOGymlw0ThUGKbXMxn87Hc9dDPURUBmoO3NGjPDf"
+ "7meS39A1ZEGtCe/pbZU9iwxqGx4wJYvB4lutRP2tYC1pA6hjQCcHibvxl5iqj+1"
+ "jRjwPr8dbTm4PdETW/7JDSVQXKjxOT0kRLHLelJNeviGx5zSHR5PtnUP3nOBMme"
+ "hk9DwcQW9vfKeWSnu9CMnF8xvYJxoPKQwmz0TKo+YVOUnc9/Ma+Ykseof9/W+rk"
+ "USQGELc4x7XE5XBKYZZP2PmtxirQ3qTWFw+CeTX2Oa+jPYkzOa7jgmHJ3Fi9Xqw"
+ "3L844vRl97e28GmwS0M1SXH+ohES0mO4EcrGh5OLyXBaRTV5QMo+4Bg6FH/HwEn"
+ "gG1mdEOAqvctK2QC70c4lHGzfexqwQ2U6WUADPcd/BLOE8Noj1EiXYwZrSA1okZ"
+ "FYnS/b89Uo51D2FE4A33V4gcxAglGzVNtrPulkguNT9B4jjNgdIwkTBL9k3ujkG"
+ "og6pyYjZ0J5Jp5XPBn+y0LqrpOdZijzrc1OJbX59tTeIbDkM7Fw8As4a03hQPDU"
+ "FTOdyMHgLnuLhLXOcqIjvW5axZL/Kx3UET8wrSHizPoa6NErCG4v5mC2M4kBSOW"
+ "In1QV27QMaHkL/ZAa3mPsW5iFZtOVEGzw2BW4MZs0qOrcloCENZzOHiMBroKEkH"
+ "AbzX6D1FLwml2JpXq4JXlCrdIiFm4+co5ygnWPqb4QGzMlcbjW/x/A16TthNuok"
+ "wwlmK5ndKZ76LahyGKEwx2Nv0D+0xilEC1EldtiYRdBNlcGbU/A5EhH5bQ9KVIH"
+ "wjWm35pRPLl5224//nqvQKhwFCn9otsR35XHXev3IQ0or3HmQxIvSDOwir1l66z"
+ "FFrkyHMWexoucbTBxw1MN3hLb247lcVYJ5+hspJgyoYbfR5RkQVDzhpzskogP7l"
+ "K5t0bphu+f+hpvrca7DAiiIZkcR4R1UUQoRnJPRXyXOxlxwS10b51cP9p9jzvZj"
+ "d2LUs8yx1KXWSxNHo6WmtYONNaUfdX2OB5+QCvPULfLfFeBrqpX6Yp5wQMM5Cup"
+ "k8FEfV07eEgQkVE9nDGKHglWo3kUdOF+XCqWAnXn0b/2bNS9/SSAz6gB1GTFcN/"
+ "QsFGlC0QgbCJbQ7LQM6hilRWupWvN5zZ/+HJyyRHuSs5VnQnKiGbIa6AIhx7mP7"
+ "8T82gKjU3mHLJWMGKcT3cY8R958Gs+w4OT71VJRMw3kK6qk02WCbD5OtbFeC6ib"
+ "KRJKdLK3BzjVs/Fzu3mHVucVby3jpvG1Z8HKspKFhvV7gjFEPu8qHKi4MdAlif/"
+ "KakyPk8yZB/dMfaxh7Kv/WpJuSwWNs7RNh29e+ZG+POxqRPWiHqiVw7P17a4dN7"
+ "nkVOawdBEyxI4NAY+4zW+0r0bAy6zNBitBvkq3IXfr3De6Upex52sPHvK04PXoV"
+ "RI6gjnpPSbLLjpSpcHPKgB7DWefLfhd63BUQbc57D8zm8Jd6qtmzcSKn+wz5/zT"
+ "0I6v9I4a+DOjjyqpPpzzNU76pt+Y8SuBgHzMm1vcAdNWlbQrqtScvm0T9AkYni6"
+ "47vSh77uwRZKDtMCMSU151tVUavXhtLYLZ6/ll5NhMXkkx8//i7pk1OBjN5LHVQ"
+ "0QeimRmavlXU1dJ2rwsFAV+9dDdJXUNOq3VLTo9FrbOzZiWtzzjkJpVJAFREnBn"
+ "yIDBK5AXtXE1RzfzaBHzbI2e2kO3t+CSNLWYMFYHBDqaeICYQ9+I9aO/8hnzVSo"
+ "fp+8IfWO8iJhppqynUniicW2oCzrn4oczzYNEjImt8CGY7g90GxWfX+ZgXMJfy/"
+ "bQiFQL3dZvVypDHEbFoIGz+sxkL83xrP4MZV1V9Wwa64lDXYv01Kp4kQXmmnAZY"
+ "KlxBoWqYDXLeLLguSOZxDSCIDpd+YPm39wQ3wOysHW2fmsWtp6FPPlQRUYjsGIP"
+ "lfrkJzpoeaPKDtF1m+mOULfEh9kvTKCmKRi385T9ON39D97eWqaM4CCfUGImvdR"
+ "DlZLXvjmaAh5BVJ8VJxk75OkP14vWFFlTMv0/k4BYLDKsrNqCREC/G9nQBGcD2D"
+ "CLwC2zPNaX2Y9dnyDs2csjN1ibsYttUMnXMgBcnCOkIkVS496Bpc0jQMf35GUgb"
+ "PSyliwqCoXjEBP/2eyq0VLFKQ0fXGsHWvElT+Y/7RYNTiYVWttFMxN5H/2EGcgn"
+ "lfNHLpQvXH9u/3YminS9GX30hQ7jFhpHXxkK8gZ1mpHL9K3pfKS3lG6EF9wQ23O"
+ "qS8m995SG3dp3MzmywxXen/ukXx6bDiEl5VaOvdRUcbhr5Eb3exVDfdWiaJdTYF"
+ "WfIfJOWx88drB3J9vFwjmuaoNEOjFsoNAMYthYOxXraXaJblvmUKz6tJ3T8/G7x"
+ "B9QGYNBsOqBolKoKHBtsWCosLdWhEZr9VFFh2AJrOW1fx24CIkHnvfTtwYORvQq"
+ "Ckuq2bZS1EOdsFkU/X5gwPl6gSUTNhV3IooXkBFL3iBEbfZ6JpQHVVyIuNWjIyN"
+ "b2liCn9Nn0VHeNMMRLl7uyw4eKlOX2ogom8SLvihYxcJoqlCwtehpLsKsU4iwME"
+ "PmDteW5GBGf4GbnqPFkpIT5ed1jGhdZt/dpsp+v6QhYH1uX4pPxdkdnuc84/yb9"
+ "k4SQdKBJ+l3KZkfIxApNWOZqicJfz/eWwS/15hiamRKRuiiUV2zS1V+l8bV7g9O"
+ "gy5scPBMONxtfFlGEKikZKurFmzboCOGQKRBEUCpsY44IAp443h59pQdVIb0YAS"
+ "kfp2xKHwYij6ELRNdH5MrlFa3bNTskGO4k5XDR4cl/Sma2SXgBKb5XjTtlNmCQG"
+ "Gv6lOW7pGXNhs5wfd8K9Ukm6KeLTIlYn1iiKM37YQpa+4JQYljCYhumbqNCkPTZ"
+ "rNYClh8fQEQ8XuOCDpomMWu58YOTfbZNMDWs/Ou7RfCjX+VNwjPShDK9joMwWKc"
+ "Jy3QalZbaoWtcyyvXxR2sqhVR9F7Cmasq4="
+ ),
+ 'ssh-xmss@openssh.com': (
+ "AAAAFHNzaC14bXNzQG9wZW5zc2guY29tAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0g"
+ "xMAAAAECqptWnK94d+Sj2xcdTu8gz+75lawZoLSZFqC5IhbYuT/Z3oBZCim6yt+"
+ "HAmk6MKldl3Fg+74v4sR/SII0I0Jv/"
),
}
+KEY_TYPES = list(VALID_CONTENT.keys())
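+# The parser tests below iterate over every key type defined above.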
+
TEST_OPTIONS = (
"no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
'command="echo \'Please login as the user \"ubuntu\" rather than the'
@@ -70,13 +287,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
parser = ssh_util.AuthKeyLineParser()
- ecdsa_types = [
- 'ecdsa-sha2-nistp256',
- 'ecdsa-sha2-nistp384',
- 'ecdsa-sha2-nistp521',
- ]
-
- for ktype in ['rsa', 'ecdsa', 'dsa'] + ecdsa_types:
+ for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
comment = 'user-%s@host' % ktype
line = ' '.join((ktype, content, comment,))
@@ -90,7 +301,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
def test_parse_no_comment(self):
# test key line with key type and base64 only
parser = ssh_util.AuthKeyLineParser()
- for ktype in ['rsa', 'ecdsa', 'dsa']:
+ for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
line = ' '.join((ktype, content,))
key = parser.parse(line)
@@ -104,7 +315,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
# test key line with options in it
parser = ssh_util.AuthKeyLineParser()
options = TEST_OPTIONS
- for ktype in ['rsa', 'ecdsa', 'dsa']:
+ for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
comment = 'user-%s@host' % ktype
line = ' '.join((options, ktype, content, comment,))
@@ -299,7 +510,7 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
result = ssh_util.update_ssh_config_lines(lines, updates)
self.assertEqual([], result)
- self.assertEqual(self.exlines, [str(l) for l in lines])
+ self.assertEqual(self.exlines, [str(line) for line in lines])
def test_keycase_not_modified(self):
"""Original case of key should not be changed on update.
@@ -374,13 +585,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(
- auth_key_entries, [])
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
@@ -398,11 +609,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys))
+ sshd_config,
+ "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ )
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
+ fpw.pw_name, sshd_config
+ )
content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index c36e6eb0..cba09830 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
from cloudinit.tests import helpers as test_helpers
import textwrap
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 9ff17f52..fc557469 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,27 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import io
-import json
import logging
import os
import re
import shutil
import stat
-import sys
import tempfile
+import pytest
import yaml
from unittest import mock
+from cloudinit import subp
from cloudinit import importer, util
from cloudinit.tests import helpers
-BASH = util.which('bash')
-BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
-
-
class FakeSelinux(object):
def __init__(self, match_what):
@@ -105,6 +99,17 @@ class TestWriteFile(helpers.TestCase):
self.assertTrue(os.path.isdir(dirname))
self.assertTrue(os.path.isfile(path))
+ def test_dir_is_not_created_if_ensure_dir_false(self):
+ """Verify directories are not created if ensure_dir_exists is False."""
+ dirname = os.path.join(self.tmp, "subdir")
+ path = os.path.join(dirname, "NewFile.txt")
+ contents = "Hey there"
+
+ with self.assertRaises(FileNotFoundError):
+ util.write_file(path, contents, ensure_dir_exists=False)
+
+ self.assertFalse(os.path.isdir(dirname))
+
def test_explicit_mode(self):
"""Verify explicit file mode works properly."""
path = os.path.join(self.tmp, "NewFile.txt")
@@ -117,29 +122,29 @@ class TestWriteFile(helpers.TestCase):
file_stat = os.stat(path)
self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
- def test_copy_mode_no_existing(self):
- """Verify that file is created with mode 0o644 if copy_mode
+ def test_preserve_mode_no_existing(self):
+ """Verify that file is created with mode 0o644 if preserve_mode
is true and there is no prior existing file."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- util.write_file(path, contents, copy_mode=True)
+ util.write_file(path, contents, preserve_mode=True)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
file_stat = os.stat(path)
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
- def test_copy_mode_with_existing(self):
+ def test_preserve_mode_with_existing(self):
"""Verify that file is created using mode of existing file
- if copy_mode is true."""
+ if preserve_mode is true."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
open(path, 'w').close()
os.chmod(path, 0o666)
- util.write_file(path, contents, copy_mode=True)
+ util.write_file(path, contents, preserve_mode=True)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
@@ -387,7 +392,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
@@ -410,17 +415,17 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertIsNone(ret)
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
"""Handle case where there is no zpool command"""
# mock /dev/zfs exists
m_os.path.exists.return_value = True
- m_sub.side_effect = util.ProcessExecutionError("No zpool cmd")
+ m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd")
ret = util.get_device_info_from_zpool('vmzroot')
self.assertIsNone(ret)
@mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
@@ -432,7 +437,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.get_device_info_from_zpool('vmzroot')
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_parse_mount_with_ext(self, mount_out):
mount_out.return_value = (
helpers.readResource('mount_parse_ext.txt'), '')
@@ -449,7 +454,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.parse_mount('/not/existing/mount')
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.subp')
+ @mock.patch('cloudinit.subp.subp')
def test_parse_mount_with_zfs(self, mount_out):
mount_out.return_value = (
helpers.readResource('mount_parse_zfs.txt'), '')
@@ -515,13 +520,13 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
"""
def _dmidecode_subp(cmd):
if cmd[-1] != key:
- raise util.ProcessExecutionError()
+ raise subp.ProcessExecutionError()
return (content, error)
self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: True))
+ mock.patch("cloudinit.subp.which", side_effect=lambda _: True))
self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', _dmidecode_subp))
+ mock.patch("cloudinit.subp.subp", side_effect=_dmidecode_subp))
def patch_mapping(self, new_mapping):
self.patched_funcs.enter_context(
@@ -548,10 +553,12 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
def test_dmidecode_not_used_on_arm(self):
self.patch_mapping({})
+ print("current =%s", subp)
self._create_sysfs_parent_directory()
dmi_val = 'from-dmidecode'
dmi_name = 'use-dmidecode'
self._configure_dmidecode_return(dmi_name, dmi_val)
+ print("now =%s", subp)
expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
found = {}
@@ -562,6 +569,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
for arch in expected:
m_uname.return_value = ('x-sysname', 'x-nodename',
'x-release', 'x-version', arch)
+ print("now2 =%s", subp)
found[arch] = util.read_dmi_data(dmi_name)
self.assertEqual(expected, found)
@@ -572,7 +580,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
def test_none_returned_if_dmidecode_not_in_path(self):
self.patched_funcs.enter_context(
- mock.patch.object(util, 'which', lambda _: False))
+ mock.patch.object(subp, 'which', lambda _: False))
self.patch_mapping({})
self.assertIsNone(util.read_dmi_data('expect-fail'))
@@ -736,219 +744,6 @@ class TestReadSeeded(helpers.TestCase):
self.assertEqual(found_ud, ud)
-class TestSubp(helpers.CiTestCase):
- with_logs = True
- allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE,
- BOGUS_COMMAND, sys.executable]
-
- stdin2err = [BASH, '-c', 'cat >&2']
- stdin2out = ['cat']
- utf8_invalid = b'ab\xaadef'
- utf8_valid = b'start \xc3\xa9 end'
- utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
- printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
-
- def printf_cmd(self, *args):
- # bash's printf supports \xaa. So does /usr/bin/printf
- # but by using bash, we remove dependency on another program.
- return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
-
- def test_subp_handles_bytestrings(self):
- """subp can run a bytestring command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = util.subp(cmd.encode('utf-8'), shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_strings(self):
- """subp can run a string command if shell is True."""
- tmp_file = self.tmp_path('test.out')
- cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
- (out, _err) = util.subp(cmd, shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
- self.assertEqual('HI MOM\n', util.load_file(tmp_file))
-
- def test_subp_handles_utf8(self):
- # The given bytes contain utf-8 accented characters as seen in e.g.
- # the "deja dup" package in Ubuntu.
- cmd = self.printf_cmd(self.utf8_valid_2)
- (out, _err) = util.subp(cmd, capture=True)
- self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
-
- def test_subp_respects_decode_false(self):
- (out, err) = util.subp(self.stdin2out, capture=True, decode=False,
- data=self.utf8_valid)
- self.assertTrue(isinstance(out, bytes))
- self.assertTrue(isinstance(err, bytes))
- self.assertEqual(out, self.utf8_valid)
-
- def test_subp_decode_ignore(self):
- # this executes a string that writes invalid utf-8 to stdout
- (out, _err) = util.subp(self.printf_cmd('abc\\xaadef'),
- capture=True, decode='ignore')
- self.assertEqual(out, 'abcdef')
-
- def test_subp_decode_strict_valid_utf8(self):
- (out, _err) = util.subp(self.stdin2out, capture=True,
- decode='strict', data=self.utf8_valid)
- self.assertEqual(out, self.utf8_valid.decode('utf-8'))
-
- def test_subp_decode_invalid_utf8_replaces(self):
- (out, _err) = util.subp(self.stdin2out, capture=True,
- data=self.utf8_invalid)
- expected = self.utf8_invalid.decode('utf-8', 'replace')
- self.assertEqual(out, expected)
-
- def test_subp_decode_strict_raises(self):
- args = []
- kwargs = {'args': self.stdin2out, 'capture': True,
- 'decode': 'strict', 'data': self.utf8_invalid}
- self.assertRaises(UnicodeDecodeError, util.subp, *args, **kwargs)
-
- def test_subp_capture_stderr(self):
- data = b'hello world'
- (out, err) = util.subp(self.stdin2err, capture=True,
- decode=False, data=data,
- update_env={'LC_ALL': 'C'})
- self.assertEqual(err, data)
- self.assertEqual(out, b'')
-
- def test_subp_reads_env(self):
- with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
- out, _err = util.subp(self.printenv + ['FOO'], capture=True)
- self.assertEqual('FOO=BAR', out.splitlines()[0])
-
- def test_subp_env_and_update_env(self):
- out, _err = util.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- env={'FOO': 'BAR'},
- update_env={'HOME': '/myhome', 'K2': 'V2'})
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
-
- def test_subp_update_env(self):
- extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
- with mock.patch.dict("os.environ", values=extra):
- out, _err = util.subp(
- self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
- update_env={'HOME': '/myhome', 'K2': 'V2'})
-
- self.assertEqual(
- ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
-
- def test_subp_warn_missing_shebang(self):
- """Warn on no #! in script"""
- noshebang = self.tmp_path('noshebang')
- util.write_file(noshebang, 'true\n')
-
- os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
- with self.allow_subp([noshebang]):
- self.assertRaisesRegex(util.ProcessExecutionError,
- r'Missing #! in script\?',
- util.subp, (noshebang,))
-
- def test_subp_combined_stderr_stdout(self):
- """Providing combine_capture as True redirects stderr to stdout."""
- data = b'hello world'
- (out, err) = util.subp(self.stdin2err, capture=True,
- combine_capture=True, decode=False, data=data)
- self.assertEqual(b'', err)
- self.assertEqual(data, out)
-
- def test_returns_none_if_no_capture(self):
- (out, err) = util.subp(self.stdin2out, data=b'', capture=False)
- self.assertIsNone(err)
- self.assertIsNone(out)
-
- def test_exception_has_out_err_are_bytes_if_decode_false(self):
- """Raised exc should have stderr, stdout as bytes if no decode."""
- with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([BOGUS_COMMAND], decode=False)
- self.assertTrue(isinstance(cm.exception.stdout, bytes))
- self.assertTrue(isinstance(cm.exception.stderr, bytes))
-
- def test_exception_has_out_err_are_bytes_if_decode_true(self):
- """Raised exc should have stderr, stdout as string if no decode."""
- with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([BOGUS_COMMAND], decode=True)
- self.assertTrue(isinstance(cm.exception.stdout, str))
- self.assertTrue(isinstance(cm.exception.stderr, str))
-
- def test_bunch_of_slashes_in_path(self):
- self.assertEqual("/target/my/path/",
- util.target_path("/target/", "//my/path/"))
- self.assertEqual("/target/my/path/",
- util.target_path("/target/", "///my/path/"))
-
- def test_c_lang_can_take_utf8_args(self):
- """Independent of system LC_CTYPE, args can contain utf-8 strings.
-
- When python starts up, its default encoding gets set based on
- the value of LC_CTYPE. If no system locale is set, the default
- encoding for both python2 and python3 in some paths will end up
- being ascii.
-
- Attempts to use setlocale or patching (or changing) os.environ
- in the current environment seem to not be effective.
-
- This test starts up a python with LC_CTYPE set to C so that
- the default encoding will be set to ascii. In such an environment
- Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
- """
- python_prog = '\n'.join([
- 'import json, sys',
- 'from cloudinit.util import subp',
- 'data = sys.stdin.read()',
- 'cmd = json.loads(data)',
- 'subp(cmd, capture=False)',
- ''])
- cmd = [BASH, '-c', 'echo -n "$@"', '--',
- self.utf8_valid.decode("utf-8")]
- python_subp = [sys.executable, '-c', python_prog]
-
- out, _err = util.subp(
- python_subp, update_env={'LC_CTYPE': 'C'},
- data=json.dumps(cmd).encode("utf-8"),
- decode=False)
- self.assertEqual(self.utf8_valid, out)
-
- def test_bogus_command_logs_status_messages(self):
- """status_cb gets status messages logs on bogus commands provided."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(util.ProcessExecutionError):
- util.subp([BOGUS_COMMAND], status_cb=status_cb)
-
- expected = [
- 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
- 'ERROR: End run command: invalid command provided\n']
- self.assertEqual(expected, logs)
-
- def test_command_logs_exit_codes_to_status_cb(self):
- """status_cb gets status messages containing command exit code."""
- logs = []
-
- def status_cb(log):
- logs.append(log)
-
- with self.assertRaises(util.ProcessExecutionError):
- util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
- util.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
-
- expected = [
- 'Begin run command: %s -c exit 2\n' % BASH,
- 'ERROR: End run command: exit(2)\n',
- 'Begin run command: %s -c exit 0\n' % BASH,
- 'End run command: exit(0)\n']
- self.assertEqual(expected, logs)
-
-
class TestEncode(helpers.TestCase):
"""Test the encoding functions"""
def test_decode_binary_plain_text_with_hex(self):
@@ -969,7 +764,7 @@ class TestProcessExecutionError(helpers.TestCase):
empty_description = 'Unexpected error while running command.'
def test_pexec_error_indent_text(self):
- error = util.ProcessExecutionError()
+ error = subp.ProcessExecutionError()
msg = 'abc\ndef'
formatted = 'abc\n{0}def'.format(' ' * 4)
self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
@@ -979,10 +774,10 @@ class TestProcessExecutionError(helpers.TestCase):
error._indent_text(msg.encode()), type(msg.encode()))
def test_pexec_error_type(self):
- self.assertIsInstance(util.ProcessExecutionError(), IOError)
+ self.assertIsInstance(subp.ProcessExecutionError(), IOError)
def test_pexec_error_empty_msgs(self):
- error = util.ProcessExecutionError()
+ error = subp.ProcessExecutionError()
self.assertTrue(all(attr == self.empty_attr for attr in
(error.stderr, error.stdout, error.reason)))
self.assertEqual(error.description, self.empty_description)
@@ -996,7 +791,7 @@ class TestProcessExecutionError(helpers.TestCase):
stderr_msg = 'error error'
cmd = 'test command'
exit_code = 3
- error = util.ProcessExecutionError(
+ error = subp.ProcessExecutionError(
stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
self.assertEqual(str(error), self.template.format(
description=self.empty_description, stdout=stdout_msg,
@@ -1007,7 +802,7 @@ class TestProcessExecutionError(helpers.TestCase):
# make sure bytes is converted handled properly when formatting
stdout_msg = 'multi\nline\noutput message'.encode()
stderr_msg = 'multi\nline\nerror message\n\n\n'
- error = util.ProcessExecutionError(
+ error = subp.ProcessExecutionError(
stdout=stdout_msg, stderr=stderr_msg)
self.assertEqual(
str(error),
@@ -1172,4 +967,133 @@ class TestGetProcEnv(helpers.TestCase):
my_ppid = os.getppid()
self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
+
+class TestKernelVersion:
+ """Tests for util.kernel_version()."""
+
+ params = [
+ ('5.6.19-300.fc32.x86_64', (5, 6)),
+ ('4.15.0-101-generic', (4, 15)),
+ ('3.10.0-1062.12.1.vz7.131.10', (3, 10)),
+ ('4.18.0-144.el8.x86_64', (4, 18))]
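+ # kernel_version() parses (major, minor) out of os.uname().release.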
+
+ @mock.patch('os.uname')
+ @pytest.mark.parametrize("uname_release,expected", params)
+ def test_kernel_version(self, m_uname, uname_release, expected):
+ m_uname.return_value.release = uname_release
+ assert expected == util.kernel_version()
+
+
+class TestFindDevs:
+ @mock.patch('cloudinit.subp.subp')
+ def test_find_devs_with(self, m_subp):
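+ # The mocked subp stands in for the blkid invocation that
+ # find_devs_with would otherwise run.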
+ m_subp.return_value = (
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"',
+ ''
+ )
+ devlist = util.find_devs_with()
+ assert devlist == [
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+
+ devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL")
+ assert devlist == [
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_find_devs_with_openbsd(self, m_subp):
+ m_subp.return_value = (
+ 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
+ )
+ devlist = util.find_devs_with_openbsd()
+ assert devlist == ['/dev/cd0a', '/dev/sd1i']
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_find_devs_with_openbsd_with_criteria(self, m_subp):
+ m_subp.return_value = (
+ 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
+ )
+ devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
+ assert devlist == ['/dev/cd0a']
+
+ # lp: #1841466
+ devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")
+ assert devlist == ['/dev/cd0a', '/dev/sd1i']
+
+ @pytest.mark.parametrize(
+ 'criteria,expected_devlist', (
+ (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']),
+ ('TYPE=iso9660', ['/dev/iso9660/config-2']),
+ ('TYPE=vfat', ['/dev/msdosfs/EFISYS']),
+ ('LABEL_FATBOOT=A_LABEL', []), # lp: #1841466
+ ),
+ )
+ @mock.patch('glob.glob')
+ def test_find_devs_with_freebsd(self, m_glob, criteria, expected_devlist):
+ def fake_glob(pattern):
+ msdos = ["/dev/msdosfs/EFISYS"]
+ iso9660 = ["/dev/iso9660/config-2"]
+ if pattern == "/dev/msdosfs/*":
+ return msdos
+ elif pattern == "/dev/iso9660/*":
+ return iso9660
+ raise RuntimeError('unexpected glob pattern: %s' % pattern)
+ m_glob.side_effect = fake_glob
+
+ devlist = util.find_devs_with_freebsd(criteria=criteria)
+ assert devlist == expected_devlist
+
+ @pytest.mark.parametrize(
+ 'criteria,expected_devlist', (
+ (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
+ ('TYPE=iso9660', ['/dev/cd0']),
+ ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]),
+ ('LABEL_FATBOOT=A_LABEL', # lp: #1841466
+ ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
+ )
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist):
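+ # The first subp return supplies the device list; each of the four
+ # remaining returns fakes an mscdlabel probe of one device.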
+ side_effect_values = [
+ ("ld0 dk0 dk1 cd0", ""),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "track (ctl=4) at sector 0\n"
+ 'ISO filesystem, label "config-2", '
+ "creation time: 2020/03/31 17:29\n"
+ "adding as 'a'\n"
+ ),
+ "",
+ ),
+ ]
+ m_subp.side_effect = side_effect_values
+ devlist = util.find_devs_with_netbsd(criteria=criteria)
+ assert devlist == expected_devlist
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
index b175a998..c8b59d83 100644
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ b/tests/unittests/test_vmware/test_guestcust_util.py
@@ -5,9 +5,12 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
get_tools_config,
+ set_gc_status,
)
from cloudinit.tests.helpers import CiTestCase, mock
@@ -18,7 +21,7 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the behavior if vmware-toolbox-cmd
is not installed.
"""
- with mock.patch.object(util, 'which', return_value=None):
+ with mock.patch.object(subp, 'which', return_value=None):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
@@ -27,10 +30,10 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the behavior if internal exception
is raised.
"""
- with mock.patch.object(util, 'which', return_value='/dummy/path'):
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+ with mock.patch.object(subp, 'subp',
return_value=('key=value', b''),
- side_effect=util.ProcessExecutionError(
+ side_effect=subp.ProcessExecutionError(
"subp failed", exit_code=99)):
# verify return value is 'defaultVal', not 'value'.
self.assertEqual(
@@ -42,31 +45,54 @@ class TestGuestCustUtil(CiTestCase):
This test is designed to verify the value could be parsed from
key = value of the given [section]
"""
- with mock.patch.object(util, 'which', return_value='/dummy/path'):
+ with mock.patch.object(subp, 'which', return_value='/dummy/path'):
# value is not blank
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key = value ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'value')
# value is blank
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key = ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'')
# value contains =
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('key=Bar=Wark', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'Bar=Wark')
# value contains specific characters
- with mock.patch.object(util, 'subp',
+ with mock.patch.object(subp, 'subp',
return_value=('[a] b.c_d=e-f', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'e-f')
+ def test_set_gc_status(self):
+ """
+ This test is designed to verify the behavior of set_gc_status
+ """
+ # config is None, return None
+ self.assertEqual(set_gc_status(None, 'Successful'), None)
+
+ # post gc status is NO, return None
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertEqual(set_gc_status(conf, 'Successful'), None)
+
+ # post gc status is YES, subp is called to execute command
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ with mock.patch.object(subp, 'subp',
+ return_value=('ok', b'')) as mockobj:
+ self.assertEqual(
+ set_gc_status(conf, 'Successful'), ('ok', b''))
+ mockobj.assert_called_once_with(
+ ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
+ rcs=[0])
+
# vi: ts=4 expandtab
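
One detail worth noting in the ProcessExecutionError test above: when a mock is given both return_value and side_effect, the side_effect wins, so the patched subp raises instead of returning ('key=value', b''). A toy demonstration of that mock behavior, assuming nothing beyond the stdlib:

    from unittest import mock

    class FakeProcError(Exception):
        """Stand-in for subp.ProcessExecutionError."""

    proc = mock.Mock(return_value=("key=value", b""),
                     side_effect=FakeProcError("subp failed"))
    try:
        proc()
        raised = False
    except FakeProcError:
        raised = True
    assert raised  # return_value was never used
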
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 16343ed2..9c7d25fa 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -348,6 +348,28 @@ class TestVmwareConfigFile(CiTestCase):
conf = Config(cf)
self.assertEqual("test-script", conf.custom_script_name)
+ def test_post_gc_status(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.post_gc_status)
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ self.assertTrue(conf.post_gc_status)
+
+ def test_no_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+
+ def test_yes_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
+ conf = Config(cf)
+ self.assertTrue(conf.default_run_post_script)
+
class TestVmwareNetConfig(CiTestCase):
"""Test conversion of vmware config to cloud-init config."""
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
new file mode 100644
index 00000000..c67db436
--- /dev/null
+++ b/tools/.github-cla-signers
@@ -0,0 +1,22 @@
+AlexBaranowski
+beezly
+bipinbachhao
+BirknerAlex
+candlerb
+dermotbradley
+dhensby
+eandersson
+izzyleung
+johnsonshi
+landon912
+lucasmoura
+marlluslustosa
+matthewruffell
+nishigori
+omBratteng
+onitake
+smoser
+sshedi
+TheRealFalcon
+tomponline
+tsanghan
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 6b20d360..89422dbb 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -6,6 +6,7 @@
"askon": "ask0n",
"bitfehler": "bitfehler",
"chad.smith": "blackboxsw",
+ "chcheng": "chengcheng-chcheng",
"d-info-e": "do3meli",
"daniel-thewatkins": "OddBloke",
"eric-lafontaine1": "elafontaine",
@@ -13,10 +14,13 @@
"goneri": "goneri",
"harald-jensas": "hjensas",
"i.galic": "igalic",
+ "kgarloff": "garloff",
+ "killermoehre": "killermoehre",
"larsks": "larsks",
"legovini": "paride",
"louis": "karibou",
"madhuri-rai07": "madhuri-rai07",
+ "momousta": "Moustafa-Moustafa",
"otubo": "otubo",
"pengpengs": "PengpengSun",
"powersj": "powersj",
@@ -24,5 +28,6 @@
"rjschwei": "rjschwei",
"tribaal": "chrisglass",
"trstringer": "trstringer",
+ "vtqanh": "anhvoms",
"xiaofengw": "xiaofengw-vmware"
}
\ No newline at end of file
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index 876368a9..3211c355 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -28,8 +28,7 @@ pkgs="
$py_prefix-jsonschema
$py_prefix-oauthlib
$py_prefix-requests
- $py_prefix-serial
- $py_prefix-six
+ $py_prefix-pyserial
$py_prefix-yaml
sudo
"
diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
new file mode 100755
index 00000000..d2a7067d
--- /dev/null
+++ b/tools/build-on-netbsd
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+
+# Check dependencies:
+depschecked=/tmp/c-i.dependencieschecked
+pkgs="
+ bash
+ dmidecode
+ py37-configobj
+ py37-jinja2
+ py37-oauthlib
+ py37-requests
+ py37-setuptools
+ py37-yaml
+ sudo
+"
+[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
+
+touch $depschecked
+
+# Build the code and install in /usr/pkg/:
+python3.7 setup.py build
+python3.7 setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
+mv -v /usr/local/etc/rc.d/cloud* /etc/rc.d
+
+# Enable cloud-init in /etc/rc.conf:
+sed -i.bak -e "/^cloud.*=.*/d" /etc/rc.conf
+echo '
+# You can safely remove the following lines starting with "cloud"
+cloudinitlocal="YES"
+cloudinit="YES"
+cloudconfig="YES"
+cloudfinal="YES"' >> /etc/rc.conf
+
+echo "Installation completed."
diff --git a/tools/build-on-openbsd b/tools/build-on-openbsd
new file mode 100755
index 00000000..ca028606
--- /dev/null
+++ b/tools/build-on-openbsd
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+
+# Check dependencies:
+depschecked=/tmp/c-i.dependencieschecked
+pkgs="
+ bash
+ dmidecode
+ py3-configobj
+ py3-jinja2
+ py3-jsonschema
+ py3-oauthlib
+ py3-requests
+ py3-setuptools
+ py3-six
+ py3-yaml
+ sudo--
+"
+[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
+
+touch $depschecked
+
+python3 setup.py build
+python3 setup.py install -O1 --distro openbsd --skip-build
+
+echo "Installation completed."
diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug
index 1f08e0cb..59c573af 100755
--- a/tools/ccfg-merge-debug
+++ b/tools/ccfg-merge-debug
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
from cloudinit import handlers
from cloudinit.handlers import cloud_config as cc_part
diff --git a/tools/ds-identify b/tools/ds-identify
index c93d4a77..4e5700fc 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -266,8 +266,9 @@ read_fs_info() {
isodevs="${isodevs},${dev}=$label"
ftype=""; dev=""; label="";
dev=${line#DEVNAME=};;
- LABEL=*) label="${line#LABEL=}";
- labels="${labels}${line#LABEL=}${delim}";;
+ LABEL=*|LABEL_FATBOOT=*)
+ label="${line#*=}";
+ labels="${labels}${label}${delim}";;
TYPE=*) ftype=${line#TYPE=};;
UUID=*) uuids="${uuids}${line#UUID=}$delim";;
esac
@@ -1062,6 +1063,10 @@ dscheck_OpenStack() {
return ${DS_FOUND}
fi
+ if dmi_chassis_asset_tag_matches "SAP CCloud VM"; then
+ return ${DS_FOUND}
+ fi
+
# LP: #1669875 : allow identification of OpenStack by asset tag
if dmi_chassis_asset_tag_matches "$nova"; then
return ${DS_FOUND}
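
The LABEL_FATBOOT arm above (LP: #1841466) makes ds-identify treat LABEL_FATBOOT= lines from blkid's export-format output the same as LABEL= lines. A hedged Python rendering of that shell case arm:

    def collect_labels(blkid_export_output):
        """Gather label values from `blkid -o export`-style output."""
        labels = []
        for line in blkid_export_output.splitlines():
            if line.startswith(("LABEL=", "LABEL_FATBOOT=")):
                labels.append(line.split("=", 1)[1])
        return labels

    sample = "DEVNAME=/dev/vdb\nLABEL_FATBOOT=A_LABEL\nTYPE=vfat"
    assert collect_labels(sample) == ["A_LABEL"]
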
diff --git a/tools/make-mime.py b/tools/make-mime.py
deleted file mode 100755
index d321479b..00000000
--- a/tools/make-mime.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/python
-
-import argparse
-import sys
-
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
-
-KNOWN_CONTENT_TYPES = [
- 'text/x-include-once-url',
- 'text/x-include-url',
- 'text/cloud-config-archive',
- 'text/upstart-job',
- 'text/cloud-config',
- 'text/part-handler',
- 'text/x-shellscript',
- 'text/cloud-boothook',
-]
-
-
-def file_content_type(text):
- try:
- filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
- except ValueError:
- raise argparse.ArgumentError(text, "Invalid value for %r" % (text))
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("-a", "--attach",
- dest="files",
- type=file_content_type,
- action='append',
- default=[],
- required=True,
- metavar="<file>:<content-type>",
- help="attach the given file in the specified "
- "content type")
- args = parser.parse_args()
- sub_messages = []
- for i, (fh, filename, format_type) in enumerate(args.files):
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
- content_type = sub_message.get_content_type().lower()
- if content_type not in KNOWN_CONTENT_TYPES:
- sys.stderr.write(("WARNING: content type %r for attachment %s "
- "may be incorrect!\n") % (content_type, i + 1))
- sub_messages.append(sub_message)
- combined_message = MIMEMultipart()
- for msg in sub_messages:
- combined_message.attach(msg)
- print(combined_message)
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
-
-# vi: ts=4 expandtab
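
The deleted script's core technique survives in the stdlib and is easy to reproduce: wrap each user-data part in a MIMEText whose MIME subtype is the cloud-init part type, then attach the parts to a MIMEMultipart. A minimal sketch (filename and content are illustrative):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    combined = MIMEMultipart()
    # text/x-shellscript is one of the part types the old script knew about.
    part = MIMEText("#!/bin/sh\necho hello\n", "x-shellscript")
    part.add_header("Content-Disposition", 'attachment; filename="hello.sh"')
    combined.attach(part)
    print(combined)  # emits the combined multipart user-data
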
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 724f7fc4..9dd067b9 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# Provides a somewhat random, somewhat compat, somewhat useful mock version of
# http://docs.amazonwebservices.com
@@ -258,12 +258,14 @@ class MetaDataHandler(object):
try:
key_id = int(mybe_key)
key_name = key_ids[key_id]
- except ValueError:
- raise WebException(hclient.BAD_REQUEST,
- "%s: not an integer" % mybe_key)
- except IndexError:
- raise WebException(hclient.NOT_FOUND,
- "Unknown key id %r" % mybe_key)
+ except ValueError as e:
+ raise WebException(
+ hclient.BAD_REQUEST, "%s: not an integer" % mybe_key
+ ) from e
+ except IndexError as e:
+ raise WebException(
+ hclient.NOT_FOUND, "Unknown key id %r" % mybe_key
+ ) from e
# Extract the possible sub-params
result = traverse(nparams[1:], {
"openssh-key": "\n".join(avail_keys[key_name]),
diff --git a/tools/pipremove b/tools/pipremove
index f8f4ff11..e1213edd 100755
--- a/tools/pipremove
+++ b/tools/pipremove
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import subprocess
import sys
diff --git a/tools/read-dependencies b/tools/read-dependencies
index b4656e69..6ad5f701 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""List pip dependencies or system package dependencies for cloud-init."""
# You might be tempted to rewrite this as a shell script, but you
@@ -9,7 +9,7 @@ try:
from argparse import ArgumentParser
except ImportError:
raise RuntimeError(
- 'Could not import python-argparse. Please install python-argparse '
+ 'Could not import argparse. Please install python3-argparse '
'package to continue')
import json
@@ -34,6 +34,13 @@ MAYBE_RELIABLE_YUM_INSTALL = [
'sh', '-c',
"""
error() { echo "$@" 1>&2; }
+ configure_repos_for_proxy_use() {
+ grep -q "^proxy=" /etc/yum.conf || return 0
+ error ":: http proxy in use => forcing the use of fixed URLs in /etc/yum.repos.d/*.repo"
+ sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo
+ sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
+ }
+ configure_repos_for_proxy_use
n=0; max=10;
bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1"
while n=$(($n+1)); do
@@ -48,6 +55,7 @@ MAYBE_RELIABLE_YUM_INSTALL = [
done
error ":: running yum install --cacheonly --assumeyes $*"
yum install --cacheonly --assumeyes "$@"
+ configure_repos_for_proxy_use
""",
'reliable-yum-install']
@@ -73,8 +81,8 @@ DISTRO_INSTALL_PKG_CMD = {
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
'common': ['make', 'sudo', 'tar'],
- 'redhat': ['python-tox'],
- 'centos': ['python-tox'],
+ 'redhat': ['python3-tox'],
+ 'centos': ['python3-tox'],
'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
@@ -93,13 +101,21 @@ def get_parser():
parser.add_argument(
'-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(),
help='The name of the distro to generate package deps for.')
+ deptype = parser.add_mutually_exclusive_group()
+ deptype.add_argument(
+ '-R', '--runtime-requires', action='store_true', default=False,
+ dest='runtime_requires',
+ help='Print only runtime required packages')
+ deptype.add_argument(
+ '-b', '--build-requires', action='store_true', default=False,
+ dest='build_requires', help='Print only buildtime required packages')
parser.add_argument(
'--dry-run', action='store_true', default=False, dest='dry_run',
help='Dry run the install, making no package changes.')
parser.add_argument(
'-s', '--system-pkg-names', action='store_true', default=False,
dest='system_pkg_names',
- help='The name of the distro to generate package deps for.')
+ help='Generate distribution package names (python3-pkgname).')
parser.add_argument(
'-i', '--install', action='store_true', default=False,
dest='install',
@@ -109,12 +125,6 @@ def get_parser():
dest='test_distro',
help='Additionally install continuous integration system packages '
'required for build and test automation.')
- parser.add_argument(
- '-v', '--python-version', type=str, dest='python_version',
- default=None, choices=["2", "3"],
- help='Override the version of python we want to generate system '
- 'package dependencies for. Defaults to the version of python '
- 'this script is called with')
return parser
@@ -132,6 +142,9 @@ def get_package_deps_from_json(topdir, distro):
deps = json.loads(stream.read())
if distro is None:
return {}
+ if deps.get(distro): # If we have a specific distro defined, use it.
+ return deps[distro]
+ # Use generic distro dependency map via DISTRO_PKG_TYPE_MAP
return deps[DISTRO_PKG_TYPE_MAP[distro]]
@@ -155,27 +168,20 @@ def parse_pip_requirements(requirements_path):
return dep_names
-def translate_pip_to_system_pkg(pip_requires, renames, python_ver):
+def translate_pip_to_system_pkg(pip_requires, renames):
"""Translate pip package names to distro-specific package names.
@param pip_requires: List of versionless pip package names to translate.
@param renames: Dict containing special-case renames from pip name to system
package name for the distro.
- @param python_ver: Optional python version string "2" or "3". When None,
- use the python version that is calling this script via sys.version_info.
"""
- if python_ver is None:
- python_ver = str(sys.version_info[0])
- if python_ver == "2":
- prefix = "python-"
- else:
- prefix = "python3-"
+ prefix = "python3-"
standard_pkg_name = "{0}{1}"
translated_names = []
for pip_name in pip_requires:
pip_name = pip_name.lower()
# Find a rename if present for the distro package and python version
- rename = renames.get(pip_name, {}).get(python_ver, None)
+ rename = renames.get(pip_name, "")
if rename:
translated_names.append(rename)
else:
@@ -222,17 +228,26 @@ def main(distro):
deps_from_json = get_package_deps_from_json(topd, args.distro)
renames = deps_from_json.get('renames', {})
translated_pip_names = translate_pip_to_system_pkg(
- pip_pkg_names, renames, args.python_version)
+ pip_pkg_names, renames)
all_deps = []
+ select_requires = [args.build_requires, args.runtime_requires]
if args.distro:
- all_deps.extend(
- translated_pip_names + deps_from_json['requires'] +
- deps_from_json['build-requires'])
+ if not any(select_requires):
+ all_deps.extend(
+ translated_pip_names + deps_from_json['requires'] +
+ deps_from_json['build-requires'])
+ else:
+ if args.build_requires:
+ all_deps.extend(deps_from_json['build-requires'])
+ else:
+ all_deps.extend(
+ translated_pip_names + deps_from_json['requires'])
else:
if args.system_pkg_names:
all_deps = translated_pip_names
else:
all_deps = pip_pkg_names
+ all_deps = sorted(all_deps)
if args.install:
pkg_install(all_deps, args.distro, args.test_distro, args.dry_run)
else:
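
With the --python-version option gone, translate_pip_to_system_pkg above collapses to a flat rename mapping plus a fixed python3- prefix. An equivalent sketch (the rename data is an invented example):

    def translate(pip_requires, renames):
        """Map pip names to distro package names, python3- prefixed."""
        translated = []
        for pip_name in pip_requires:
            pip_name = pip_name.lower()
            translated.append(renames.get(pip_name) or "python3-" + pip_name)
        return translated

    assert translate(["PyYAML", "requests"], {"pyyaml": "python3-yaml"}) == [
        "python3-yaml", "python3-requests"]
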
diff --git a/tools/read-version b/tools/read-version
index 92e9fc96..02c90643 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import json
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 3d5fa725..ed454840 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,10 @@ import argparse
import os
import sys
-VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
- "suse", "ubuntu", "unknown"]
+VARIANTS = ["alpine", "amazon", "arch", "centos", "debian", "fedora",
+ "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
+ "unknown"]
+
if "avoid-pep8-E402-import-not-top-of-file":
_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/tools/run-container b/tools/run-container
index 23243474..15948e77 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -287,8 +287,8 @@ prep() {
install_packages "$@"
}
-nose() {
- python3 -m nose "$@"
+pytest() {
+ python3 -m pytest "$@"
}
is_done_cloudinit() {
@@ -351,9 +351,8 @@ wait_for_boot() {
if [ "$OS_NAME" = "centos" ]; then
debug 1 "configuring proxy ${http_proxy}"
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
- inside "$name" sed -i s/enabled=1/enabled=0/ \
- /etc/yum/pluginconf.d/fastestmirror.conf
- inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
@@ -463,9 +462,8 @@ main() {
return
}
- inside_as_cd "$name" root "$cdir" \
- python3 ./tools/read-dependencies "--distro=${OS_NAME}" \
- --test-distro || {
+ local rdcmd=(python3 tools/read-dependencies "--distro=${OS_NAME}" --install --test-distro)
+ inside_as_cd "$name" root "$cdir" "${rdcmd[@]}" || {
errorrc "FAIL: failed to install dependencies with read-dependencies"
return
}
@@ -478,10 +476,10 @@ main() {
if [ -n "$unittest" ]; then
debug 1 "running unit tests."
- run_self_inside_as_cd "$name" "$user" "$cdir" nose \
+ run_self_inside_as_cd "$name" "$user" "$cdir" pytest \
tests/unittests cloudinit/ || {
- errorrc "nosetests failed.";
- errors[${#errors[@]}]="nosetests"
+ errorrc "pytest failed.";
+ errors[${#errors[@]}]="pytest"
}
fi
@@ -557,7 +555,7 @@ main() {
}
case "${1:-}" in
- prep|os_info|wait_inside|nose) _n=$1; shift; "$_n" "$@";;
+ prep|os_info|wait_inside|pytest) _n=$1; shift; "$_n" "$@";;
*) main "$@";;
esac
diff --git a/tools/run-pyflakes b/tools/run-pyflakes
index b3759a94..179afebe 100755
--- a/tools/run-pyflakes
+++ b/tools/run-pyflakes
@@ -1,6 +1,5 @@
#!/bin/bash
-PYTHON_VERSION=${PYTHON_VERSION:-2}
CR="
"
pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
@@ -12,7 +11,7 @@ else
files=( "$@" )
fi
-cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" )
+cmd=( "python3" -m "pyflakes" "${files[@]}" )
echo "Running: " "${cmd[@]}" 1>&2
exec "${cmd[@]}"
diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3
deleted file mode 100755
index e9f0863d..00000000
--- a/tools/run-pyflakes3
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@"
diff --git a/tools/tox-venv b/tools/tox-venv
index a5d21625..9dd02460 100755
--- a/tools/tox-venv
+++ b/tools/tox-venv
@@ -116,7 +116,7 @@ Usage: ${0##*/} [--no-create] tox-environment [command [args]]
be read from tox.ini. This allows you to do:
tox-venv py27 - tests/some/sub/dir
and have the 'command' read correctly and have that execute:
- python -m nose tests/some/sub/dir
+ python -m pytest tests/some/sub/dir
EOF
if [ -f "$tox_ini" ]; then
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index a57ea847..d8bbcfcb 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Try to read a YAML file and report any errors.
"""
diff --git a/tox.ini b/tox.ini
index 8612f034..a92c63e0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,19 +1,19 @@
[tox]
-envlist = py3, xenial, pycodestyle, pyflakes, pylint
+envlist = py3, xenial-dev, flake8, pylint
recreate = True
[testenv]
-commands = python -m nose {posargs:tests/unittests cloudinit}
+commands = {envpython} -m pytest {posargs:tests/unittests cloudinit}
setenv =
LC_ALL = en_US.utf-8
passenv=
- NOSE_VERBOSE
+ PYTEST_ADDOPTS
-[testenv:pycodestyle]
+[testenv:flake8]
basepython = python3
deps =
- pycodestyle==2.4.0
-commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
+ flake8==3.8.2
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
# https://github.com/gabrielfalcao/HTTPretty/issues/223
setenv =
@@ -23,7 +23,7 @@ setenv =
basepython = python3
deps =
# requirements
- pylint==2.3.1
+ pylint==2.6.0
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
@@ -32,27 +32,24 @@ commands = {envpython} -m pylint {posargs:cloudinit tests tools}
[testenv:py3]
basepython = python3
deps =
- nose-timer
-r{toxinidir}/test-requirements.txt
-commands = {envpython} -m nose --with-timer --timer-top-n 10 \
- {posargs:--with-coverage --cover-erase --cover-branches \
- --cover-inclusive --cover-package=cloudinit \
+commands = {envpython} -m pytest \
+ --durations 10 \
+ {posargs:--cov=cloudinit --cov-branch \
tests/unittests cloudinit}
[testenv:py27]
basepython = python2.7
deps = -r{toxinidir}/test-requirements.txt
-[testenv:py26]
-deps = -r{toxinidir}/test-requirements.txt
-commands = nosetests {posargs:tests/unittests cloudinit}
-setenv =
- LC_ALL = C
-
[flake8]
-#H102 Apache 2.0 license header not found
-ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304
+# E226: missing whitespace around arithmetic operator
+# W503: line break before binary operator
+# W504: line break after binary operator
+ignore=E226,W503,W504
exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
+per-file-ignores =
+ cloudinit/cmd/main.py:E402
[testenv:doc]
basepython = python3
@@ -62,11 +59,15 @@ commands =
{envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
doc8 doc/rtd
-[testenv:xenial]
-commands =
- python ./tools/pipremove jsonschema
- python -m nose {posargs:tests/unittests cloudinit}
-basepython = python3
+[xenial-shared-deps]
+# The version of pytest in xenial doesn't work with Python 3.8, so we define
+# two xenial environments: [testenv:xenial] runs the tests with exactly the
+# version of pytest present in xenial, and is used in CI. [testenv:xenial-dev]
+# runs the tests with the lowest version of pytest that works with Python 3.8,
+# 3.0.7, but keeps the other dependencies at xenial's level.
+#
+# (This section is not a testenv, it is used to maintain a single definition of
+# the dependencies shared between the two xenial testenvs.)
deps =
# requirements
jinja2==2.8
@@ -75,59 +76,50 @@ deps =
pyserial==3.0.1
configobj==5.0.6
requests==2.9.1
- # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version
- # to work with python3.6 is 1.16 as found in Artful. To keep default
- # invocation of 'tox' happy, accept the difference in version here.
- jsonpatch==1.16
- six==1.10.0
# test-requirements
- httpretty==0.9.6
- mock==1.3.0
- nose==1.3.7
- unittest2==1.1.0
- contextlib2==0.5.1
+ pytest-catchlog==1.2.1
-[testenv:centos6]
-basepython = python2.6
-commands = nosetests {posargs:tests/unittests cloudinit}
+[testenv:xenial]
+# When updating this commands definition, also update the definition in
+# [testenv:xenial-dev]. See the comment there for details.
+commands =
+ python ./tools/pipremove jsonschema
+ python -m pytest {posargs:tests/unittests cloudinit}
+basepython = python3
deps =
- # requirements
- argparse==1.2.1
- jinja2==2.2.1
- pyyaml==3.10
- oauthlib==0.6.0
- configobj==4.6.0
- requests==2.6.0
- jsonpatch==1.2
- six==1.9.0
- -r{toxinidir}/test-requirements.txt
-
-[testenv:opensusel150]
-basepython = python2.7
-commands = nosetests {posargs:tests/unittests cloudinit}
+ # Refer to the comment in [xenial-shared-deps] for details
+ {[xenial-shared-deps]deps}
+ httpretty==0.8.6
+ jsonpatch==1.10
+ pytest==2.8.7
+
+[testenv:xenial-dev]
+# This should be:
+# commands = {[testenv:xenial]commands}
+# but the version of tox in xenial has a bug
+# (https://github.com/tox-dev/tox/issues/208) which means that the {posargs}
+# substitution variable is misparsed and causes a traceback. Ensure that any
+# changes here are reflected in [testenv:xenial].
+commands =
+ python ./tools/pipremove jsonschema
+ python -m pytest {posargs:tests/unittests cloudinit}
+basepython = {[testenv:xenial]basepython}
deps =
- # requirements
- jinja2==2.10
- PyYAML==3.12
- oauthlib==2.0.6
- configobj==5.0.6
- requests==2.18.4
+ # Refer to the comment in [xenial-shared-deps] for details
+ {[xenial-shared-deps]deps}
+ # httpretty in xenial is 0.8.6, not 0.9.5. The oldest version to work with
+ # Python 3.7+ is 0.9.5, because it is the first to include this commit:
+ # https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060
+ httpretty==0.9.5
+ # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version
+ # to work with python3.6 is 1.16 as found in Artful. To keep default
+ # invocation of 'tox' happy, accept the difference in version here.
jsonpatch==1.16
- six==1.11.0
- -r{toxinidir}/test-requirements.txt
+ pytest==3.0.7
-[testenv:tip-pycodestyle]
-commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
-deps = pycodestyle
-
-[testenv:pyflakes]
-commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
-deps =
- pyflakes==1.6.0
-
-[testenv:tip-pyflakes]
-commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
-deps = pyflakes
+[testenv:tip-flake8]
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+deps = flake8
[testenv:tip-pylint]
commands = {envpython} -m pylint {posargs:cloudinit tests tools}
@@ -141,6 +133,14 @@ deps =
[testenv:citest]
basepython = python3
commands = {envpython} -m tests.cloud_tests {posargs}
-passenv = HOME
+passenv = HOME TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
+
+[pytest]
+# TODO: s/--strict/--strict-markers/ once xenial support is dropped
+addopts = --strict
+markers =
+ allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
+ allow_all_subp: allow all subp usage (disable_subp_usage)
+ ds_sys_cfg: a sys_cfg dict to be used by datasource fixtures
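
Since addopts uses --strict, any marker not registered in the [pytest] section above fails collection, which catches marker typos early. A hedged sketch of how a test would opt in to one of the registered markers (assuming it runs inside cloud-init's suite, where the marker is defined):

    import pytest

    @pytest.mark.allow_subp_for("whoami")
    def test_whoami_allowed():
        # Within cloud-init's suite, subp calls to the named command
        # are permitted while all others stay disabled.
        pass
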