author    Kim Hagen <kim.sidney@gmail.com>    2018-10-25 22:26:25 +0200
committer Kim Hagen <kim.sidney@gmail.com>    2018-10-25 22:26:25 +0200
commit    b120f4f7a670674779a93f8c882c81f44a993888 (patch)
tree      906d15f6520751b5e8fbeb49b680e673a5cc6aa3
parent    838581d57c8765d3e487f58bc37ea103af39d26f (diff)
parent    833adcdf6f85ec2305e62bea5a20f9363bf95507 (diff)
Merge tag 'ubuntu/18.4-0ubuntu1_16.04.2' into current
Conflicts:
	cloudinit/sources/DataSourceAzure.py
	config/cloud.cfg.tmpl
	integration-requirements.txt
	tools/read-version
-rw-r--r-- .pylintrc | 5
-rw-r--r-- ChangeLog | 309
-rw-r--r-- MANIFEST.in | 1
-rw-r--r-- bash_completion/cloud-init | 84
-rw-r--r-- cloudinit/analyze/__main__.py | 2
-rw-r--r-- cloudinit/analyze/dump.py | 2
-rw-r--r-- cloudinit/analyze/tests/test_dump.py | 86
-rw-r--r-- cloudinit/apport.py | 28
-rw-r--r-- cloudinit/cloud.py | 4
-rw-r--r-- cloudinit/cmd/devel/__init__.py | 25
-rw-r--r-- cloudinit/cmd/devel/logs.py | 59
-rwxr-xr-x cloudinit/cmd/devel/net_convert.py | 132
-rw-r--r-- cloudinit/cmd/devel/parser.py | 23
-rwxr-xr-x cloudinit/cmd/devel/render.py | 85
-rw-r--r-- cloudinit/cmd/devel/tests/test_logs.py | 21
-rw-r--r-- cloudinit/cmd/devel/tests/test_render.py | 101
-rw-r--r-- cloudinit/cmd/main.py | 36
-rw-r--r-- cloudinit/cmd/query.py | 155
-rw-r--r-- cloudinit/cmd/tests/test_main.py | 10
-rw-r--r-- cloudinit/cmd/tests/test_query.py | 193
-rw-r--r-- cloudinit/cmd/tests/test_status.py | 6
-rw-r--r-- cloudinit/config/cc_apt_configure.py | 4
-rw-r--r-- cloudinit/config/cc_bootcmd.py | 9
-rw-r--r-- cloudinit/config/cc_disable_ec2_metadata.py | 14
-rw-r--r-- cloudinit/config/cc_disk_setup.py | 12
-rw-r--r-- cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r-- cloudinit/config/cc_lxd.py | 67
-rw-r--r-- cloudinit/config/cc_mounts.py | 75
-rw-r--r-- cloudinit/config/cc_ntp.py | 485
-rw-r--r-- cloudinit/config/cc_phone_home.py | 7
-rw-r--r-- cloudinit/config/cc_power_state_change.py | 2
-rw-r--r-- cloudinit/config/cc_resizefs.py | 10
-rw-r--r-- cloudinit/config/cc_rh_subscription.py | 45
-rw-r--r-- cloudinit/config/cc_rsyslog.py | 4
-rw-r--r-- cloudinit/config/cc_runcmd.py | 6
-rwxr-xr-x cloudinit/config/cc_set_passwords.py | 105
-rw-r--r-- cloudinit/config/cc_snap.py | 5
-rw-r--r-- cloudinit/config/cc_snappy.py | 4
-rwxr-xr-x cloudinit/config/cc_ssh.py | 7
-rw-r--r-- cloudinit/config/cc_ubuntu_advantage.py | 5
-rw-r--r-- cloudinit/config/cc_users_groups.py | 49
-rw-r--r-- cloudinit/config/cc_write_files.py | 7
-rw-r--r-- cloudinit/config/schema.py | 68
-rw-r--r-- cloudinit/config/tests/test_disable_ec2_metadata.py | 50
-rw-r--r-- cloudinit/config/tests/test_set_passwords.py | 71
-rw-r--r-- cloudinit/config/tests/test_snap.py | 36
-rw-r--r-- cloudinit/config/tests/test_ssh.py | 151
-rw-r--r-- cloudinit/config/tests/test_ubuntu_advantage.py | 37
-rw-r--r-- cloudinit/config/tests/test_users_groups.py | 144
-rw-r--r-- [-rwxr-xr-x] cloudinit/distros/__init__.py | 50
-rw-r--r-- cloudinit/distros/debian.py | 5
-rw-r--r-- cloudinit/distros/freebsd.py | 10
-rw-r--r-- cloudinit/distros/net_util.py | 19
-rw-r--r-- cloudinit/distros/opensuse.py | 82
-rw-r--r-- cloudinit/distros/rhel.py | 59
-rw-r--r-- cloudinit/distros/ubuntu.py | 19
-rw-r--r-- cloudinit/ec2_utils.py | 14
-rw-r--r-- cloudinit/event.py | 17
-rw-r--r-- cloudinit/gpg.py | 52
-rw-r--r-- cloudinit/handlers/__init__.py | 11
-rw-r--r-- cloudinit/handlers/boot_hook.py | 12
-rw-r--r-- cloudinit/handlers/cloud_config.py | 15
-rw-r--r-- cloudinit/handlers/jinja_template.py | 137
-rw-r--r-- cloudinit/handlers/shell_script.py | 9
-rw-r--r-- cloudinit/handlers/upstart_job.py | 11
-rw-r--r-- cloudinit/helpers.py | 8
-rw-r--r-- cloudinit/log.py | 12
-rw-r--r-- cloudinit/net/__init__.py | 81
-rwxr-xr-x cloudinit/net/cmdline.py | 2
-rw-r--r-- cloudinit/net/dhcp.py | 2
-rw-r--r-- cloudinit/net/eni.py | 33
-rw-r--r-- cloudinit/net/netplan.py | 28
-rw-r--r-- cloudinit/net/network_state.py | 15
-rw-r--r-- cloudinit/net/renderer.py | 9
-rw-r--r-- cloudinit/net/sysconfig.py | 102
-rw-r--r-- cloudinit/net/tests/test_init.py | 14
-rw-r--r-- cloudinit/netinfo.py | 379
-rw-r--r-- cloudinit/reporting/__init__.py | 8
-rw-r--r-- cloudinit/reporting/events.py | 2
-rw-r--r-- cloudinit/reporting/handlers.py | 246
-rw-r--r-- cloudinit/settings.py | 3
-rw-r--r-- cloudinit/sources/DataSourceAliYun.py | 2
-rw-r--r-- cloudinit/sources/DataSourceAltCloud.py | 29
-rw-r--r-- cloudinit/sources/DataSourceAzure.py | 369
-rw-r--r-- cloudinit/sources/DataSourceCloudStack.py | 31
-rw-r--r-- cloudinit/sources/DataSourceConfigDrive.py | 17
-rw-r--r-- cloudinit/sources/DataSourceEc2.py | 48
-rw-r--r-- cloudinit/sources/DataSourceIBMCloud.py | 119
-rw-r--r-- cloudinit/sources/DataSourceMAAS.py | 4
-rw-r--r-- cloudinit/sources/DataSourceNoCloud.py | 4
-rw-r--r-- cloudinit/sources/DataSourceOVF.py | 2
-rw-r--r-- cloudinit/sources/DataSourceOpenNebula.py | 4
-rw-r--r-- cloudinit/sources/DataSourceOpenStack.py | 187
-rw-r--r-- cloudinit/sources/DataSourceOracle.py | 233
-rw-r--r-- cloudinit/sources/DataSourceScaleway.py | 54
-rw-r--r-- cloudinit/sources/DataSourceSmartOS.py | 225
-rw-r--r-- cloudinit/sources/__init__.py | 278
-rw-r--r-- cloudinit/sources/helpers/azure.py | 5
-rw-r--r-- cloudinit/sources/helpers/digitalocean.py | 7
-rw-r--r-- cloudinit/sources/helpers/openstack.py | 40
-rw-r--r-- cloudinit/sources/helpers/vmware/imc/config_nic.py | 4
-rw-r--r-- cloudinit/sources/helpers/vmware/imc/config_passwd.py | 4
-rw-r--r-- cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 4
-rw-r--r-- cloudinit/sources/tests/test_init.py | 362
-rw-r--r-- cloudinit/sources/tests/test_oracle.py | 331
-rw-r--r-- cloudinit/ssh_util.py | 76
-rw-r--r-- cloudinit/stages.py | 66
-rw-r--r-- cloudinit/templater.py | 40
-rw-r--r-- cloudinit/tests/helpers.py | 192
-rw-r--r-- cloudinit/tests/test_gpg.py | 54
-rw-r--r-- cloudinit/tests/test_netinfo.py | 233
-rw-r--r-- cloudinit/tests/test_stages.py | 231
-rw-r--r-- cloudinit/tests/test_url_helper.py | 28
-rw-r--r-- cloudinit/tests/test_util.py | 270
-rw-r--r-- cloudinit/tests/test_version.py | 31
-rw-r--r-- cloudinit/url_helper.py | 31
-rw-r--r-- cloudinit/user_data.py | 28
-rw-r--r-- cloudinit/util.py | 279
-rw-r--r-- cloudinit/version.py | 6
-rw-r--r-- cloudinit/warnings.py | 2
-rw-r--r-- debian/changelog | 216
-rw-r--r-- debian/patches/azure-apply-network-config-false.patch | 23
-rw-r--r-- debian/patches/azure-use-walinux-agent.patch | 4
-rw-r--r-- debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable | 228
-rw-r--r-- debian/patches/ds-identify-behavior-xenial.patch | 4
-rw-r--r-- debian/patches/openstack-no-network-config.patch | 40
-rw-r--r-- debian/patches/series | 3
-rwxr-xr-x debian/rules | 2
-rw-r--r-- doc/examples/cloud-config-disk-setup.txt | 4
-rw-r--r-- doc/examples/cloud-config-run-cmds.txt | 5
-rw-r--r-- doc/examples/cloud-config-user-groups.txt | 36
-rw-r--r-- doc/examples/cloud-config.txt | 24
-rw-r--r-- doc/rtd/index.rst | 1
-rw-r--r-- doc/rtd/topics/capabilities.rst | 118
-rw-r--r-- doc/rtd/topics/datasources.rst | 14
-rw-r--r-- doc/rtd/topics/datasources/aliyun.rst | 74
-rw-r--r-- doc/rtd/topics/datasources/cloudstack.rst | 26
-rw-r--r-- doc/rtd/topics/datasources/ec2.rst | 30
-rw-r--r-- doc/rtd/topics/datasources/openstack.rst | 23
-rw-r--r-- doc/rtd/topics/datasources/oracle.rst | 26
-rw-r--r-- doc/rtd/topics/debugging.rst | 2
-rw-r--r-- doc/rtd/topics/format.rst | 23
-rw-r--r-- doc/rtd/topics/instancedata.rst | 297
-rw-r--r-- doc/rtd/topics/network-config-format-v1.rst | 27
-rw-r--r-- doc/rtd/topics/network-config-format-v2.rst | 6
-rw-r--r-- doc/rtd/topics/tests.rst | 7
-rw-r--r-- integration-requirements.txt | 6
-rwxr-xr-x packages/bddeb | 40
-rwxr-xr-x packages/brpm | 6
-rw-r--r-- packages/debian/changelog.in | 2
-rw-r--r-- packages/debian/control.in | 1
-rwxr-xr-x packages/debian/rules.in | 2
-rw-r--r-- packages/redhat/cloud-init.spec.in | 8
-rw-r--r-- packages/suse/cloud-init.spec.in | 71
-rwxr-xr-x setup.py | 18
-rw-r--r-- systemd/cloud-config.service.tmpl | 1
-rw-r--r-- templates/chrony.conf.debian.tmpl | 39
-rw-r--r-- templates/chrony.conf.fedora.tmpl | 48
-rw-r--r-- templates/chrony.conf.opensuse.tmpl | 38
-rw-r--r-- templates/chrony.conf.rhel.tmpl | 45
-rw-r--r-- templates/chrony.conf.sles.tmpl | 38
-rw-r--r-- templates/chrony.conf.ubuntu.tmpl | 42
-rw-r--r-- tests/cloud_tests/args.py | 3
-rw-r--r-- tests/cloud_tests/bddeb.py | 2
-rw-r--r-- tests/cloud_tests/collect.py | 20
-rw-r--r-- tests/cloud_tests/platforms/instances.py | 44
-rw-r--r-- tests/cloud_tests/platforms/lxd/instance.py | 54
-rw-r--r-- tests/cloud_tests/releases.yaml | 16
-rw-r--r-- tests/cloud_tests/setup_image.py | 21
-rw-r--r-- tests/cloud_tests/stage.py | 15
-rw-r--r-- tests/cloud_tests/testcases.yaml | 8
-rw-r--r-- tests/cloud_tests/testcases/__init__.py | 58
-rw-r--r-- tests/cloud_tests/testcases/base.py | 91
-rw-r--r-- tests/cloud_tests/testcases/examples/including_user_groups.py | 2
-rw-r--r-- tests/cloud_tests/testcases/modules/byobu.py | 3
-rw-r--r-- tests/cloud_tests/testcases/modules/byobu.yaml | 3
-rw-r--r-- tests/cloud_tests/testcases/modules/ca_certs.py | 21
-rw-r--r-- tests/cloud_tests/testcases/modules/ca_certs.yaml | 8
-rw-r--r-- tests/cloud_tests/testcases/modules/lxd_bridge.py | 14
-rw-r--r-- tests/cloud_tests/testcases/modules/lxd_dir.py | 14
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp.py | 5
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp.yaml | 1
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_chrony.py | 26
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_chrony.yaml | 17
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_pools.yaml | 1
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_servers.yaml | 1
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_timesyncd.py | 15
-rw-r--r-- tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml | 15
-rw-r--r-- tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | 14
-rw-r--r-- tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml | 9
-rw-r--r-- tests/cloud_tests/testcases/modules/salt_minion.py | 39
-rw-r--r-- tests/cloud_tests/testcases/modules/salt_minion.yaml | 42
-rw-r--r-- tests/cloud_tests/testcases/modules/snap.yaml | 3
-rw-r--r-- tests/cloud_tests/testcases/modules/snappy.yaml | 3
-rw-r--r-- tests/cloud_tests/testcases/modules/user_groups.py | 2
-rw-r--r-- tests/cloud_tests/testcases/modules/write_files.py | 7
-rw-r--r-- tests/cloud_tests/testcases/modules/write_files.yaml | 15
-rw-r--r-- tests/cloud_tests/util.py | 2
-rw-r--r-- tests/cloud_tests/verify.py | 51
-rw-r--r-- tests/data/netinfo/netdev-formatted-output | 10
-rw-r--r-- tests/data/netinfo/netdev-formatted-output-down | 8
-rw-r--r-- tests/data/netinfo/new-ifconfig-output | 18
-rw-r--r-- tests/data/netinfo/new-ifconfig-output-down | 15
-rw-r--r-- tests/data/netinfo/old-ifconfig-output | 18
-rw-r--r-- tests/data/netinfo/route-formatted-output | 22
-rw-r--r-- tests/data/netinfo/sample-ipaddrshow-output | 13
-rw-r--r-- tests/data/netinfo/sample-ipaddrshow-output-down | 8
-rw-r--r-- tests/data/netinfo/sample-iproute-output-v4 | 3
-rw-r--r-- tests/data/netinfo/sample-iproute-output-v6 | 11
-rw-r--r-- tests/data/netinfo/sample-route-output-v4 | 5
-rw-r--r-- tests/data/netinfo/sample-route-output-v6 | 13
-rw-r--r-- tests/unittests/test__init__.py | 10
-rw-r--r-- tests/unittests/test_builtin_handlers.py | 324
-rw-r--r-- tests/unittests/test_cli.py | 3
-rw-r--r-- tests/unittests/test_data.py | 24
-rw-r--r-- tests/unittests/test_datasource/test_aliyun.py | 2
-rw-r--r-- tests/unittests/test_datasource/test_altcloud.py | 44
-rw-r--r-- tests/unittests/test_datasource/test_azure.py | 664
-rw-r--r-- tests/unittests/test_datasource/test_azure_helper.py | 6
-rw-r--r-- tests/unittests/test_datasource/test_cloudsigma.py | 3
-rw-r--r-- tests/unittests/test_datasource/test_common.py | 5
-rw-r--r-- tests/unittests/test_datasource/test_configdrive.py | 15
-rw-r--r-- tests/unittests/test_datasource/test_ec2.py | 12
-rw-r--r-- tests/unittests/test_datasource/test_gce.py | 1
-rw-r--r-- tests/unittests/test_datasource/test_ibmcloud.py | 50
-rw-r--r-- tests/unittests/test_datasource/test_maas.py | 4
-rw-r--r-- tests/unittests/test_datasource/test_nocloud.py | 5
-rw-r--r-- tests/unittests/test_datasource/test_opennebula.py | 409
-rw-r--r-- tests/unittests/test_datasource/test_openstack.py | 354
-rw-r--r-- tests/unittests/test_datasource/test_ovf.py | 8
-rw-r--r-- tests/unittests/test_datasource/test_scaleway.py | 82
-rw-r--r-- tests/unittests/test_datasource/test_smartos.py | 336
-rw-r--r-- tests/unittests/test_distros/test_create_users.py | 99
-rw-r--r-- tests/unittests/test_distros/test_netconfig.py | 935
-rw-r--r-- tests/unittests/test_distros/test_user_data_normalize.py | 6
-rw-r--r-- tests/unittests/test_ds_identify.py | 243
-rw-r--r-- tests/unittests/test_ec2_util.py | 9
-rw-r--r-- tests/unittests/test_filters/test_launch_index.py | 10
-rw-r--r-- tests/unittests/test_handler/test_handler_apt_conf_v1.py | 16
-rw-r--r-- tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py | 7
-rw-r--r-- tests/unittests/test_handler/test_handler_apt_source_v1.py | 27
-rw-r--r-- tests/unittests/test_handler/test_handler_apt_source_v3.py | 41
-rw-r--r-- tests/unittests/test_handler/test_handler_bootcmd.py | 40
-rw-r--r-- tests/unittests/test_handler/test_handler_chef.py | 34
-rw-r--r-- tests/unittests/test_handler/test_handler_etc_hosts.py | 1
-rw-r--r-- tests/unittests/test_handler/test_handler_lxd.py | 92
-rw-r--r-- tests/unittests/test_handler/test_handler_mounts.py | 104
-rw-r--r-- tests/unittests/test_handler/test_handler_ntp.py | 877
-rw-r--r-- tests/unittests/test_handler/test_handler_resizefs.py | 10
-rw-r--r-- tests/unittests/test_handler/test_handler_runcmd.py | 33
-rw-r--r-- tests/unittests/test_handler/test_schema.py | 51
-rw-r--r-- tests/unittests/test_merging.py | 2
-rw-r--r-- tests/unittests/test_net.py | 846
-rw-r--r-- tests/unittests/test_reporting_hyperv.py | 134
-rw-r--r-- tests/unittests/test_rh_subscription.py | 185
-rw-r--r-- tests/unittests/test_runs/test_merge_run.py | 2
-rw-r--r-- tests/unittests/test_runs/test_simple_run.py | 32
-rw-r--r-- tests/unittests/test_sshutil.py | 97
-rw-r--r-- tests/unittests/test_templating.py | 68
-rw-r--r-- tests/unittests/test_util.py | 164
-rw-r--r-- tests/unittests/test_version.py | 14
-rw-r--r-- tests/unittests/test_vmware_config_file.py | 115
-rw-r--r-- tools/Z99-cloud-locale-test.sh | 13
-rw-r--r-- tools/Z99-cloudinit-warnings.sh | 8
-rwxr-xr-x tools/ds-identify | 135
-rwxr-xr-x tools/make-tarball | 15
-rwxr-xr-x tools/net-convert.py | 84
-rwxr-xr-x tools/read-dependencies | 8
-rwxr-xr-x tools/run-centos | 340
-rwxr-xr-x tools/run-container | 592
-rwxr-xr-x tools/tox-venv | 189
-rw-r--r-- tox.ini | 16
272 files changed, 15219 insertions, 3557 deletions
diff --git a/.pylintrc b/.pylintrc
index 0bdfa59d..e376b48b 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -28,7 +28,7 @@ jobs=4
# W0703(broad-except)
# W1401(anomalous-backslash-in-string)
-disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401
+disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
[REPORTS]
@@ -61,7 +61,8 @@ ignored-modules=
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
-ignored-classes=optparse.Values,thread._local
+# argparse.Namespace from https://github.com/PyCQA/pylint/issues/2413
+ignored-classes=argparse.Namespace,optparse.Values,thread._local
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
diff --git a/ChangeLog b/ChangeLog
index daa7ccf6..9c043b08 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,312 @@
+18.4:
+ - add rtd example docs about new standardized keys
+ - use ds._crawled_metadata instance attribute if set when writing
+ instance-data.json
+ - ec2: update crawled metadata. add standardized keys
+ - tests: allow skipping an entire cloud_test without running.
+ - tests: disable lxd tests on cosmic
+ - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps
+ - lxd: adjust to snap installed lxd.
+ - docs: surface experimental doc in instance-data.json
+ - tests: fix ec2 integration tests. process meta_data instead of meta-data
+ - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard]
+ - cli: add cloud-init query subcommand to query instance metadata
+ - tools/tox-venv: update for new features.
+ - pylint: ignore warning assignment-from-no-return for _write_network
+ - stages: Fix bug causing datasource to have incorrect sys_cfg.
+ (LP: #1787459)
+ - Remove dead-code _write_network distro implementations.
+ - net_util: ensure static configs have netmask in translate_network result
+ [Thomas Berger] (LP: #1792454)
+ - Fall back to root:root on syslog permissions if other options fail.
+ [Robert Schweikert]
+ - tests: Add mock for util.get_hostname. [Robert Schweikert] (LP: #1792799)
+ - ds-identify: doc string cleanup.
+ - OpenStack: Support setting mac address on bond.
+ [Fabian Wiesel] (LP: #1682064)
+ - bash_completion/cloud-init: fix shell syntax error.
+ - EphemeralIPv4Network: Be more explicit when adding default route.
+ (LP: #1792415)
+ - OpenStack: support reading of newer versions of metadata.
+ - OpenStack: fix bug causing 'latest' version to be used from network.
+ (LP: #1792157)
+ - user-data: jinja template to render instance-data.json in cloud-config
+ (LP: #1791781)
+ - config: disable ssh access to a configured user account
+ - tests: print failed testname instead of docstring upon failure
+ - tests: Disallow use of util.subp except for where needed.
+ - sysconfig: refactor sysconfig to accept distro specific templates paths
+ - Add unit tests for config/cc_ssh.py [Francis Ginther]
+ - Fix the built-in cloudinit/tests/helpers:skipIf
+ - read-version: enhance error message [Joshua Powers]
+ - hyperv_reporting_handler: simplify threaded publisher
+ - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
+ [Pengpeng Sun] (LP: #1766538)
+ - logging: Add logging config type hyperv for reporting via Azure KVP
+ [Andy Liu]
+ - tests: disable other snap test as well [Joshua Powers]
+ - tests: disable snap, fix write_files binary [Joshua Powers]
+ - Add datasource Oracle Compute Infrastructure (OCI).
+ - azure: allow azure to generate network configuration from IMDS per boot.
+ - Scaleway: Add network configuration to the DataSource [Louis Bouchard]
+ - docs: Fix example cloud-init analyze command to match output.
+ [Wesley Gao]
+ - netplan: Correctly render macaddress on a bonds and bridges when
+ provided. (LP: #1784699)
+ - tools: Add 'net-convert' subcommand command to 'cloud-init devel'.
+ - redhat: remove ssh keys on new instance. (LP: #1781094)
+ - Use typeset or local in profile.d scripts. (LP: #1784713)
+ - OpenNebula: Fix null gateway6 [Akihiko Ota] (LP: #1768547)
+ - oracle: fix detect_openstack to report True on OracleCloud.com DMI data
+ (LP: #1784685)
+ - tests: improve LXDInstance trying to workaround or catch bug.
+ - update_metadata re-config on every boot comments and tests not quite
+ right [Mike Gerdts]
+ - tests: Collect build_info from system if available.
+ - pylint: Fix pylint warnings reported in pylint 2.0.0.
+ - get_linux_distro: add support for rhel via redhat-release.
+ - get_linux_distro: add support for centos6 and rawhide flavors of redhat
+ (LP: #1781229)
+ - tools: add '--debug' to tools/net-convert.py
+ - tests: bump the version of paramiko to 2.4.1.
+ - docs: note in rtd about avoiding /tmp when writing files (LP: #1727876)
+ - ubuntu,centos,debian: get_linux_distro to align with platform.dist
+ (LP: #1780481)
+ - Fix boothook docs on environment variable name (INSTANCE_I ->
+ INSTANCE_ID) [Marc Tamsky]
+ - update_metadata: a datasource can support network re-config every boot
+ - tests: drop salt-minion integration test (LP: #1778737)
+ - Retry on failed import of gpg receive keys.
+ - tools: Fix run-container when neither source or binary package requested.
+ - docs: Fix a small spelling error. [Oz N Tiram]
+ - tox: use simplestreams from git repository rather than bzr.
+
+18.3:
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+ [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+ (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+ (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+ [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+ (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+ (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integration tests
+ - netplan: fix mtu if provided by network config for all rendered types
+ (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+ [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - Do not use the systemd_prefix macro, not available in this environment
+ [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+ routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+ (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+ (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+ [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+ [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+ [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+ (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+ [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+ assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+ boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+ (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+ [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+ (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+ [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+ (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+ [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+ [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+ updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+ [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+ ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+ [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+ [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+ [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+ [Dominic Schlegel] (LP: #1721660)
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module
+ [Jacob Bednarz] (LP: #1771468)
+ - lxd: Delete default network and detach device if lxd-init created them.
+ (LP: #1776958)
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+ (LP: #1776701)
+ - stages: fix tracebacks if a module stage is undefined or empty
+ [Robert Schweikert] (LP: #1770462)
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+ (LP: #1768600)
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+ (LP: #1775371)
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integration tests
+ - netplan: fix mtu if provided by network config for all rendered types
+ (LP: #1774666)
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+ [Robert Schweikert] (LP: #1745235)
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - Do not use the systemd_prefix macro, not available in this environment
+ [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+ routes [Dan McDonald] (LP: #1763512)
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+ (LP: #1749717)
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+ (LP: #1771659)
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+ [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version. (LP: #1770712)
+ - cc_mounts: Do not add devices to fstab that are already present.
+ [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH. (LP: #1771382)
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint [Ryan Harper]
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service. (LP: #1767131)
+ - tests: do not rely on host /proc/cmdline in test_net.py
+ [Lars Kellogg-Stedman] (LP: #1769952)
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+ (LP: #1769985)
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan] (LP: #1765214)
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+ [Harm Weites] (LP: #1404745)
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address
+ assigned. (LP: #1766302)
+ - collect-logs: add -v flag, write to stderr, limit journal to single
+ boot. (LP: #1766335)
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+ (LP: #1766401)
+ - Add reporting events and log_time around early source of blocking time
+ [Ryan Harper]
+ - IBMCloud: recognize provisioning environment during debug boots.
+ (LP: #1767166)
+ - net: detect unstable network names and trigger a settle if needed
+ [Ryan Harper] (LP: #1766287)
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+ (LP: #1766711)
+ - DataSourceSmartOS: add locking of serial device.
+ [Mike Gerdts] (LP: #1746605)
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] (LP: #1765085)
+ - DataSourceSmartOS: list() should always return a list
+ [Mike Gerdts] (LP: #1763480)
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+ updated. (LP: #1677205)
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands. (LP: #1764264)
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down
+ [Mike Gerdts] (LP: #1667735)
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+ ext4. [Mike Gerdts] (LP: #1763511)
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+ [Ryan Harper]
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+ [Ryan Harper] (LP: #1749722)
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+ - tests: fix integration tests to support lxd 3.0 release
+ - correct documentation to match correct attribute name usage.
+ [Dominic Schlegel] (LP: #1420018)
+ - cc_resizefs, util: handle no /dev/zfs [Ryan Harper]
+ - doc: Fix links in OpenStack datasource documentation.
+ [Dominic Schlegel] (LP: #1721660)
+
18.2:
- Hetzner: Exit early if dmi system-manufacturer is not Hetzner.
- Add missing dependency on isc-dhcp-client to trunk ubuntu packaging.
diff --git a/MANIFEST.in b/MANIFEST.in
index 1a4d7711..57a85ea7 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,6 @@
include *.py MANIFEST.in LICENSE* ChangeLog
global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh
+graft bash_completion
graft config
graft doc
graft packages
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
new file mode 100644
index 00000000..8c25032f
--- /dev/null
+++ b/bash_completion/cloud-init
@@ -0,0 +1,84 @@
+# Copyright (C) 2018 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# bash completion for cloud-init cli
+_cloudinit_complete()
+{
+
+ local cur_word prev_word
+ cur_word="${COMP_WORDS[COMP_CWORD]}"
+ prev_word="${COMP_WORDS[COMP_CWORD-1]}"
+
+ subcmds="analyze clean collect-logs devel dhclient-hook features init modules query single status"
+ base_params="--help --file --version --debug --force"
+ case ${COMP_CWORD} in
+ 1)
+ COMPREPLY=($(compgen -W "$base_params $subcmds" -- $cur_word))
+ ;;
+ 2)
+ case ${prev_word} in
+ analyze)
+ COMPREPLY=($(compgen -W "--help blame dump show" -- $cur_word))
+ ;;
+ clean)
+ COMPREPLY=($(compgen -W "--help --logs --reboot --seed" -- $cur_word))
+ ;;
+ collect-logs)
+ COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
+ ;;
+ devel)
+ COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
+ ;;
+ dhclient-hook|features)
+ COMPREPLY=($(compgen -W "--help" -- $cur_word))
+ ;;
+ init)
+ COMPREPLY=($(compgen -W "--help --local" -- $cur_word))
+ ;;
+ modules)
+ COMPREPLY=($(compgen -W "--help --mode" -- $cur_word))
+ ;;
+
+ query)
+ COMPREPLY=($(compgen -W "--all --help --instance-data --list-keys --user-data --vendor-data --debug" -- $cur_word));;
+ single)
+ COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
+ ;;
+ status)
+ COMPREPLY=($(compgen -W "--help --long --wait" -- $cur_word))
+ ;;
+ esac
+ ;;
+ 3)
+ case ${prev_word} in
+ blame|dump)
+ COMPREPLY=($(compgen -W "--help --infile --outfile" -- $cur_word))
+ ;;
+ --mode)
+ COMPREPLY=($(compgen -W "--help init config final" -- $cur_word))
+ ;;
+ --frequency)
+ COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
+ ;;
+ net-convert)
+ COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
+ ;;
+ render)
+ COMPREPLY=($(compgen -W "--help --instance-data --debug" -- $cur_word));;
+ schema)
+ COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
+ ;;
+ show)
+ COMPREPLY=($(compgen -W "--help --format --infile --outfile" -- $cur_word))
+ ;;
+ esac
+ ;;
+ *)
+ COMPREPLY=()
+ ;;
+ esac
+}
+complete -F _cloudinit_complete cloud-init
+
+# vi: syntax=sh expandtab
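Note: a quick way to try the new completion script in a running bash session (repository-checkout paths assumed; the subcommand list comes from the spec above):

    $ source bash_completion/cloud-init
    $ complete -p cloud-init
    complete -F _cloudinit_complete cloud-init
    $ cloud-init <TAB><TAB>
    analyze clean collect-logs devel dhclient-hook features init modules query single status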
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 3ba5903f..f8613656 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -69,7 +69,7 @@ def analyze_blame(name, args):
"""
(infh, outfh) = configure_io(args)
blame_format = ' %ds (%n)'
- r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE)
+ r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
for idx, record in enumerate(show.show_events(_get_events(infh),
blame_format)):
srecs = sorted(filter(r.match, record), reverse=True)
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index b071aa19..1f3060d0 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -112,7 +112,7 @@ def parse_ci_logline(line):
return None
event_description = stage_to_description[event_name]
else:
- (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
+ (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
event_description = eventstr.split(event_name)[1].strip()
event = {
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index f4c42841..db2a667b 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,8 +5,8 @@ from textwrap import dedent
from cloudinit.analyze.dump import (
dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import subp, write_file
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.util import which, write_file
+from cloudinit.tests.helpers import CiTestCase, mock, skipIf
class TestParseTimestamp(CiTestCase):
@@ -15,21 +15,9 @@ class TestParseTimestamp(CiTestCase):
"""Logs with cloud-init detailed formats will be properly parsed."""
trusty_fmt = '%Y-%m-%d %H:%M:%S,%f'
trusty_stamp = '2016-09-12 14:39:20,839'
-
- parsed = parse_timestamp(trusty_stamp)
-
- # convert ourselves
dt = datetime.strptime(trusty_stamp, trusty_fmt)
- expected = float(dt.strftime('%s.%f'))
-
- # use date(1)
- out, _err = subp(['date', '+%s.%3N', '-d', trusty_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp))
def test_parse_timestamp_handles_syslog_adding_year(self):
"""Syslog timestamps lack a year. Add year and properly parse."""
@@ -39,17 +27,9 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(syslog_stamp)
-
- # use date(1)
- out, _ = subp(['date', '+%s.%3N', '-d', syslog_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')),
+ parse_timestamp(syslog_stamp))
def test_parse_timestamp_handles_journalctl_format_adding_year(self):
"""Journalctl precise timestamps lack a year. Add year and parse."""
@@ -59,37 +39,22 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(journal_stamp)
-
- # use date(1)
- out, _ = subp(['date', '+%s.%6N', '-d', journal_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp))
+ @skipIf(not which("date"), "'date' command not available.")
def test_parse_unexpected_timestamp_format_with_date_command(self):
- """Dump sends unexpected timestamp formats to data for processing."""
+ """Dump sends unexpected timestamp formats to date for processing."""
new_fmt = '%H:%M %m/%d %Y'
new_stamp = '17:15 08/08'
-
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
- expected = float(dt.strftime('%s.%f'))
- parsed = parse_timestamp(new_stamp)
# use date(1)
- out, _ = subp(['date', '+%s.%6N', '-d', new_stamp])
- timestamp = out.strip()
- date_ts = float(timestamp)
-
- self.assertEqual(expected, parsed)
- self.assertEqual(expected, date_ts)
- self.assertEqual(date_ts, parsed)
+ with self.allow_subp(["date"]):
+ self.assertEqual(
+ float(dt.strftime('%s.%f')), parse_timestamp(new_stamp))
class TestParseCILogLine(CiTestCase):
@@ -135,7 +100,9 @@ class TestParseCILogLine(CiTestCase):
'timestamp': timestamp}
self.assertEqual(expected, parse_ci_logline(line))
- def test_parse_logline_returns_event_for_finish_events(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_parse_logline_returns_event_for_finish_events(self,
+ m_parse_from_date):
"""parse_ci_logline returns a finish event for a parsed log line."""
line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]'
' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running'
@@ -147,7 +114,10 @@ class TestParseCILogLine(CiTestCase):
'origin': 'cloudinit',
'result': 'SUCCESS',
'timestamp': 1472594005.972}
+ m_parse_from_date.return_value = "1472594005.972"
self.assertEqual(expected, parse_ci_logline(line))
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
SAMPLE_LOGS = dedent("""\
@@ -162,10 +132,16 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
class TestDumpEvents(CiTestCase):
maxDiff = None
- def test_dump_events_with_rawdata(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_rawdata(self, m_parse_from_date):
"""Rawdata is split and parsed into a tuple of events and data"""
+ m_parse_from_date.return_value = "1472594005.972"
events, data = dump_events(rawdata=SAMPLE_LOGS)
expected_data = SAMPLE_LOGS.splitlines()
+ self.assertEqual(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")],
+ m_parse_from_date.call_args_list)
+ self.assertEqual(expected_data, data)
year = datetime.now().year
dt1 = datetime.strptime(
'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
@@ -183,12 +159,14 @@ class TestDumpEvents(CiTestCase):
'result': 'SUCCESS',
'timestamp': 1472594005.972}]
self.assertEqual(expected_events, events)
- self.assertEqual(expected_data, data)
- def test_dump_events_with_cisource(self):
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_cisource(self, m_parse_from_date):
"""Cisource file is read and parsed into a tuple of events and data."""
tmpfile = self.tmp_path('logfile')
write_file(tmpfile, SAMPLE_LOGS)
+ m_parse_from_date.return_value = 1472594005.972
+
events, data = dump_events(cisource=open(tmpfile))
year = datetime.now().year
dt1 = datetime.strptime(
@@ -208,3 +186,5 @@ class TestDumpEvents(CiTestCase):
'timestamp': 1472594005.972}]
self.assertEqual(expected_events, events)
self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")])
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 618b0160..22cb7fde 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -13,10 +13,30 @@ except ImportError:
KNOWN_CLOUD_NAMES = [
- 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
- 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine',
- 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF',
- 'Scaleway', 'SmartOS', 'VMware', 'Other']
+ 'AliYun',
+ 'AltCloud',
+ 'Amazon - Ec2',
+ 'Azure',
+ 'Bigstep',
+ 'Brightbox',
+ 'CloudSigma',
+ 'CloudStack',
+ 'DigitalOcean',
+ 'GCE - Google Compute Engine',
+ 'Hetzner Cloud',
+ 'IBM - (aka SoftLayer or BlueMix)',
+ 'LXD',
+ 'MAAS',
+ 'NoCloud',
+ 'OpenNebula',
+ 'OpenStack',
+ 'Oracle',
+ 'OVF',
+ 'OpenTelekomCloud',
+ 'Scaleway',
+ 'SmartOS',
+ 'VMware',
+ 'Other']
# Potentially clear text collected logs
CLOUDINIT_LOG = '/var/log/cloud-init.log'
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 6d12c437..7ae98e1c 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
@property
def cfg(self):
- # Ensure that not indirectly modified
+ # Ensure that cfg is not indirectly modified
return copy.deepcopy(self._cfg)
def run(self, name, functor, args, freq=None, clear_on_fail=False):
@@ -61,7 +61,7 @@ class Cloud(object):
return None
return fn
- # The rest of thes are just useful proxies
+ # The rest of these are just useful proxies
def get_userdata(self, apply_filter=True):
return self.datasource.get_userdata(apply_filter)
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index e69de29b..3ae28b69 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -0,0 +1,25 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Common cloud-init devel commandline utility functions."""
+
+
+import logging
+
+from cloudinit import log
+from cloudinit.stages import Init
+
+
+def addLogHandlerCLI(logger, log_level):
+ """Add a commandline logging handler to emit messages to stderr."""
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
+ log.setupBasicLogging(log_level, formatter=formatter)
+ return logger
+
+
+def read_cfg_paths():
+ """Return a Paths object based on the system configuration on disk."""
+ init = Init(ds_deps=[])
+ init.read_cfg()
+ return init.paths
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 35ca478f..df725204 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir
from datetime import datetime
import os
import shutil
+import sys
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
@@ -31,6 +32,8 @@ def get_parser(parser=None):
parser = argparse.ArgumentParser(
prog='collect-logs',
description='Collect and tar all cloud-init debug info')
+ parser.add_argument('--verbose', '-v', action='count', default=0,
+ dest='verbosity', help="Be more verbose.")
parser.add_argument(
"--tarfile", '-t', default='cloud-init.tar.gz',
help=('The tarfile to create containing all collected logs.'
@@ -43,17 +46,33 @@ def get_parser(parser=None):
return parser
-def _write_command_output_to_file(cmd, filename):
+def _write_command_output_to_file(cmd, filename, msg, verbosity):
"""Helper which runs a command and writes output or error to filename."""
try:
out, _ = subp(cmd)
except ProcessExecutionError as e:
write_file(filename, str(e))
+ _debug("collecting %s failed.\n" % msg, 1, verbosity)
else:
write_file(filename, out)
+ _debug("collected %s\n" % msg, 1, verbosity)
+ return out
-def collect_logs(tarfile, include_userdata):
+def _debug(msg, level, verbosity):
+ if level <= verbosity:
+ sys.stderr.write(msg)
+
+
+def _collect_file(path, out_dir, verbosity):
+ if os.path.isfile(path):
+ copy(path, out_dir)
+ _debug("collected file: %s\n" % path, 1, verbosity)
+ else:
+ _debug("file %s did not exist\n" % path, 2, verbosity)
+
+
+def collect_logs(tarfile, include_userdata, verbosity=0):
"""Collect all cloud-init logs and tar them up into the provided tarfile.
@param tarfile: The path of the tar-gzipped file to create.
@@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata):
log_dir = 'cloud-init-logs-{0}'.format(date)
with tempdir(dir='/tmp') as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
- _write_command_output_to_file(
+ version = _write_command_output_to_file(
+ ['cloud-init', '--version'],
+ os.path.join(log_dir, 'version'),
+ "cloud-init --version", verbosity)
+ dpkg_ver = _write_command_output_to_file(
['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'version'))
+ os.path.join(log_dir, 'dpkg-version'),
+ "dpkg version", verbosity)
+ if not version:
+ version = dpkg_ver if dpkg_ver else "not-available"
+ _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
+ ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
+ "dmesg output", verbosity)
_write_command_output_to_file(
- ['journalctl', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'))
+ ['journalctl', '--boot=0', '-o', 'short-precise'],
+ os.path.join(log_dir, 'journal.txt'),
+ "systemd journal of current boot", verbosity)
+
for log in CLOUDINIT_LOGS:
- copy(log, log_dir)
+ _collect_file(log, log_dir, verbosity)
if include_userdata:
- copy(USER_DATA_FILE, log_dir)
+ _collect_file(USER_DATA_FILE, log_dir, verbosity)
run_dir = os.path.join(log_dir, 'run')
ensure_dir(run_dir)
- shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
+ if os.path.exists(CLOUDINIT_RUN_DIR):
+ shutil.copytree(CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, 'cloud-init'))
+ _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
+ else:
+ _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
+ verbosity)
with chdir(tmp_dir):
subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ sys.stderr.write("Wrote %s\n" % tarfile)
def handle_collect_logs_args(name, args):
"""Handle calls to 'cloud-init collect-logs' as a subcommand."""
- collect_logs(args.tarfile, args.userdata)
+ collect_logs(args.tarfile, args.userdata, args.verbosity)
def main():
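A rough usage sketch of the reworked collector, using only flags defined in the parser above (the tarball path is hypothetical and the stderr output is abbreviated):

    # Collect logs verbosely, including user-data, into a custom tarball
    $ cloud-init collect-logs -v --include-userdata --tarfile /tmp/ci-logs.tar.gz
    collected file: /var/log/cloud-init.log
    collected file: /var/log/cloud-init-output.log
    Wrote /tmp/ci-logs.tar.gz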
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
new file mode 100755
index 00000000..a0f58a0a
--- /dev/null
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -0,0 +1,132 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Debug network config format conversions."""
+import argparse
+import json
+import os
+import sys
+import yaml
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceAzure as azure
+
+from cloudinit import distros
+from cloudinit.net import eni, netplan, network_state, sysconfig
+from cloudinit import log
+
+NAME = 'net-convert'
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for net-convert utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+ parser.add_argument("-p", "--network-data", type=open,
+ metavar="PATH", required=True)
+ parser.add_argument("-k", "--kind",
+ choices=['eni', 'network_data.json', 'yaml',
+ 'azure-imds'],
+ required=True)
+ parser.add_argument("-d", "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True)
+ parser.add_argument("-D", "--distro",
+ choices=[item for sublist in
+ distros.OSFAMILIES.values()
+ for item in sublist],
+ required=True)
+ parser.add_argument("-m", "--mac",
+ metavar="name,mac",
+ action='append',
+ help="interface name to mac mapping")
+ parser.add_argument("--debug", action='store_true',
+ help='enable debug logging to stderr.')
+ parser.add_argument("-O", "--output-kind",
+ choices=['eni', 'netplan', 'sysconfig'],
+ required=True)
+ return parser
+
+
+def handle_args(name, args):
+ if not args.directory.endswith("/"):
+ args.directory += "/"
+
+ if not os.path.isdir(args.directory):
+ os.makedirs(args.directory)
+
+ if args.debug:
+ log.setupBasicLogging(level=log.DEBUG)
+ else:
+ log.setupBasicLogging(level=log.WARN)
+ if args.mac:
+ known_macs = {}
+ for item in args.mac:
+ iface_name, iface_mac = item.split(",", 1)
+ known_macs[iface_mac] = iface_name
+ else:
+ known_macs = None
+
+ net_data = args.network_data.read()
+ if args.kind == "eni":
+ pre_ns = eni.convert_eni_data(net_data)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == "yaml":
+ pre_ns = yaml.load(net_data)
+ if 'network' in pre_ns:
+ pre_ns = pre_ns.get('network')
+ if args.debug:
+ sys.stderr.write('\n'.join(
+ ["Input YAML",
+ yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == 'network_data.json':
+ pre_ns = openstack.convert_net_json(
+ json.loads(net_data), known_macs=known_macs)
+ ns = network_state.parse_net_config_data(pre_ns)
+ elif args.kind == 'azure-imds':
+ pre_ns = azure.parse_network_config(json.loads(net_data))
+ ns = network_state.parse_net_config_data(pre_ns)
+
+ if not ns:
+ raise RuntimeError("No valid network_state object created from "
+ "input data")
+
+ if args.debug:
+ sys.stderr.write('\n'.join([
+ "", "Internal State",
+ yaml.dump(ns, default_flow_style=False, indent=4), ""]))
+ distro_cls = distros.fetch(args.distro)
+ distro = distro_cls(args.distro, {}, None)
+ config = {}
+ if args.output_kind == "eni":
+ r_cls = eni.Renderer
+ config = distro.renderer_configs.get('eni')
+ elif args.output_kind == "netplan":
+ r_cls = netplan.Renderer
+ config = distro.renderer_configs.get('netplan')
+ else:
+ r_cls = sysconfig.Renderer
+ config = distro.renderer_configs.get('sysconfig')
+
+ r = r_cls(config=config)
+ sys.stderr.write(''.join([
+ "Read input format '%s' from '%s'.\n" % (
+ args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n" % (
+ args.output_kind, args.directory)]) + "\n")
+ r.render_network_state(network_state=ns, target=args.directory)
+
+
+if __name__ == '__main__':
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
+
+
+# vi: ts=4 expandtab
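A sample invocation of the new subcommand, assembled from the required flags declared in get_parser() above (input and output paths are hypothetical):

    # Convert an OpenStack network_data.json into netplan YAML under ./out/
    $ cloud-init devel net-convert --network-data network_data.json \
        --kind network_data.json --distro ubuntu \
        --directory ./out --output-kind netplan --debug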
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index acacc4ed..99a234ce 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,8 +5,10 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
-from cloudinit.config.schema import (
- get_parser as schema_parser, handle_schema_args)
+from cloudinit.config import schema
+
+from . import net_convert
+from . import render
def get_parser(parser=None):
@@ -17,10 +19,17 @@ def get_parser(parser=None):
subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
subparsers.required = True
- parser_schema = subparsers.add_parser(
- 'schema', help='Validate cloud-config files or document schema')
- # Construct schema subcommand parser
- schema_parser(parser_schema)
- parser_schema.set_defaults(action=('schema', handle_schema_args))
+ subcmds = [
+ ('schema', 'Validate cloud-config files for document schema',
+ schema.get_parser, schema.handle_schema_args),
+ (net_convert.NAME, net_convert.__doc__,
+ net_convert.get_parser, net_convert.handle_args),
+ (render.NAME, render.__doc__,
+ render.get_parser, render.handle_args)
+ ]
+ for (subcmd, helpmsg, get_parser, handler) in subcmds:
+ parser = subparsers.add_parser(subcmd, help=helpmsg)
+ get_parser(parser)
+ parser.set_defaults(action=(subcmd, handler))
return parser
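With this table-driven registration, all three utilities hang off 'cloud-init devel'; a quick sanity check (help output abbreviated):

    $ cloud-init devel --help
    ...
    Subcommands:
      {schema,net-convert,render}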
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
new file mode 100755
index 00000000..2ba6b681
--- /dev/null
+++ b/cloudinit/cmd/devel/render.py
@@ -0,0 +1,85 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Debug jinja template rendering of user-data."""
+
+import argparse
+import os
+import sys
+
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
+from cloudinit import log
+from cloudinit.sources import INSTANCE_JSON_FILE
+from . import addLogHandlerCLI, read_cfg_paths
+
+NAME = 'render'
+DEFAULT_INSTANCE_DATA = '/run/cloud-init/instance-data.json'
+
+LOG = log.getLogger(NAME)
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for jinja render utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+ parser.add_argument(
+ 'user_data', type=str, help='Path to the user-data file to render')
+ parser.add_argument(
+ '-i', '--instance-data', type=str,
+ help=('Optional path to instance-data.json file. Defaults to'
+ ' /run/cloud-init/instance-data.json'))
+ parser.add_argument('-d', '--debug', action='store_true', default=False,
+ help='Add verbose messages during template render')
+ return parser
+
+
+def handle_args(name, args):
+ """Render the provided user-data template file using instance-data values.
+
+ Also set up CLI log handlers to report to stderr since this is a development
+ utility which should be run by a human on the CLI.
+
+ @return 0 on success, 1 on failure.
+ """
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not args.instance_data:
+ paths = read_cfg_paths()
+ instance_data_fn = os.path.join(
+ paths.run_dir, INSTANCE_JSON_FILE)
+ else:
+ instance_data_fn = args.instance_data
+ if not os.path.exists(instance_data_fn):
+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ return 1
+ try:
+ with open(args.user_data) as stream:
+ user_data = stream.read()
+ except IOError:
+ LOG.error('Missing user-data file: %s', args.user_data)
+ return 1
+ rendered_payload = render_jinja_payload_from_file(
+ payload=user_data, payload_fn=args.user_data,
+ instance_data_file=instance_data_fn,
+ debug=True if args.debug else False)
+ if not rendered_payload:
+ LOG.error('Unable to render user-data file: %s', args.user_data)
+ return 1
+ sys.stdout.write(rendered_payload)
+ return 0
+
+
+def main():
+ args = get_parser().parse_args()
+ return(handle_args(NAME, args))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
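A minimal usage sketch matching the arguments defined above (the user-data file name is hypothetical; instance-data defaults to /run/cloud-init/instance-data.json when -i is omitted):

    # Render a jinja-templated user-data file with verbose messages
    $ cloud-init devel render my-user-data.yaml --debug
    # Or render against an alternate instance-data file
    $ cloud-init devel render my-user-data.yaml -i ./instance-data.json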
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index dc4947cc..98b47560 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs
from cloudinit.util import ensure_dir, load_file, subp, write_file
from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
from datetime import datetime
+import mock
import os
@@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
date = datetime.utcnow().date().strftime('%Y-%m-%d')
date_logdir = 'cloud-init-logs-{0}'.format(date)
+ version_out = '/usr/bin/cloud-init 18.2fake\n'
expected_subp = {
('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
'0.7fake\n',
+ ('cloud-init', '--version'): version_out,
('dmesg',): 'dmesg-out\n',
- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
('tar', 'czvf', output_tarfile, date_logdir): ''
}
@@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
subp(cmd) # Pass through tar cmd so we can check output
return expected_subp[cmd_tuple], ''
+ fake_stderr = mock.MagicMock()
+
wrap_and_call(
'cloudinit.cmd.devel.logs',
{'subp': {'side_effect': fake_subp},
+ 'sys.stderr': {'new': fake_stderr},
'CLOUDINIT_LOGS': {'new': [log1, log2]},
'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
logs.collect_logs, output_tarfile, include_userdata=False)
@@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase):
out_logdir = self.tmp_path(date_logdir, self.new_root)
self.assertEqual(
'0.7fake\n',
- load_file(os.path.join(out_logdir, 'version')))
+ load_file(os.path.join(out_logdir, 'dpkg-version')))
+ self.assertEqual(version_out,
+ load_file(os.path.join(out_logdir, 'version')))
self.assertEqual(
'cloud-init-log',
load_file(os.path.join(out_logdir, 'cloud-init.log')))
@@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase):
'results',
load_file(
os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
def test_collect_logs_includes_optional_userdata(self):
"""collect-logs include userdata when --include-userdata is set."""
@@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
date = datetime.utcnow().date().strftime('%Y-%m-%d')
date_logdir = 'cloud-init-logs-{0}'.format(date)
+ version_out = '/usr/bin/cloud-init 18.2fake\n'
expected_subp = {
('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
'0.7fake',
+ ('cloud-init', '--version'): version_out,
('dmesg',): 'dmesg-out\n',
- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
('tar', 'czvf', output_tarfile, date_logdir): ''
}
@@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
subp(cmd) # Pass through tar cmd so we can check output
return expected_subp[cmd_tuple], ''
+ fake_stderr = mock.MagicMock()
+
wrap_and_call(
'cloudinit.cmd.devel.logs',
{'subp': {'side_effect': fake_subp},
+ 'sys.stderr': {'new': fake_stderr},
'CLOUDINIT_LOGS': {'new': [log1, log2]},
'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
'USER_DATA_FILE': {'new': userdata}},
@@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase):
self.assertEqual(
'user-data',
load_file(os.path.join(out_logdir, 'user-data.txt')))
+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
new file mode 100644
index 00000000..fc5d2c0d
--- /dev/null
+++ b/cloudinit/cmd/devel/tests/test_render.py
@@ -0,0 +1,101 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from six import StringIO
+import os
+
+from collections import namedtuple
+from cloudinit.cmd.devel import render
+from cloudinit.helpers import Paths
+from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
+from cloudinit.util import ensure_dir, write_file
+
+
+class TestRender(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple('renderargs', 'user_data instance_data debug')
+
+ def setUp(self):
+ super(TestRender, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_handle_args_error_on_missing_user_data(self):
+ """When user_data file path does not exist, log an error."""
+ absent_file = self.tmp_path('user-data', dir=self.tmp)
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{}')
+ args = self.args(
+ user_data=absent_file, instance_data=instance_data, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'Missing user-data file: %s' % absent_file,
+ self.logs.getvalue())
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ absent_file = self.tmp_path('instance-data', dir=self.tmp)
+ args = self.args(
+ user_data=user_data, instance_data=absent_file, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'Missing instance-data.json file: %s' % absent_file,
+ self.logs.getvalue())
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({'run_dir': run_dir})
+ self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
+ self.m_paths.return_value = paths
+ args = self.args(
+ user_data=user_data, instance_data=None, debug=False)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ 'Missing instance-data.json file: %s' % json_file,
+ self.logs.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_renders_instance_data_vars_in_template(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, render.handle_args('anyname', args))
+ self.assertIn(
+ 'DEBUG: Converted jinja variables\n{', self.logs.getvalue())
+ self.assertIn(
+ 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue())
+ self.assertEqual('rendering: jinja worked', m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
+ """If user_data file has invalid jinja operations log warnings."""
+ user_data = self.tmp_path('user-data', dir=self.tmp)
+ write_file(user_data, '##template: jinja\nrendering: {{ my-var }}')
+ instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True)
+ with mock.patch('sys.stderr', new_callable=StringIO):
+ self.assertEqual(1, render.handle_args('anyname', args))
+ self.assertIn(
+ 'WARNING: Ignoring jinja template for %s: Undefined jinja'
+ ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
+ ' "my_var"?' % user_data,
+ self.logs.getvalue())
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 3f2dbb93..5a437020 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
data = None
header = b'#cloud-config'
try:
- resp = util.read_file_or_url(**kwargs)
+ resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
@@ -315,7 +315,7 @@ def main_init(name, args):
existing = "trust"
init.purge_cache()
- # Delete the non-net file as well
+ # Delete the no-net file as well
util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
@@ -339,7 +339,7 @@ def main_init(name, args):
" Likely bad things to come!"))
if not args.force:
init.apply_network_config(bring_up=not args.local)
- LOG.debug("[%s] Exiting without datasource in local mode", mode)
+ LOG.debug("[%s] Exiting without datasource", mode)
if mode == sources.DSMODE_LOCAL:
return (None, [])
else:
@@ -348,6 +348,7 @@ def main_init(name, args):
LOG.debug("[%s] barreling on in force mode without datasource",
mode)
+ _maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
@@ -490,6 +491,7 @@ def main_modules(action_name, args):
print_exc(msg)
if not args.force:
return [(msg)]
+ _maybe_persist_instance_data(init)
# Stage 3
mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
# Stage 4
@@ -541,6 +543,7 @@ def main_single(name, args):
" likely bad things to come!"))
if not args.force:
return 1
+ _maybe_persist_instance_data(init)
# Stage 3
mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
mod_args = args.module_args
@@ -688,6 +691,15 @@ def status_wrapper(name, args, data_d=None, link_d=None):
return len(v1[mode]['errors'])
+def _maybe_persist_instance_data(init):
+ """Write instance-data.json file if absent and datasource is restored."""
+ if init.ds_restored:
+ instance_data_file = os.path.join(
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ if not os.path.exists(instance_data_file):
+ init.datasource.persist_instance_data()
+
+
def _maybe_set_hostname(init, stage, retry_stage):
"""Call set-hostname if metadata, vendordata or userdata provides it.
@@ -779,6 +791,10 @@ def main(sysv_args=None):
' pass to this module'))
parser_single.set_defaults(action=('single', main_single))
+ parser_query = subparsers.add_parser(
+ 'query',
+ help='Query standardized instance metadata from the command line.')
+
parser_dhclient = subparsers.add_parser('dhclient-hook',
                                            help=('run the dhclient hook '
                                                  'to record network info'))
@@ -830,6 +846,12 @@ def main(sysv_args=None):
clean_parser(parser_clean)
parser_clean.set_defaults(
action=('clean', handle_clean_args))
+ elif sysv_args[0] == 'query':
+ from cloudinit.cmd.query import (
+ get_parser as query_parser, handle_args as handle_query_args)
+ query_parser(parser_query)
+ parser_query.set_defaults(
+            action=('query', handle_query_args))
elif sysv_args[0] == 'status':
from cloudinit.cmd.status import (
get_parser as status_parser, handle_status_args)
@@ -877,14 +899,18 @@ def main(sysv_args=None):
rname, rdesc, reporting_enabled=report_on)
with args.reporter:
- return util.log_time(
+ retval = util.log_time(
logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
get_uptime=True, func=functor, args=(name, args))
+ reporting.flush_events()
+ return retval
if __name__ == '__main__':
if 'TZ' not in os.environ:
os.environ['TZ'] = ":/etc/localtime"
- main(sys.argv)
+ return_value = main(sys.argv)
+ if return_value:
+ sys.exit(return_value)
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
new file mode 100644
index 00000000..7d2d4fe4
--- /dev/null
+++ b/cloudinit/cmd/query.py
@@ -0,0 +1,155 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Query standardized instance metadata from the command line."""
+
+import argparse
+import os
+import six
+import sys
+
+from cloudinit.handlers.jinja_template import (
+ convert_jinja_instance_data, render_jinja_payload)
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
+from cloudinit import log
+from cloudinit.sources import (
+ INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
+from cloudinit import util
+
+NAME = 'query'
+LOG = log.getLogger(NAME)
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for query utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ query subcommand which will be extended to support the args of
+ this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog=NAME, description='Query cloud-init instance data')
+ parser.add_argument(
+ '-d', '--debug', action='store_true', default=False,
+ help='Add verbose messages during template render')
+ parser.add_argument(
+ '-i', '--instance-data', type=str,
+ help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
+ % INSTANCE_JSON_FILE))
+ parser.add_argument(
+ '-l', '--list-keys', action='store_true', default=False,
+ help=('List query keys available at the provided instance-data'
+ ' <varname>.'))
+ parser.add_argument(
+ '-u', '--user-data', type=str,
+ help=('Path to user-data file. Default is'
+ ' /var/lib/cloud/instance/user-data.txt'))
+ parser.add_argument(
+ '-v', '--vendor-data', type=str,
+ help=('Path to vendor-data file. Default is'
+ ' /var/lib/cloud/instance/vendor-data.txt'))
+ parser.add_argument(
+ 'varname', type=str, nargs='?',
+        help=('A dot-delimited instance-data variable to query.'
+              ' For example: v2.local_hostname'))
+ parser.add_argument(
+ '-a', '--all', action='store_true', default=False, dest='dump_all',
+ help='Dump all available instance-data')
+ parser.add_argument(
+ '-f', '--format', type=str, dest='format',
+ help=('Optionally specify a custom output format string. Any'
+ ' instance-data variable can be specified between double-curly'
+ ' braces. For example -f "{{ v2.cloud_name }}"'))
+ return parser
+
+
+def handle_args(name, args):
+ """Handle calls to 'cloud-init query' as a subcommand."""
+ paths = None
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if not any([args.list_keys, args.varname, args.format, args.dump_all]):
+ LOG.error(
+ 'Expected one of the options: --all, --format,'
+ ' --list-keys or varname')
+ get_parser().print_help()
+ return 1
+
+ uid = os.getuid()
+ if not all([args.instance_data, args.user_data, args.vendor_data]):
+ paths = read_cfg_paths()
+ if not args.instance_data:
+ if uid == 0:
+ default_json_fn = INSTANCE_JSON_SENSITIVE_FILE
+ else:
+ default_json_fn = INSTANCE_JSON_FILE # World readable
+ instance_data_fn = os.path.join(paths.run_dir, default_json_fn)
+ else:
+ instance_data_fn = args.instance_data
+ if not args.user_data:
+ user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+ else:
+ user_data_fn = args.user_data
+ if not args.vendor_data:
+ vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ else:
+ vendor_data_fn = args.vendor_data
+
+ try:
+ instance_json = util.load_file(instance_data_fn)
+ except IOError:
+ LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ return 1
+
+ instance_data = util.load_json(instance_json)
+ if uid != 0:
+ instance_data['userdata'] = (
+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
+ instance_data['vendordata'] = (
+ '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ else:
+ instance_data['userdata'] = util.load_file(user_data_fn)
+ instance_data['vendordata'] = util.load_file(vendor_data_fn)
+ if args.format:
+ payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ rendered_payload = render_jinja_payload(
+ payload=payload, payload_fn='query commandline',
+ instance_data=instance_data,
+ debug=True if args.debug else False)
+ if rendered_payload:
+ print(rendered_payload)
+ return 0
+ return 1
+
+ response = convert_jinja_instance_data(instance_data)
+ if args.varname:
+ try:
+ for var in args.varname.split('.'):
+ response = response[var]
+ except KeyError:
+ LOG.error('Undefined instance-data key %s', args.varname)
+ return 1
+ if args.list_keys:
+ if not isinstance(response, dict):
+ LOG.error("--list-keys provided but '%s' is not a dict", var)
+ return 1
+ response = '\n'.join(sorted(response.keys()))
+ elif args.list_keys:
+ response = '\n'.join(sorted(response.keys()))
+ if not isinstance(response, six.string_types):
+ response = util.json_dumps(response)
+ print(response)
+ return 0
+
+
+def main():
+ """Tool to query specific instance-data values."""
+ parser = get_parser()
+ sys.exit(handle_args(NAME, parser.parse_args()))
+
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
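
A matching sketch for the query handler, assuming a booted system where instance-data.json exists (attribute names mirror the namedtuple used by the tests below; the varname value is an assumption):

    from argparse import Namespace
    from cloudinit.cmd import query

    args = Namespace(
        debug=False, dump_all=False, format=None, instance_data=None,
        list_keys=True, user_data=None, vendor_data=None, varname='v1')
    rc = query.handle_args('query', args)  # prints sorted keys under v1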
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index dbe421c0..a1e534fb 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase):
cmdargs = myargs(
debug=False, files=None, force=False, local=False, reporter=None,
subcommand='init')
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
@@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase):
cmdargs = myargs(
debug=False, files=None, force=False, local=False, reporter=None,
subcommand='init')
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
@@ -125,7 +125,9 @@ class TestMain(FilesystemMockingTestCase):
updated_cfg.update(
{'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+ 'syslog_fix_perms': [
+ 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
+ ],
'vendor_data': {'enabled': True, 'prefix': []}})
updated_cfg.pop('system_info')
@@ -133,7 +135,7 @@ class TestMain(FilesystemMockingTestCase):
self.assertEqual(main.LOG, log)
self.assertIsNone(args)
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
new file mode 100644
index 00000000..fb87c6ab
--- /dev/null
+++ b/cloudinit/cmd/tests/test_query.py
@@ -0,0 +1,193 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from six import StringIO
+from textwrap import dedent
+import os
+
+from collections import namedtuple
+from cloudinit.cmd import query
+from cloudinit.helpers import Paths
+from cloudinit.sources import REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE
+from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.util import ensure_dir, write_file
+
+
+class TestQuery(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple(
+ 'queryargs',
+ ('debug dump_all format instance_data list_keys user_data vendor_data'
+ ' varname'))
+
+ def setUp(self):
+ super(TestQuery, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
+
+ def test_handle_args_error_on_missing_param(self):
+ """Error when missing required parameters and print usage."""
+ args = self.args(
+ debug=False, dump_all=False, format=None, instance_data=None,
+ list_keys=False, user_data=None, vendor_data=None, varname=None)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ expected_error = (
+ 'ERROR: Expected one of the options: --all, --format, --list-keys'
+ ' or varname\n')
+ self.assertIn(expected_error, self.logs.getvalue())
+ self.assertIn('usage: query', m_stdout.getvalue())
+ self.assertIn(expected_error, m_stderr.getvalue())
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ absent_fn = self.tmp_path('absent', dir=self.tmp)
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=absent_fn,
+ list_keys=False, user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
+ self.logs.getvalue())
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % absent_fn,
+ m_stderr.getvalue())
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=None,
+ list_keys=False, user_data=None, vendor_data=None, varname=None)
+ run_dir = self.tmp_path('run_dir', dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({'run_dir': run_dir})
+ self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
+ self.m_paths.return_value = paths
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % json_file,
+ self.logs.getvalue())
+ self.assertIn(
+ 'ERROR: Missing instance-data.json file: %s' % json_file,
+ m_stderr.getvalue())
+
+ def test_handle_args_dumps_all_instance_data(self):
+ """When --all is specified query will dump all instance data vars."""
+ write_file(self.instance_data, '{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, list_keys=False,
+ user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(
+ '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
+ ' "vendordata": "<%s> file:vd"\n}\n' % (
+ REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
+ m_stdout.getvalue())
+
+ def test_handle_args_returns_top_level_varname(self):
+ """When the argument varname is passed, report its value."""
+ write_file(self.instance_data, '{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, list_keys=False,
+ user_data='ud', vendor_data='vd', varname='my_var')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual('it worked\n', m_stdout.getvalue())
+
+ def test_handle_args_returns_nested_varname(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ write_file(self.instance_data,
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+ list_keys=False, varname='v1.key_2')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual('value-2\n', m_stdout.getvalue())
+
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
+ """Any standardized vars under v# are promoted as top-level aliases."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}')
+ expected = dedent("""\
+ {
+ "top": "gun",
+ "userdata": "<redacted for non-root user> file:ud",
+ "v1": {
+ "v1_1": "val1.1"
+ },
+ "v1_1": "val1.1",
+ "v2": {
+ "v2_2": "val2.2"
+ },
+ "v2_2": "val2.2",
+ "vendordata": "<redacted for non-root user> file:vd"
+ }
+ """)
+ args = self.args(
+ debug=False, dump_all=True, format=None,
+ instance_data=self.instance_data, user_data='ud', vendor_data='vd',
+ list_keys=False, varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
+ """Sort all top-level keys when only --list-keys provided."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}')
+ expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True, user_data='ud',
+ vendor_data='vd', varname=None)
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
+ """Sort all nested keys of varname object when --list-keys provided."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
+ ' {"v2_2": "val2.2"}, "top": "gun"}')
+ expected = 'v1_1\nv1_2\n'
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True,
+ user_data='ud', vendor_data='vd', varname='v1')
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, query.handle_args('anyname', args))
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
+ """Raise an error when --list-keys and varname specify a non-list."""
+ write_file(
+ self.instance_data,
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
+ '{"v2_2": "val2.2"}, "top": "gun"}')
+ expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
+ args = self.args(
+ debug=False, dump_all=False, format=None,
+ instance_data=self.instance_data, list_keys=True, user_data='ud',
+ vendor_data='vd', varname='top')
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ self.assertEqual(1, query.handle_args('anyname', args))
+ self.assertEqual('', m_stdout.getvalue())
+ self.assertIn(expected_error, m_stderr.getvalue())
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index 37a89936..aded8580 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -39,7 +39,8 @@ class TestStatus(CiTestCase):
ensure_file(self.disable_file) # Create the ignored disable file
(is_disabled, reason) = wrap_and_call(
'cloudinit.cmd.status',
- {'uses_systemd': False},
+ {'uses_systemd': False,
+ 'get_cmdline': "root=/dev/my-root not-important"},
status._is_cloudinit_disabled, self.disable_file, self.paths)
self.assertFalse(
is_disabled, 'expected enabled cloud-init on sysvinit')
@@ -50,7 +51,8 @@ class TestStatus(CiTestCase):
ensure_file(self.disable_file) # Create observed disable file
(is_disabled, reason) = wrap_and_call(
'cloudinit.cmd.status',
- {'uses_systemd': True},
+ {'uses_systemd': True,
+ 'get_cmdline': "root=/dev/my-root not-important"},
status._is_cloudinit_disabled, self.disable_file, self.paths)
self.assertTrue(is_disabled, 'expected disabled cloud-init')
self.assertEqual(
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 5b9cbca0..e18944ec 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for
All source entries in ``apt-sources`` that match regex in
``add_apt_repo_match`` will be added to the system using
``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\w-]+:\w``
+to ``^[\\w-]+:\\w``
**Add source list entries:**
@@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None):
# get a complete list of packages listed in input
pkgs_cfgd = set()
- for key, content in selsets.items():
+ for _key, content in selsets.items():
for line in content.splitlines():
if line.startswith("#"):
continue
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 233da1ef..6813f534 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -42,7 +42,13 @@ schema = {
.. note::
bootcmd should only be used for things that could not be done later
- in the boot process."""),
+ in the boot process.
+
+ .. note::
+
+          When writing files, do not use /tmp as it races with
+          systemd-tmpfiles-clean (LP: #1707222). Use /run/somedir instead.
+ """),
'distros': distros,
'examples': [dedent("""\
bootcmd:
@@ -63,7 +69,6 @@ schema = {
'additionalProperties': False,
'minItems': 1,
'required': [],
- 'uniqueItems': True
}
}
}
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index c56319b5..885b3138 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
- util.subp(REJECT_CMD, capture=False)
+ reject_cmd = None
+ if util.which('ip'):
+ reject_cmd = REJECT_CMD_IP
+ elif util.which('ifconfig'):
+ reject_cmd = REJECT_CMD_IF
+ else:
+ log.error(('Neither "route" nor "ip" command found, unable to '
+ 'manipulate routing table'))
+ return
+ util.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c3e8c484..943089e0 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -680,13 +680,13 @@ def read_parttbl(device):
reliable way to probe the partition table.
"""
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
- udevadm_settle()
+ util.udevadm_settle()
try:
util.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
- udevadm_settle()
+ util.udevadm_settle()
def exec_mkpart_mbr(device, layout):
@@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout):
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-def udevadm_settle():
- util.subp(['udevadm', 'settle'])
-
-
def assert_and_settle_device(device):
"""Assert that device exists and settle so it is fully recognized."""
if not os.path.exists(device):
- udevadm_settle()
+ util.udevadm_settle()
if not os.path.exists(device):
raise RuntimeError("Device %s did not exist and was not created "
"with a udevamd settle." % device)
@@ -752,7 +748,7 @@ def assert_and_settle_device(device):
# Whether or not the device existed above, it is possible that udev
# events that would populate udev database (for reading by lsdname) have
# not yet finished. So settle again.
- udevadm_settle()
+ util.udevadm_settle()
def mkpart(device, definition):
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 69dc2d5e..eb9fbe66 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -43,7 +43,7 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = util.subp(check_cmd, env=myenv)
return 'upstart' in out
except util.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 09374d2e..24a8ebea 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
domain: <domain>
"""
+from cloudinit import log as logging
from cloudinit import util
import os
distros = ['ubuntu']
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
def handle(name, cfg, cloud, log, args):
# Get config
@@ -99,6 +104,7 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
+ util.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
@@ -109,6 +115,7 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
+ net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
and util.which(dconf_comm):
# Bridge configured through packaging
@@ -135,15 +142,18 @@ def handle(name, cfg, cloud, log, args):
else:
# Built-in LXD bridge support
cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
+ maybe_cleanup_default(
+ net_name=net_name, did_init=bool(init_cfg),
+ create=bool(cmd_create), attach=bool(cmd_attach))
if cmd_create:
log.debug("Creating lxd bridge: %s" %
" ".join(cmd_create))
- util.subp(cmd_create)
+ _lxc(cmd_create)
if cmd_attach:
log.debug("Setting up default lxd bridge: %s" %
" ".join(cmd_create))
- util.subp(cmd_attach)
+ _lxc(cmd_attach)
elif bridge_cfg:
raise RuntimeError(
@@ -204,10 +214,10 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("mode") == "none":
return None, None
- bridge_name = bridge_cfg.get("name", "lxdbr0")
+ bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
cmd_create = []
- cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
- "default", "eth0", "--force-local"]
+ cmd_attach = ["network", "attach-profile", bridge_name,
+ "default", "eth0"]
if bridge_cfg.get("mode") == "existing":
return None, cmd_attach
@@ -215,7 +225,7 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("mode") != "new":
raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
- cmd_create = ["lxc", "network", "create", bridge_name]
+ cmd_create = ["network", "create", bridge_name]
if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
cmd_create.append("ipv4.address=%s/%s" %
@@ -247,8 +257,49 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("domain"):
cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
- cmd_create.append("--force-local")
-
return cmd_create, cmd_attach
+
+def _lxc(cmd):
+ env = {'LC_ALL': 'C',
+ 'HOME': os.environ.get('HOME', '/root'),
+ 'USER': os.environ.get('USER', 'root')}
+ util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+
+
+def maybe_cleanup_default(net_name, did_init, create, attach,
+ profile="default", nic_name="eth0"):
+ """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
+ 'lxd init --auto' is run. Older versions did not.
+
+ By removing ay that lxd-init created, we simply leave the add/attach
+ code in-tact.
+
+ https://github.com/lxc/lxd/issues/4649"""
+ if net_name != _DEFAULT_NETWORK_NAME or not did_init:
+ return
+
+ fail_assume_enoent = "failed. Assuming it did not exist."
+ succeeded = "succeeded."
+ if create:
+ msg = "Deletion of lxd network '%s' %s"
+ try:
+ _lxc(["network", "delete", net_name])
+ LOG.debug(msg, net_name, succeeded)
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise e
+ LOG.debug(msg, net_name, fail_assume_enoent)
+
+ if attach:
+ msg = "Removal of device '%s' from profile '%s' %s"
+ try:
+ _lxc(["profile", "device", "remove", profile, nic_name])
+ LOG.debug(msg, nic_name, profile, succeeded)
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise e
+ LOG.debug(msg, nic_name, profile, fail_assume_enoent)
+
+
# vi: ts=4 expandtab
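
A sketch of the cleanup semantics introduced above, with hypothetical arguments mirroring the bridge_cfg path in handle(); cleanup only fires for the default network name after 'lxd init' has run:

    # Removes the auto-created lxdbr0 network (create=True) and the
    # default profile's eth0 device (attach=True) before re-adding them.
    maybe_cleanup_default(net_name='lxdbr0', did_init=True,
                          create=True, attach=True)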
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index f14a4fc5..339baba9 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
+MNT_COMMENT = "comment=cloudconfig"
LOG = logging.getLogger(__name__)
@@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
- except IOError as e:
- LOG.debug("Not creating swap. failed to read meminfo")
+ except IOError:
+ LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(tdir)
@@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg):
if os.path.exists(fname):
if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
- fname)
+ LOG.debug("swap file %s exists, but no /proc/swaps exists, "
+ "being safe", fname)
return fname
try:
for line in util.load_file("/proc/swaps").splitlines():
if line.startswith(fname + " "):
- LOG.debug("swap file %s already in use.", fname)
+ LOG.debug("swap file %s already in use", fname)
return fname
- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
+ LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
except Exception:
- LOG.warning("swap file %s existed. Error reading /proc/swaps",
+ LOG.warning("swap file %s exists. Error reading /proc/swaps",
fname)
return fname
@@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args):
LOG.debug("mounts configuration is %s", cfgmnt)
+ fstab_lines = []
+ fstab_devs = {}
+ fstab_removed = []
+
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
+
+        try:
+            toks = WS.split(line)
+            fstab_devs[toks[0]] = line
+        except Exception:
+            pass
+        fstab_lines.append(line)
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
@@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args):
start = str(cfgmnt[i][0])
sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ if sanitized != start:
+ log.debug("changed %s => %s" % (start, sanitized))
+
if sanitized is None:
- log.debug("Ignorming nonexistant named mount %s", start)
+ log.debug("Ignoring nonexistent named mount %s", start)
+ continue
+ elif sanitized in fstab_devs:
+ log.info("Device %s already defined in fstab: %s",
+ sanitized, fstab_devs[sanitized])
continue
- if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
cfgmnt[i][0] = sanitized
# in case the user did not quote a field (likely fs-freq, fs_passno)
@@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args):
for defmnt in defmnts:
start = defmnt[0]
sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignoring nonexistant default named mount %s", start)
- continue
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
+
+ if sanitized is None:
+ log.debug("Ignoring nonexistent default named mount %s", start)
+ continue
+ elif sanitized in fstab_devs:
+ log.debug("Device %s already defined in fstab: %s",
+ sanitized, fstab_devs[sanitized])
+ continue
+
defmnt[0] = sanitized
cfgmnt_has = False
@@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args):
actlist = []
for x in cfgmnt:
if x[1] is None:
- log.debug("Skipping non-existent device named %s", x[0])
+ log.debug("Skipping nonexistent device named %s", x[0])
else:
actlist.append(x)
@@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args):
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
if len(actlist) == 0:
- log.debug("No modifications to fstab needed.")
+ log.debug("No modifications to fstab needed")
return
- comment = "comment=cloudconfig"
cc_lines = []
needswap = False
dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
- line[3] = "%s,%s" % (line[3], comment)
+ line[3] = "%s,%s" % (line[3], MNT_COMMENT)
if line[2] == "swap":
needswap = True
if line[1].startswith("/"):
dirs.append(line[1])
cc_lines.append('\t'.join(line))
- fstab_lines = []
- removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- try:
- toks = WS.split(line)
- if toks[3].find(comment) != -1:
- removed.append(line)
- continue
- except Exception:
- pass
- fstab_lines.append(line)
-
for d in dirs:
try:
util.ensure_dir(d)
@@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Failed to make '%s' config-mount", d)
sadds = [WS.sub(" ", n) for n in cc_lines]
- sdrops = [WS.sub(" ", n) for n in removed]
+ sdrops = [WS.sub(" ", n) for n in fstab_removed]
sops = (["- " + drop for drop in sdrops if drop not in sadds] +
["+ " + add for add in sadds if add not in sdrops])
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index cbd0237d..9e074bda 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -10,20 +10,95 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
+import copy
import os
+import six
from textwrap import dedent
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
-TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+
+NTP_CLIENT_CONFIG = {
+ 'chrony': {
+ 'check_exe': 'chronyd',
+ 'confpath': '/etc/chrony.conf',
+ 'packages': ['chrony'],
+ 'service_name': 'chrony',
+ 'template_name': 'chrony.conf.{distro}',
+ 'template': None,
+ },
+ 'ntp': {
+ 'check_exe': 'ntpd',
+ 'confpath': NTP_CONF,
+ 'packages': ['ntp'],
+ 'service_name': 'ntp',
+ 'template_name': 'ntp.conf.{distro}',
+ 'template': None,
+ },
+ 'ntpdate': {
+ 'check_exe': 'ntpdate',
+ 'confpath': NTP_CONF,
+ 'packages': ['ntpdate'],
+ 'service_name': 'ntpdate',
+ 'template_name': 'ntp.conf.{distro}',
+ 'template': None,
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/lib/systemd/systemd-timesyncd',
+ 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
+ 'packages': [],
+ 'service_name': 'systemd-timesyncd',
+ 'template_name': 'timesyncd.conf',
+ 'template': None,
+ },
+}
+
+# This is Distro-specific configuration overrides of the base config
+DISTRO_CLIENT_CONFIG = {
+ 'debian': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ },
+ },
+ 'opensuse': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'service_name': 'ntpd',
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ },
+ },
+ 'sles': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'service_name': 'ntpd',
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ },
+ },
+ 'ubuntu': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ },
+ },
+}
# The schema definition for each cloud-config module is a strict contract for
@@ -48,7 +123,34 @@ schema = {
'distros': distros,
'examples': [
dedent("""\
+ # Override ntp with chrony configuration on Ubuntu
+ ntp:
+ enabled: true
+ ntp_client: chrony # Uses cloud-init default chrony configuration
+ """),
+ dedent("""\
+ # Provide a custom ntp client configuration
ntp:
+ enabled: true
+ ntp_client: myntpclient
+ config:
+ confpath: /etc/myntpclient/myntpclient.conf
+ check_exe: myntpclientd
+ packages:
+ - myntpclient
+ service_name: myntpclient
+ template: |
+ ## template:jinja
+ # My NTP Client config
+ {% if pools -%}# pools{% endif %}
+ {% for pool in pools -%}
+ pool {{pool}} iburst
+ {% endfor %}
+ {%- if servers %}# servers
+ {% endif %}
+ {% for server in servers -%}
+ server {{server}} iburst
+ {% endfor %}
pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
servers:
- ntp.server.local
@@ -83,79 +185,159 @@ schema = {
List of ntp servers. If both pools and servers are
empty, 4 default pool servers will be provided with
the format ``{0-3}.{distro}.pool.ntp.org``.""")
- }
+ },
+ 'ntp_client': {
+ 'type': 'string',
+ 'default': 'auto',
+ 'description': dedent("""\
+ Name of an NTP client to use to configure system NTP.
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
+ },
+ 'enabled': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': dedent("""\
+ Attempt to enable ntp clients if set to True. If set
+ to False, ntp client will not be configured or
+ installed"""),
+ },
+ 'config': {
+ 'description': dedent("""\
+ Configuration settings or overrides for the
+ ``ntp_client`` specified."""),
+ 'type': ['object'],
+ 'properties': {
+ 'confpath': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The path to where the ``ntp_client``
+ configuration is written."""),
+ },
+ 'check_exe': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The executable name for the ``ntp_client``.
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
+ },
+ 'packages': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of packages needed to be installed for the
+ selected ``ntp_client``."""),
+ },
+ 'service_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The systemd or sysvinit service name used to
+ start and stop the ``ntp_client``
+ service."""),
+ },
+ 'template': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Inline template allowing users to define their
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
+ """),
+ },
+ },
+ # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
+ # of builtin client values.
+ 'required': [],
+ 'minProperties': 1, # If we have config, define something
+ 'additionalProperties': False
+ },
},
'required': [],
'additionalProperties': False
}
}
}
-
-__doc__ = get_schema_doc(schema) # Supplement python help()
+REQUIRED_NTP_CONFIG_KEYS = frozenset([
+ 'check_exe', 'confpath', 'packages', 'service_name'])
-def handle(name, cfg, cloud, log, _args):
- """Enable and configure ntp."""
- if 'ntp' not in cfg:
- LOG.debug(
- "Skipping module named %s, not present or disabled by cfg", name)
- return
- ntp_cfg = cfg['ntp']
- if ntp_cfg is None:
- ntp_cfg = {} # Allow empty config which will install the package
+__doc__ = get_schema_doc(schema) # Supplement python help()
- # TODO drop this when validate_cloudconfig_schema is strict=True
- if not isinstance(ntp_cfg, (dict)):
- raise RuntimeError(
- "'ntp' key existed in config, but not a dictionary type,"
- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
- validate_cloudconfig_schema(cfg, schema)
- if ntp_installable():
- service_name = 'ntp'
- confpath = NTP_CONF
- template_name = None
- packages = ['ntp']
- check_exe = 'ntpd'
- else:
- service_name = 'systemd-timesyncd'
- confpath = TIMESYNCD_CONF
- template_name = 'timesyncd.conf'
- packages = []
- check_exe = '/lib/systemd/systemd-timesyncd'
-
- rename_ntp_conf()
- # ensure when ntp is installed it has a configuration file
- # to use instead of starting up with packaged defaults
- write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
- install_ntp(cloud.distro.install_packages, packages=packages,
- check_exe=check_exe)
+def distro_ntp_client_configs(distro):
+ """Construct a distro-specific ntp client config dictionary by merging
+ distro specific changes into base config.
- try:
- reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
- LOG.exception("Failed to reload/start ntp service: %s", e)
- raise
+ @param distro: String providing the distro class name.
+ @returns: Dict of distro configurations for ntp clients.
+ """
+ dcfg = DISTRO_CLIENT_CONFIG
+ cfg = copy.copy(NTP_CLIENT_CONFIG)
+ if distro in dcfg:
+ cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True)
+ return cfg
-def ntp_installable():
- """Check if we can install ntp package
+def select_ntp_client(ntp_client, distro):
+ """Determine which ntp client is to be used, consulting the distro
+ for its preference.
- Ubuntu-Core systems do not have an ntp package available, so
- we always return False. Other systems require package managers to install
- the ntp package If we fail to find one of the package managers, then we
- cannot install ntp.
+ @param ntp_client: String name of the ntp client to use.
+ @param distro: Distro class instance.
+ @returns: Dict of the selected ntp client or {} if none selected.
"""
- if util.system_is_snappy():
- return False
- if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
- return True
+ # construct distro-specific ntp_client_config dict
+ distro_cfg = distro_ntp_client_configs(distro.name)
+
+ # user specified client, return its config
+ if ntp_client and ntp_client != 'auto':
+ LOG.debug('Selected NTP client "%s" via user-data configuration',
+ ntp_client)
+ return distro_cfg.get(ntp_client, {})
+
+ # default to auto if unset in distro
+ distro_ntp_client = distro.get_option('ntp_client', 'auto')
+
+ clientcfg = {}
+ if distro_ntp_client == "auto":
+ for client in distro.preferred_ntp_clients:
+ cfg = distro_cfg.get(client)
+ if util.which(cfg.get('check_exe')):
+ LOG.debug('Selected NTP client "%s", already installed',
+ client)
+ clientcfg = cfg
+ break
+
+ if not clientcfg:
+ client = distro.preferred_ntp_clients[0]
+ LOG.debug(
+ 'Selected distro preferred NTP client "%s", not yet installed',
+ client)
+ clientcfg = distro_cfg.get(client)
+ else:
+ LOG.debug('Selected NTP client "%s" via distro system config',
+ distro_ntp_client)
+ clientcfg = distro_cfg.get(distro_ntp_client, {})
+
+ return clientcfg
- return False
+def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
+ """Install ntp client package if not already installed.
-def install_ntp(install_func, packages=None, check_exe="ntpd"):
+ @param install_func: function. This parameter is invoked with the contents
+ of the packages parameter.
+ @param packages: list. This parameter defaults to ['ntp'].
+    @param check_exe: string. The name of a binary whose presence indicates
+        that the specified package is already installed.
+ """
if util.which(check_exe):
return
if packages is None:
@@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
install_func(packages)
-def rename_ntp_conf(config=None):
- """Rename any existing ntp.conf file"""
- if config is None: # For testing
- config = NTP_CONF
- if os.path.exists(config):
- util.rename(config, config + ".dist")
+def rename_ntp_conf(confpath=None):
+ """Rename any existing ntp client config file
+
+ @param confpath: string. Specify a path to an existing ntp client
+ configuration file.
+ """
+ if os.path.exists(confpath):
+ util.rename(confpath, confpath + ".dist")
def generate_server_names(distro):
+ """Generate a list of server names to populate an ntp client configuration
+ file.
+
+    @param distro: string. Specify the distro name.
+    @returns: A list of strings representing ntp servers for this distro.
+ """
names = []
pool_distro = distro
# For legal reasons x.pool.sles.ntp.org does not exist,
@@ -185,34 +375,60 @@ def generate_server_names(distro):
return names
-def write_ntp_config_template(cfg, cloud, path, template=None):
- servers = cfg.get('servers', [])
- pools = cfg.get('pools', [])
+def write_ntp_config_template(distro_name, servers=None, pools=None,
+ path=None, template_fn=None, template=None):
+ """Render a ntp client configuration for the specified client.
+
+ @param distro_name: string. The distro class name.
+ @param servers: A list of strings specifying ntp servers. Defaults to empty
+ list.
+ @param pools: A list of strings specifying ntp pools. Defaults to empty
+ list.
+ @param path: A string to specify where to write the rendered template.
+ @param template_fn: A string to specify the template source file.
+ @param template: A string specifying the contents of the template. This
+ content will be written to a temporary file before being used to render
+ the configuration file.
+
+ @raises: ValueError when path is None.
+ @raises: ValueError when template_fn is None and template is None.
+ """
+ if not servers:
+ servers = []
+ if not pools:
+ pools = []
if len(servers) == 0 and len(pools) == 0:
- pools = generate_server_names(cloud.distro.name)
+ pools = generate_server_names(distro_name)
LOG.debug(
'Adding distro default ntp pool servers: %s', ','.join(pools))
- params = {
- 'servers': servers,
- 'pools': pools,
- }
+ if not path:
+ raise ValueError('Invalid value for path parameter')
- if template is None:
- template = 'ntp.conf.%s' % cloud.distro.name
+ if not template_fn and not template:
+        raise ValueError('Neither template_fn nor template provided')
- template_fn = cloud.get_template_filename(template)
- if not template_fn:
- template_fn = cloud.get_template_filename('ntp.conf')
- if not template_fn:
- raise RuntimeError(
- 'No template found, not rendering {path}'.format(path=path))
+ params = {'servers': servers, 'pools': pools}
+ if template:
+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ template_fn = tfile[1] # filepath is second item in tuple
+ util.write_file(template_fn, content=template)
templater.render_to_file(template_fn, path, params)
+ # clean up temporary template
+ if template:
+ util.del_file(template_fn)
def reload_ntp(service, systemd=False):
+ """Restart or reload an ntp system service.
+
+ @param service: A string specifying the name of the service to be affected.
+ @param systemd: A boolean indicating if the distro uses systemd, defaults
+ to False.
+    @returns: None. Raises util.ProcessExecutionError if the command fails.
+ """
if systemd:
cmd = ['systemctl', 'reload-or-restart', service]
else:
@@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False):
util.subp(cmd, capture=True)
+def supplemental_schema_validation(ntp_config):
+ """Validate user-provided ntp:config option values.
+
+ This function supplements flexible jsonschema validation with specific
+ value checks to aid in triage of invalid user-provided configuration.
+
+ @param ntp_config: Dictionary of configuration value under 'ntp'.
+
+ @raises: ValueError describing invalid values provided.
+ """
+ errors = []
+ missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
+ if missing:
+ keys = ', '.join(sorted(missing))
+ errors.append(
+ 'Missing required ntp:config keys: {keys}'.format(keys=keys))
+ elif not any([ntp_config.get('template'),
+ ntp_config.get('template_name')]):
+ errors.append(
+ 'Either ntp:config:template or ntp:config:template_name values'
+ ' are required')
+ for key, value in sorted(ntp_config.items()):
+ keypath = 'ntp:config:' + key
+ if key == 'confpath':
+ if not all([value, isinstance(value, six.string_types)]):
+ errors.append(
+ 'Expected a config file path {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif key == 'packages':
+ if not isinstance(value, list):
+ errors.append(
+ 'Expected a list of required package names for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif key in ('template', 'template_name'):
+ if value is None: # Either template or template_name can be none
+ continue
+ if not isinstance(value, six.string_types):
+ errors.append(
+ 'Expected a string type for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif not isinstance(value, six.string_types):
+ errors.append(
+ 'Expected a string type for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+
+ if errors:
+        raise ValueError('Invalid ntp configuration:\n{errors}'.format(
+ errors='\n'.join(errors)))
+
+
+def handle(name, cfg, cloud, log, _args):
+ """Enable and configure ntp."""
+ if 'ntp' not in cfg:
+ LOG.debug(
+ "Skipping module named %s, not present or disabled by cfg", name)
+ return
+ ntp_cfg = cfg['ntp']
+ if ntp_cfg is None:
+ ntp_cfg = {} # Allow empty config which will install the package
+
+ # TODO drop this when validate_cloudconfig_schema is strict=True
+ if not isinstance(ntp_cfg, (dict)):
+ raise RuntimeError(
+ "'ntp' key existed in config, but not a dictionary type,"
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+
+ validate_cloudconfig_schema(cfg, schema)
+
+ # Allow users to explicitly enable/disable
+ enabled = ntp_cfg.get('enabled', True)
+ if util.is_false(enabled):
+ LOG.debug("Skipping module named %s, disabled by cfg", name)
+ return
+
+ # Select which client is going to be used and get the configuration
+ ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
+ cloud.distro)
+
+ # Allow user ntp config to override distro configurations
+ ntp_client_config = util.mergemanydict(
+ [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+
+ supplemental_schema_validation(ntp_client_config)
+ rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+
+ template_fn = None
+ if not ntp_client_config.get('template'):
+ template_name = (
+ ntp_client_config.get('template_name').replace('{distro}',
+ cloud.distro.name))
+ template_fn = cloud.get_template_filename(template_name)
+ if not template_fn:
+ msg = ('No template found, not rendering %s' %
+ ntp_client_config.get('template_name'))
+ raise RuntimeError(msg)
+
+ write_ntp_config_template(cloud.distro.name,
+ servers=ntp_cfg.get('servers', []),
+ pools=ntp_cfg.get('pools', []),
+ path=ntp_client_config.get('confpath'),
+ template_fn=template_fn,
+ template=ntp_client_config.get('template'))
+
+ install_ntp_client(cloud.distro.install_packages,
+ packages=ntp_client_config['packages'],
+ check_exe=ntp_client_config['check_exe'])
+ try:
+ reload_ntp(ntp_client_config['service_name'],
+ systemd=cloud.distro.uses_systemd())
+ except util.ProcessExecutionError as e:
+ LOG.exception("Failed to reload/start ntp service: %s", e)
+ raise
+
# vi: ts=4 expandtab
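
As a side note on the aggregate-then-raise pattern used by supplemental_schema_validation() above, here is a minimal standalone sketch; the required-key set is an assumption mirroring the REQUIRED_NTP_CONFIG_KEYS constant referenced by the patch, and the sample config is invented:

    REQUIRED_NTP_CONFIG_KEYS = frozenset(
        ['check_exe', 'confpath', 'packages', 'service_name'])  # assumed set

    def check_ntp_config(ntp_config):
        errors = []
        missing = REQUIRED_NTP_CONFIG_KEYS.difference(ntp_config.keys())
        if missing:
            errors.append('Missing required ntp:config keys: %s'
                          % ', '.join(sorted(missing)))
        if not isinstance(ntp_config.get('packages', []), list):
            errors.append('Expected a list for ntp:config:packages')
        if errors:
            raise ValueError('Invalid ntp configuration:\n%s'
                             % '\n'.join(errors))

    try:
        check_ntp_config({'confpath': '/etc/ntp.conf'})
    except ValueError as e:
        print(e)  # reports check_exe, packages and service_name as missing
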
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 878069b7..3be0d1c1 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -41,6 +41,7 @@ keys to post. Available keys are:
"""
from cloudinit import templater
+from cloudinit import url_helper
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args):
}
url = templater.render_string(url, url_params)
try:
- util.read_file_or_url(url, data=real_submit_keys,
- retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
+ url_helper.read_file_or_url(
+ url, data=real_submit_keys, retries=tries, sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths))
except Exception:
util.logexc(log, "Failed to post phone home data to %s in %s tries",
url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 4da3a588..50b37470 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -74,7 +74,7 @@ def givecmdline(pid):
if util.is_FreeBSD():
(output, _err) = util.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
- m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 013e69b5..2edddd0c 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
def _resize_ufs(mount_point, devpth):
- return ('growfs', devpth)
+ return ('growfs', '-y', devpth)
def _resize_zfs(mount_point, devpth):
@@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])
- return dumpfs_res
+ return util.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- gpart_res, err = util.subp(['gpart', 'show', part])
- return gpart_res
+ return util.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
if not line.startswith('#'):
newfs_cmd = shlex.split(line)
opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
- optlist, args = getopt.getopt(newfs_cmd[1:], opt_value)
+ optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
for o, a in optlist:
if o == "-s":
cur_fs_sz = int(a)
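
For context on the getopt hunk just above: _can_skip_resize_ufs parses a recorded newfs command line to recover the '-s <size>' the filesystem was created with. A standalone sketch using the same option string and an invented command line:

    import getopt
    import shlex

    # invented example of a recorded newfs command line
    line = 'newfs -O 2 -s 8388608 -b 32768 /dev/gpt/rootfs'
    newfs_cmd = shlex.split(line)
    opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
    optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
    cur_fs_sz = None
    for o, a in optlist:
        if o == '-s':
            cur_fs_sz = int(a)
    print(cur_fs_sz)  # 8388608
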
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 530808ce..edee01e5 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -126,7 +126,6 @@ class SubscriptionManager(object):
self.enable_repo = self.rhel_cfg.get('enable-repo')
self.disable_repo = self.rhel_cfg.get('disable-repo')
self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
def log_success(self, msg):
'''Simple wrapper for logging info messages. Useful for unittests'''
@@ -173,21 +172,12 @@ class SubscriptionManager(object):
cmd = ['identity']
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError:
return False
return True
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
- Uses the prefered cloud-init subprocess def of util.subp
- and runs subscription-manager. Breaking this to a
- separate function for later use in mocking and unittests
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
def rhn_register(self):
'''
Registers the system by userid and password or activation key
@@ -209,8 +199,7 @@ class SubscriptionManager(object):
cmd.append("--serverurl={0}".format(self.server_hostname))
try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -233,8 +222,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
+ return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -257,7 +245,7 @@ class SubscriptionManager(object):
.format(self.servicelevel)]
try:
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
@@ -275,7 +263,7 @@ class SubscriptionManager(object):
def _set_auto_attach(self):
cmd = ['attach', '--auto']
try:
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = _sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
@@ -294,12 +282,12 @@ class SubscriptionManager(object):
# Get all available pools
cmd = ['list', '--available', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
+ results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
cmd = ['list', '--consumed', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
+ results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
@@ -311,14 +299,14 @@ class SubscriptionManager(object):
'''
cmd = ['repos', '--list-enabled']
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
active_repos.append((repo.split(':')[1]).strip())
cmd = ['repos', '--list-disabled']
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
@@ -348,7 +336,7 @@ class SubscriptionManager(object):
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
self.log.debug("Attached the following pools to your "
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
@@ -425,7 +413,7 @@ class SubscriptionManager(object):
cmd.extend(enable_list)
try:
- self._sub_man_cli(cmd)
+ _sub_man_cli(cmd)
except util.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -441,4 +429,15 @@ class SubscriptionManager(object):
def is_configured(self):
return bool((self.userid and self.password) or self.activation_key)
+
+def _sub_man_cli(cmd, logstring_val=False):
+ '''
+    Uses the preferred cloud-init subprocess wrapper, util.subp,
+    to run subscription-manager. Kept as a module-level function
+    to ease mocking in unit tests.
+ '''
+ return util.subp(['subscription-manager'] + cmd,
+ logstring=logstring_val)
+
+
# vi: ts=4 expandtab
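
The repeated `[0]` indexing in this refactor works because util.subp returns a (stdout, stderr) tuple. A loose, standalone stand-in (no error handling, unlike util.subp, which raises ProcessExecutionError on failure; assumes a Unix echo):

    import subprocess

    def run(cmd):
        """Loose stand-in for cloudinit.util.subp: return (stdout, stderr)."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        out, err = proc.communicate()
        return out, err

    # keep stdout only, as `return_out = _sub_man_cli(cmd)[0]` does above
    stdout = run(['echo', 'Repo ID: rhel-7-server-rpms'])[0]
    print(stdout.strip())
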
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index af08788c..27d2366c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__)
COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
HOST_PORT_RE = re.compile(
r'^(?P<proto>[@]{0,2})'
- '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- '([:](?P<port>[0-9]+))?$')
+ r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+ r'([:](?P<port>[0-9]+))?$')
def reload_syslog(command=DEF_RELOAD, systemd=False):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 539cbd5d..1f75d6c5 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -42,6 +42,11 @@ schema = {
all commands must be proper yaml, so you have to quote any characters
yaml would eat (':' can be problematic)
+
+ .. note::
+
+        when writing files, do not use the /tmp dir as it races with
+        systemd-tmpfiles-clean (LP: #1707222); use /run/somedir instead.
"""),
'distros': distros,
'examples': [dedent("""\
@@ -66,7 +71,6 @@ schema = {
'additionalProperties': False,
'minItems': 1,
'required': [],
- 'uniqueItems': True
}
}
}
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index bb24d57f..5ef97376 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -68,16 +68,57 @@ import re
import sys
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
+from cloudinit import log as logging
+from cloudinit.ssh_util import update_ssh_config
from cloudinit import util
from string import ascii_letters, digits
+LOG = logging.getLogger(__name__)
+
# We are removing certain 'painful' letters/numbers
PW_SET = (''.join([x for x in ascii_letters + digits
if x not in 'loLOI01']))
+def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+ """Apply sshd PasswordAuthentication changes.
+
+    @param pw_auth: config setting from 'ssh_pwauth'.
+                    Best given as True, False, or "unchanged".
+    @param service_cmd: The service command list (default: ['service']).
+ @param service_name: The name of the sshd service for the system.
+
+ @return: None"""
+ cfg_name = "PasswordAuthentication"
+ if service_cmd is None:
+ service_cmd = ["service"]
+
+ if util.is_true(pw_auth):
+ cfg_val = 'yes'
+ elif util.is_false(pw_auth):
+ cfg_val = 'no'
+ else:
+ bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+ if pw_auth is None or pw_auth.lower() == 'unchanged':
+ LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
+ else:
+ LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
+ return
+
+ updated = update_ssh_config({cfg_name: cfg_val})
+ if not updated:
+ LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+ return
+
+ if 'systemctl' in service_cmd:
+ cmd = list(service_cmd) + ["restart", service_name]
+ else:
+ cmd = list(service_cmd) + [service_name, "restart"]
+ util.subp(cmd)
+ LOG.debug("Restarted the ssh daemon.")
+
+
def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
# if run from command line, and give args, wipe the chpasswd['list']
@@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- change_pwauth = False
- pw_auth = None
- if 'ssh_pwauth' in cfg:
- if util.is_true(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'yes'
- elif util.is_false(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'no'
- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not str(cfg['ssh_pwauth']).strip():
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not cfg['ssh_pwauth']:
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- else:
- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
- util.logexc(log, msg)
-
- if change_pwauth:
- replaced_auth = False
-
- # See: man sshd_config
- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
- new_lines = []
- i = 0
- for (i, line) in enumerate(old_lines):
- # Keywords are case-insensitive and arguments are case-sensitive
- if line.key == 'passwordauthentication':
- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
- replaced_auth = True
- line.value = pw_auth
- new_lines.append(line)
-
- if not replaced_auth:
- log.debug("Adding new auth line %s", i + 1)
- replaced_auth = True
- new_lines.append(ssh_util.SshdConfigLine('',
- 'PasswordAuthentication',
- pw_auth))
-
- lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines),
- copy_mode=True)
-
- try:
- cmd = cloud.distro.init_cmd # Default service
- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
- cmd.append('restart')
- if 'systemctl' in cmd: # Switch action ordering
- cmd[1], cmd[2] = cmd[2], cmd[1]
- cmd = filter(None, cmd) # Remove empty arguments
- util.subp(cmd)
- log.debug("Restarted the ssh daemon")
- except Exception:
- util.logexc(log, "Restarting of the ssh daemon failed")
+ handle_ssh_pwauth(
+ cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
+ service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 34a53fd4..90724b81 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -110,7 +110,6 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud):
return
try:
cloud.distro.update_package_sources()
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Package update failed")
raise
try:
cloud.distro.install_packages(['squashfuse'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to install squashfuse")
raise
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bab80bbe..15bee2d3 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
def read_installed_packages():
ret = []
- for (name, date, version, dev) in read_pkg_data():
+ for (name, _date, _version, dev) in read_pkg_data():
if dev:
ret.append(NAMESPACE_DELIM.join([name, dev]))
else:
@@ -222,7 +222,7 @@ def read_installed_packages():
def read_pkg_data():
- out, err = util.subp([SNAPPY_CMD, "list"])
+ out, _err = util.subp([SNAPPY_CMD, "list"])
pkg_data = []
for line in out.splitlines()[1:]:
toks = line.split(sep=None, maxsplit=3)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 45204a07..f8f7cb35 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -101,10 +101,6 @@ from cloudinit.distros import ug_util
from cloudinit import ssh_util
from cloudinit import util
-DISABLE_ROOT_OPTS = (
- "no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
@@ -185,7 +181,7 @@ def handle(_name, cfg, cloud, log, _args):
(user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
+ ssh_util.DISABLE_USER_OPTS)
keys = cloud.get_public_ssh_keys() or []
if "ssh_authorized_keys" in cfg:
@@ -207,6 +203,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
if not user:
user = "NONE"
key_prefix = disable_root_opts.replace('$USER', user)
+ key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
else:
key_prefix = ''
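
apply_credentials() now substitutes two placeholders into the root key options. A sketch with a shortened, invented stand-in for ssh_util.DISABLE_USER_OPTS:

    # shortened, invented stand-in for ssh_util.DISABLE_USER_OPTS
    template = ('command="echo Please login as $USER'
                ' rather than $DISABLE_USER"')

    key_prefix = template.replace('$USER', 'ubuntu')
    key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
    print(key_prefix)
    # command="echo Please login as ubuntu rather than root"
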
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 16b1868b..5e082bd6 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -87,7 +87,6 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
}
},
'additionalProperties': False, # Reject keys not in schema
@@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud):
return
try:
cloud.distro.update_package_sources()
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Package update failed")
raise
try:
cloud.distro.install_packages(['ubuntu-advantage-tools'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index b215e95a..c32a743a 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -52,10 +52,20 @@ config keys for an entry in ``users`` are as follows:
associated with the address, username and SSH keys will be requested from
there. Default: none
- ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
- authkeys file. Default: none
- - ``ssh_import_id``: Optional. SSH id to import for user. Default: none
- - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
- Default: none.
+      authkeys file. Default: none. This key cannot be combined with
+      ``ssh_redirect_user``.
+    - ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
+      This key cannot be combined with ``ssh_redirect_user``.
+    - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
+      logins for this user. When specified, all cloud meta-data public ssh
+      keys will be set up in a disabled state for this username. Any ssh
+      login as this username will time out and prompt with a message to log
+      in instead as the configured <default_username> for this instance.
+      Default: false. This key cannot be combined with ``ssh_import_id`` or
+      ``ssh_authorized_keys``.
+    - ``sudo``: Optional. Sudo rule to use, a list of sudo rules to use, or
+      False. Default: none. Omitting the sudo key, or setting it to none or
+      false, results in no sudo rules being written for the user.
- ``system``: Optional. Create user as system user with no home directory.
Default: false
- ``uid``: Optional. The user's ID. Default: The next available value.
@@ -82,6 +92,9 @@ config keys for an entry in ``users`` are as follows:
users:
- default
+ # User explicitly omitted from sudo permission; also default behavior.
+ - name: <some_restricted_user>
+ sudo: false
- name: <username>
expiredate: <date>
gecos: <comment>
@@ -97,6 +110,7 @@ config keys for an entry in ``users`` are as follows:
selinux_user: <selinux username>
shell: <shell path>
snapuser: <email>
+ ssh_redirect_user: <true/false>
ssh_authorized_keys:
- <key>
- <key>
@@ -110,17 +124,44 @@ config keys for an entry in ``users`` are as follows:
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit.distros import ug_util
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+LOG = logging.getLogger(__name__)
+
frequency = PER_INSTANCE
def handle(name, cfg, cloud, _log, _args):
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ (default_user, _user_config) = ug_util.extract_default(users)
+ cloud_keys = cloud.get_public_ssh_keys() or []
for (name, members) in groups.items():
cloud.distro.create_group(name, members)
for (user, config) in users.items():
+ ssh_redirect_user = config.pop("ssh_redirect_user", False)
+ if ssh_redirect_user:
+ if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
+ raise ValueError(
+ 'Not creating user %s. ssh_redirect_user cannot be'
+ ' provided with ssh_import_id or ssh_authorized_keys' %
+ user)
+ if ssh_redirect_user not in (True, 'default'):
+ raise ValueError(
+ 'Not creating user %s. Invalid value of'
+ ' ssh_redirect_user: %s. Expected values: true, default'
+ ' or false.' % (user, ssh_redirect_user))
+ if default_user is None:
+ LOG.warning(
+ 'Ignoring ssh_redirect_user: %s for %s.'
+ ' No default_user defined.'
+                    ' Perhaps missing cloud configuration users:'
+                    ' [default, ..].',
+ ssh_redirect_user, user)
+ else:
+ config['ssh_redirect_user'] = default_user
+ config['cloud_public_ssh_keys'] = cloud_keys
cloud.distro.create_user(user, **config)
# vi: ts=4 expandtab
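
A standalone mirror of the ssh_redirect_user branching added to handle() above; illustrative only, since the real handler also logs a warning when no default user exists and passes the result on to distro.create_user:

    def resolve_redirect(user, config, default_user):
        value = config.pop('ssh_redirect_user', False)
        if not value:
            return config
        if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
            raise ValueError(
                'ssh_redirect_user cannot be combined with ssh keys'
                ' for %s' % user)
        if value not in (True, 'default'):
            raise ValueError(
                'Expected true, default or false. Got %r' % value)
        if default_user is not None:
            config['ssh_redirect_user'] = default_user
        return config

    print(resolve_redirect('me2', {'ssh_redirect_user': True}, 'ubuntu'))
    # {'ssh_redirect_user': 'ubuntu'}
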
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 54ae3a68..31d1db61 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -15,9 +15,14 @@ binary gzip data can be specified and will be decoded before being written.
.. note::
if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standargs. to specify binary data, use the yaml
+ follows yaml formatting standards. to specify binary data, use the yaml
option ``!!binary``
+.. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+    the early boot process. Use /run/somedir instead to avoid that race
+    (LP: #1707222).
+
**Internal name:** ``cc_write_files``
**Module frequency:** per instance
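
To make the /tmp warning concrete, here is a compliant write_files entry as it would appear in the merged cloud-config dict the module receives; the keys follow the module's documented schema, the values are invented:

    cfg = {
        'write_files': [{
            'path': '/run/mydir/marker.txt',  # /run, not /tmp (LP: #1707222)
            'content': 'written during early boot\n',
            'permissions': '0644',
        }]
    }
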
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index ca7d0d5b..080a6d06 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -4,7 +4,7 @@
from __future__ import print_function
from cloudinit import importer
-from cloudinit.util import find_modules, read_file_or_url
+from cloudinit.util import find_modules, load_file
import argparse
from collections import defaultdict
@@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False):
def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
"""Return contents of the cloud-config file annotated with schema errors.
- @param cloudconfig: YAML-loaded object from the original_content.
+ @param cloudconfig: YAML-loaded dict from the original_content or empty
+ dict if unparseable.
@param original_content: The contents of a cloud-config file
@param schema_errors: List of tuples from a JSONSchemaValidationError. The
tuples consist of (schemapath, error_message).
"""
if not schema_errors:
return original_content
- schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+ schemapaths = {}
+ if cloudconfig:
+ schemapaths = _schemapath_for_cloudconfig(
+ cloudconfig, original_content)
errors_by_line = defaultdict(list)
error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
- errors_by_line[schemapaths[path]].append(msg)
+ match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+ if match:
+ line, col = match.groups()
+ errors_by_line[int(line)].append(msg)
+ else:
+ col = None
+ errors_by_line[schemapaths[path]].append(msg)
+ if col is not None:
+ msg = 'Line {line} column {col}: {msg}'.format(
+ line=line, col=col, msg=msg)
error_footer.append('# E{0}: {1}'.format(error_count, msg))
error_count += 1
lines = original_content.decode().split('\n')
@@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
"""
if not os.path.exists(config_path):
raise RuntimeError('Configfile {0} does not exist'.format(config_path))
- content = read_file_or_url('file://{0}'.format(config_path)).contents
+ content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
- ('header', 'File {0} needs to begin with "{1}"'.format(
+ ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
config_path, CLOUD_CONFIG_HEADER.decode())),)
- raise SchemaValidationError(errors)
-
+ error = SchemaValidationError(errors)
+ if annotate:
+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ raise error
try:
cloudconfig = yaml.safe_load(content)
- except yaml.parser.ParserError as e:
- errors = (
- ('format', 'File {0} is not valid yaml. {1}'.format(
- config_path, str(e))),)
- raise SchemaValidationError(errors)
-
+    except yaml.YAMLError as e:
+ line = column = 1
+ mark = None
+ if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+ mark = getattr(e, 'context_mark')
+ elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+ mark = getattr(e, 'problem_mark')
+ if mark:
+ line = mark.line + 1
+ column = mark.column + 1
+ errors = (('format-l{line}.c{col}'.format(line=line, col=column),
+ 'File {0} is not valid yaml. {1}'.format(
+ config_path, str(e))),)
+ error = SchemaValidationError(errors)
+ if annotate:
+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ raise error
try:
validate_cloudconfig_schema(
cloudconfig, schema, strict=True)
@@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content):
list_index = 0
RE_YAML_INDENT = r'^(\s*)'
scopes = []
- for line_number, line in enumerate(content_lines):
+ for line_number, line in enumerate(content_lines, 1):
indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
line = line.strip()
if not line or line.startswith('#'):
@@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content):
scopes.append((indent_depth + 2, key + '.0'))
for inner_list_index in range(0, len(yaml.safe_load(value))):
list_key = key + '.' + str(inner_list_index)
- schema_line_numbers[list_key] = line_number + 1
- schema_line_numbers[key] = line_number + 1
+ schema_line_numbers[list_key] = line_number
+ schema_line_numbers[key] = line_number
return schema_line_numbers
@@ -297,8 +323,8 @@ def get_schema():
configs_dir = os.path.dirname(os.path.abspath(__file__))
potential_handlers = find_modules(configs_dir)
- for (fname, mod_name) in potential_handlers.items():
- mod_locs, looked_locs = importer.find_module(
+ for (_fname, mod_name) in potential_handlers.items():
+ mod_locs, _looked_locs = importer.find_module(
mod_name, ['cloudinit.config'], ['schema'])
if mod_locs:
mod = importer.import_module(mod_locs[0])
@@ -337,9 +363,11 @@ def handle_schema_args(name, args):
try:
validate_cloudconfig_file(
args.config_file, full_schema, args.annotate)
- except (SchemaValidationError, RuntimeError) as e:
+ except SchemaValidationError as e:
if not args.annotate:
error(str(e))
+ except RuntimeError as e:
+ error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
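
The new error paths encode the position as 'format-l<LINE>.c<COL>', which annotated_cloudconfig_file() parses back out with the regex shown above. A quick round-trip:

    import re

    path = 'format-l2.c4'
    match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
    line, col = match.groups()
    print('Line {0} column {1}'.format(line, col))  # Line 2 column 4
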
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
new file mode 100644
index 00000000..67646b03
--- /dev/null
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {'disable_ec2_metadata': 'true'}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+
+ with_logs = True
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ifconfig(self, m_subp, m_which):
+ """Set the route if ifconfig command is available"""
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ['route', 'add', '-host', '169.254.169.254', 'reject'],
+ capture=False)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ip(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
+ capture=False)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_no_tool(self, m_subp, m_which):
+ """Log error when neither route nor ip commands are available"""
+ m_which.return_value = None # Find neither ifconfig nor ip
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ self.assertEqual(
+ [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
new file mode 100644
index 00000000..b051ec82
--- /dev/null
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -0,0 +1,71 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import mock
+
+from cloudinit.config import cc_set_passwords as setpass
+from cloudinit.tests.helpers import CiTestCase
+from cloudinit import util
+
+MODPATH = "cloudinit.config.cc_set_passwords."
+
+
+class TestHandleSshPwauth(CiTestCase):
+ """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+ with_logs = True
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_unknown_value_logs_warning(self, m_subp):
+ setpass.handle_ssh_pwauth("floo")
+ self.assertIn("Unrecognized value: ssh_pwauth=floo",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+ """If systemctl in service cmd: systemctl restart name."""
+ setpass.handle_ssh_pwauth(
+ True, service_cmd=["systemctl"], service_name="myssh")
+ self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
+ m_subp.call_args)
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
+ """If systemctl in service cmd: systemctl restart name."""
+ setpass.handle_ssh_pwauth(
+ True, service_cmd=["service"], service_name="myssh")
+ self.assertEqual(mock.call(["service", "myssh", "restart"]),
+ m_subp.call_args)
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+ @mock.patch(MODPATH + "util.subp")
+ def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+ """If config is not updated, then no system restart should be done."""
+ setpass.handle_ssh_pwauth(True)
+ m_subp.assert_not_called()
+ self.assertIn("No need to restart ssh", self.logs.getvalue())
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+ """If 'unchanged', then no updates to config and no restart."""
+ setpass.handle_ssh_pwauth(
+ "unchanged", service_cmd=["systemctl"], service_name="myssh")
+ m_update_ssh_config.assert_not_called()
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_valid_change_values(self, m_subp):
+ """If value is a valid changen value, then update should be called."""
+ upname = MODPATH + "update_ssh_config"
+ optname = "PasswordAuthentication"
+ for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+ optval = "yes" if value in util.TRUE_STRINGS else "no"
+ with mock.patch(upname, return_value=False) as m_update:
+ setpass.handle_ssh_pwauth(value)
+ m_update.assert_called_with({optname: optval})
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index c5b4a9de..3c472891 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import (
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
from cloudinit.tests.helpers import (
- CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema)
+ CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
SYSTEM_USER_ASSERTION = """\
@@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase):
class TestRunCommands(CiTestCase):
with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
super(TestRunCommands, self).setUp()
@@ -245,9 +246,10 @@ class TestRunCommands(CiTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
+ schema = schema
def test_schema_warns_on_snap_not_as_dict(self):
"""If the snap configuration is not a dict, emit a warning."""
@@ -340,6 +342,30 @@ class TestSchema(CiTestCase):
{'snap': {'assertions': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': ["echo bye", "echo bye"]},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_dict_array(self):
+ """Duplicated commands dict/array entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_dict_string(self):
+ """Duplicated commands dict/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': "echo bye", '01': "echo bye"}},
+ "command entries can be duplicate.")
+
class TestHandle(CiTestCase):
@@ -399,8 +425,10 @@ class TestHandle(CiTestCase):
'snap': {'commands': ['echo "HI" >> %s' % outfile,
'echo "MOM" >> %s' % outfile]}}
mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with mock.patch(mock_path, new_callable=StringIO):
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+ with mock.patch(mock_path, new_callable=StringIO):
+ handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
@mock.patch('cloudinit.config.cc_snap.util.subp')
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
new file mode 100644
index 00000000..c8a4271f
--- /dev/null
+++ b/cloudinit/config/tests/test_ssh.py
@@ -0,0 +1,151 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_ssh
+from cloudinit import ssh_util
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_ssh."
+
+
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh(CiTestCase):
+ """Test cc_ssh handling of ssh config."""
+
+ def test_apply_credentials_with_user(self, m_setup_keys):
+ """Apply keys for the given user and root."""
+ keys = ["key1"]
+ user = "clouduser"
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_no_user(self, m_setup_keys):
+ """Apply keys for root only."""
+ keys = ["key1"]
+ user = None
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual([mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
+ """Apply keys for the given user and disable root ssh."""
+ keys = ["key1"]
+ user = "clouduser"
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
+ """Apply keys no user and disable root ssh."""
+ keys = ["key1"]
+ user = None
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with no config ignores generating existing keyfiles."""
+ cfg = {}
+ keys = ["key1"]
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ([], {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+ self.assertIn(
+ [mock.call('/etc/ssh/ssh_host_rsa_key'),
+ mock.call('/etc/ssh/ssh_host_dsa_key'),
+ mock.call('/etc/ssh/ssh_host_ecdsa_key'),
+ mock.call('/etc/ssh/ssh_host_ed25519_key')],
+ m_path_exists.call_args_list)
+ self.assertEqual([mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with no config and a default distro user."""
+ cfg = {}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with explicit disable_root and a default distro user."""
+ # This test is identical to test_handle_no_cfg_and_default_root,
+ # except this uses an explicit cfg value
+ cfg = {"disable_root": True}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test handle with disable_root == False."""
+ # When disable_root == False, the ssh redirect for root is skipped
+ cfg = {"disable_root": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
+ cc_ssh.handle("name", cfg, cloud, None, None)
+
+ self.assertEqual([mock.call(set(keys), user),
+ mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list)
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index f2a59faf..b7cf9bee 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import (
handle, maybe_install_ua_tools, run_commands, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from cloudinit.tests.helpers import (
+ CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
# Module path used in mocks
@@ -22,6 +23,7 @@ class FakeCloud(object):
class TestRunCommands(CiTestCase):
with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
super(TestRunCommands, self).setUp()
@@ -105,9 +107,10 @@ class TestRunCommands(CiTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
+ schema = schema
def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
"""If ubuntu-advantage configuration is not a dict, emit a warning."""
@@ -169,6 +172,30 @@ class TestSchema(CiTestCase):
{'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': ["echo bye", "echo bye"]},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_dict_array(self):
+ """Duplicated commands dict/array entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_dict_string(self):
+ """Duplicated commands dict/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': "echo bye", '01': "echo bye"}},
+ "command entries can be duplicate.")
+
class TestHandle(CiTestCase):
@@ -208,8 +235,10 @@ class TestHandle(CiTestCase):
'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
'echo "MOM" >> %s' % outfile]}}
mock_path = '%s.sys.stderr' % MPATH
- with mock.patch(mock_path, new_callable=StringIO):
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+ with mock.patch(mock_path, new_callable=StringIO):
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
+ args=None)
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
new file mode 100644
index 00000000..ba0afae3
--- /dev/null
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -0,0 +1,144 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_users_groups
+from cloudinit.tests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_users_groups"
+
+
+@mock.patch('cloudinit.distros.ubuntu.Distro.create_group')
+@mock.patch('cloudinit.distros.ubuntu.Distro.create_user')
+class TestHandleUsersGroups(CiTestCase):
+ """Test cc_users_groups handling of config."""
+
+ with_logs = True
+
+ def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
+ """Test handle with no config will not create users or groups."""
+ cfg = {} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_user.assert_not_called()
+ m_group.assert_not_called()
+
+ def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
+ """When users in config, create users with distro.create_user."""
+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', default=False)])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
+ """When ssh_redirect_user is True pass default user and cloud keys."""
+ cfg = {
+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
+ ssh_redirect_user='ubuntu')])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
+ """When ssh_redirect_user is 'default' pass default username."""
+ cfg = {
+ 'users': ['default', {'name': 'me2',
+ 'ssh_redirect_user': 'default'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
+ ssh_redirect_user='ubuntu')])
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is not 'default'."""
+ cfg = {
+ 'users': ['default', {'name': 'me2',
+ 'ssh_redirect_user': 'snowflake'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ with self.assertRaises(ValueError) as context_manager:
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_group.assert_not_called()
+ self.assertEqual(
+ 'Not creating user me2. Invalid value of ssh_redirect_user:'
+ ' snowflake. Expected values: true, default or false.',
+ str(context_manager.exception))
+
+ def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
+ """When unspecified ssh_redirect_user is false and not set up."""
+ cfg = {'users': ['default', {'name': 'me2'}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
+ 'groups': ['lxd', 'sudo'],
+ 'shell': '/bin/bash'}}
+ metadata = {'public-keys': ['key1']}
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_user.call_args_list,
+ [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
+ shell='/bin/bash'),
+ mock.call('me2', default=False)])
+ m_group.assert_not_called()
+
+ def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is True and no default user present."""
+ cfg = {
+ 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
+ # System config defines *no* default user for the distro.
+ sys_cfg = {}
+ metadata = {} # no public-keys defined
+ cloud = self.tmp_cloud(
+ distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ m_user.assert_called_once_with('me2', default=False)
+ m_group.assert_not_called()
+ self.assertEqual(
+ 'WARNING: Ignoring ssh_redirect_user: True for me2. No'
+ ' default_user defined. Perhaps missing'
+ ' cloud configuration users: [default, ..].\n',
+ self.logs.getvalue())
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 55260eae..ef618c28 100755..100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__)
# It could break when Amazon adds new regions and new AZs.
_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
+# Default NTP Client Configurations
+PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+
@six.add_metaclass(abc.ABCMeta)
class Distro(object):
@@ -60,6 +63,7 @@ class Distro(object):
tz_zone_dir = "/usr/share/zoneinfo"
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
+ _preferred_ntp_clients = None
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -70,11 +74,10 @@ class Distro(object):
def install_packages(self, pkglist):
raise NotImplementedError()
- @abc.abstractmethod
def _write_network(self, settings):
- # In the future use the http://fedorahosted.org/netcf/
- # to write this blob out in a distro format
- raise NotImplementedError()
+ raise RuntimeError(
+ "Legacy function '_write_network' was called in distro '%s'.\n"
+ "_write_network_config needs implementation.\n" % self.name)
def _write_network_config(self, settings):
raise NotImplementedError()
@@ -87,7 +90,7 @@ class Distro(object):
LOG.debug("Selected renderer '%s' from priority list: %s",
name, priority)
renderer = render_cls(config=self.renderer_configs.get(name))
- renderer.render_network_config(network_config=network_config)
+ renderer.render_network_config(network_config)
return []
def _find_tz_file(self, tz):
@@ -140,7 +143,11 @@ class Distro(object):
# this applies network where 'settings' is interfaces(5) style
# it is obsolete compared to apply_network_config
# Write it out
+
+ # pylint: disable=assignment-from-no-return
+ # We have implementations in arch, freebsd and gentoo still
dev_names = self._write_network(settings)
+ # pylint: enable=assignment-from-no-return
# Now try to bring them up
if bring_up:
return self._bring_up_interfaces(dev_names)
@@ -153,7 +160,7 @@ class Distro(object):
distro)
header = '\n'.join([
"# Converted from network_config for distro %s" % distro,
- "# Implmentation of _write_network_config is needed."
+ "# Implementation of _write_network_config is needed."
])
ns = network_state.parse_net_config_data(netconfig)
contents = eni.network_state_to_eni(
@@ -339,6 +346,14 @@ class Distro(object):
contents.write("%s\n" % (eh))
util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
+ @property
+ def preferred_ntp_clients(self):
+ """Allow distro to determine the preferred ntp client list"""
+ if not self._preferred_ntp_clients:
+ self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS)
+
+ return self._preferred_ntp_clients
+
def _bring_up_interface(self, device_name):
cmd = ['ifup', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
@@ -369,6 +384,9 @@ class Distro(object):
"""
Add a user to the system using standard GNU tools
"""
+ # XXX need to make add_user idempotent somehow as we
+ # still want to add groups or modify ssh keys on pre-existing
+ # users in the image.
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return
@@ -519,7 +537,7 @@ class Distro(object):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs:
+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
self.write_sudo_rules(name, kwargs['sudo'])
# Import SSH keys
@@ -535,10 +553,24 @@ class Distro(object):
LOG.warning("Invalid type '%s' detected for"
" 'ssh_authorized_keys', expected list,"
" string, dict, or set.", type(keys))
+ keys = []
else:
keys = set(keys) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
+ ssh_util.setup_user_keys(set(keys), name)
+ if 'ssh_redirect_user' in kwargs:
+ cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
+ if not cloud_keys:
+ LOG.warning(
+ 'Unable to disable ssh logins for %s given'
+ ' ssh_redirect_user: %s. No cloud public-keys present.',
+ name, kwargs['ssh_redirect_user'])
+ else:
+ redirect_user = kwargs['ssh_redirect_user']
+ disable_option = ssh_util.DISABLE_USER_OPTS
+ disable_option = disable_option.replace('$USER', redirect_user)
+ disable_option = disable_option.replace('$DISABLE_USER', name)
+ ssh_util.setup_user_keys(
+ set(cloud_keys), name, options=disable_option)
return True
def lock_passwd(self, name):
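
The preferred_ntp_clients property added above lazily copies a module-level default so each distro instance can override or mutate its own list without touching the shared constant. A standalone mirror:

    PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']

    class Distro(object):
        _preferred_ntp_clients = None  # subclasses may pre-seed their own

        @property
        def preferred_ntp_clients(self):
            if not self._preferred_ntp_clients:
                # copy the default so per-instance edits cannot leak back
                self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS)
            return self._preferred_ntp_clients

    print(Distro().preferred_ntp_clients[0])  # chrony
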
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 33cc0bf1..d517fb88 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -109,11 +109,6 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('install', pkgs=pkglist)
- def _write_network(self, settings):
- # this is a legacy method, it will always write eni
- util.write_file(self.network_conf_fn["eni"], settings)
- return ['all']
-
def _write_network_config(self, netconfig):
_maybe_remove_legacy_eth0()
return self._supported_write_network_config(netconfig)
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 754d3df6..ff22d568 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -110,15 +110,15 @@ class Distro(distros.Distro):
if dev.startswith('lo'):
return dev
- n = re.search('\d+$', dev)
+ n = re.search(r'\d+$', dev)
index = n.group(0)
- (out, err) = util.subp(['ifconfig', '-a'])
+ (out, _err) = util.subp(['ifconfig', '-a'])
ifconfigoutput = [x for x in (out.strip()).splitlines()
if len(x.split()) > 0]
bsddev = 'NOT_FOUND'
for line in ifconfigoutput:
- m = re.match('^\w+', line)
+ m = re.match(r'^\w+', line)
if m:
if m.group(0).startswith('lo'):
continue
@@ -128,7 +128,7 @@ class Distro(distros.Distro):
break
# Replace the index with the one we're after.
- bsddev = re.sub('\d+$', index, bsddev)
+ bsddev = re.sub(r'\d+$', index, bsddev)
LOG.debug("Using network interface %s", bsddev)
return bsddev
@@ -266,7 +266,7 @@ class Distro(distros.Distro):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs:
+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
self.write_sudo_rules(name, kwargs['sudo'])
# Import SSH keys
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 1ce1aa71..edfcd99d 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -67,6 +67,10 @@
# }
# }
+from cloudinit.net.network_state import (
+ net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
+
+
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
@@ -134,6 +138,21 @@ def translate_network(settings):
val = info[k].strip().lower()
if val:
iface_info[k] = val
+ # handle static ip configurations using
+ # ipaddress/prefix-length format
+ if 'address' in iface_info:
+ if 'netmask' not in iface_info:
+ # check if the address has a network prefix
+ addr, _, prefix = iface_info['address'].partition('/')
+ if prefix:
+ iface_info['netmask'] = (
+ net_prefix_to_ipv4_mask(prefix))
+ iface_info['address'] = addr
+ # if we set the netmask, we also can set the broadcast
+ iface_info['broadcast'] = (
+ mask_and_ipv4_to_bcast_addr(
+ iface_info['netmask'], addr))
+
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
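A hedged sketch of what the new prefix-handling block above produces, using the two helpers imported at the top of this hunk (values illustrative):

    from cloudinit.net.network_state import (
        net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)

    iface_info = {'address': '192.168.1.5/24'}
    addr, _, prefix = iface_info['address'].partition('/')
    if prefix:
        iface_info['netmask'] = net_prefix_to_ipv4_mask(prefix)  # 255.255.255.0
        iface_info['address'] = addr                             # 192.168.1.5
        iface_info['broadcast'] = mask_and_ipv4_to_bcast_addr(
            iface_info['netmask'], addr)                         # 192.168.1.255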
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 162dfa05..1bfe0478 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -16,7 +16,6 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.distros import net_util
from cloudinit.distros import rhel_util as rhutil
from cloudinit.settings import PER_INSTANCE
@@ -28,13 +27,23 @@ class Distro(distros.Distro):
hostname_conf_fn = '/etc/HOSTNAME'
init_cmd = ['service']
locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network'
+ network_conf_fn = '/etc/sysconfig/network/config'
network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
resolve_conf_fn = '/etc/resolv.conf'
route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
systemd_hostname_conf_fn = '/etc/hostname'
systemd_locale_conf_fn = '/etc/locale.conf'
tz_local_fn = '/etc/localtime'
+ renderer_configs = {
+ 'sysconfig': {
+ 'control': 'etc/sysconfig/network/config',
+ 'iface_templates': '%(base)s/network/ifcfg-%(name)s',
+ 'route_templates': {
+ 'ipv4': '%(base)s/network/ifroute-%(name)s',
+ 'ipv6': '%(base)s/network/ifroute-%(name)s',
+ }
+ }
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -162,50 +171,31 @@ class Distro(distros.Distro):
conf.set_hostname(hostname)
util.write_file(out_fn, str(conf), 0o644)
- def _write_network(self, settings):
- # Convert debian settings to ifcfg format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the suse format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- route_fn = self.route_conf_tpl % (dev)
- mode = None
- if info.get('auto', None):
- mode = 'auto'
- else:
- mode = 'manual'
- bootproto = info.get('bootproto', None)
- gateway = info.get('gateway', None)
- net_cfg = {
- 'BOOTPROTO': bootproto,
- 'BROADCAST': info.get('broadcast'),
- 'GATEWAY': gateway,
- 'IPADDR': info.get('address'),
- 'LLADDR': info.get('hwaddress'),
- 'NETMASK': info.get('netmask'),
- 'STARTMODE': mode,
- 'USERCONTROL': 'no'
- }
- if dev != 'lo':
- net_cfg['ETHTOOL_OPTIONS'] = ''
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ @property
+ def preferred_ntp_clients(self):
+ """The preferred ntp client is dependent on the version."""
+
+ """Allow distro to determine the preferred ntp client list"""
+ if not self._preferred_ntp_clients:
+ distro_info = util.system_info()['dist']
+ name = distro_info[0]
+ major_ver = int(distro_info[1].split('.')[0])
+
+            # This is horribly complicated because of a case of
+            # "we do not care if versions should be increasing" syndrome
+ if (
+ (major_ver >= 15 and 'openSUSE' not in name) or
+ (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
+ ):
+ self._preferred_ntp_clients = ['chrony',
+ 'systemd-timesyncd', 'ntp']
else:
- net_cfg['FIREWALL'] = 'no'
- rhutil.update_sysconfig_file(net_fn, net_cfg, True)
- if gateway and bootproto == 'static':
- default_route = 'default %s' % gateway
- util.write_file(route_fn, default_route, 0o644)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhutil.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- return dev_names
+ self._preferred_ntp_clients = ['ntp',
+ 'systemd-timesyncd', 'chrony']
+
+ return self._preferred_ntp_clients
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 1fecb619..f55d96f7 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -13,7 +13,6 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -39,6 +38,16 @@ class Distro(distros.Distro):
resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
+ renderer_configs = {
+ 'sysconfig': {
+ 'control': 'etc/sysconfig/network',
+ 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
+ 'route_templates': {
+ 'ipv4': '%(base)s/network-scripts/route-%(name)s',
+ 'ipv6': '%(base)s/network-scripts/route6-%(name)s'
+ }
+ }
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -55,54 +64,6 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
return self._supported_write_network_config(netconfig)
- def _write_network(self, settings):
- # TODO(harlowja) fix this... since this is the ubuntu format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the rhel format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- use_ipv6 = False
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- net_cfg = {
- 'DEVICE': dev,
- 'NETMASK': info.get('netmask'),
- 'IPADDR': info.get('address'),
- 'BOOTPROTO': info.get('bootproto'),
- 'GATEWAY': info.get('gateway'),
- 'BROADCAST': info.get('broadcast'),
- 'MACADDR': info.get('hwaddress'),
- 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
- }
- if info.get('inet6'):
- use_ipv6 = True
- net_cfg.update({
- 'IPV6INIT': _make_sysconfig_bool(True),
- 'IPV6ADDR': info.get('ipv6').get('address'),
- 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
- })
- rhel_util.update_sysconfig_file(net_fn, net_cfg)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- if dev_names:
- net_cfg = {
- 'NETWORKING': _make_sysconfig_bool(True),
- }
- # If IPv6 interface present, enable ipv6 networking
- if use_ipv6:
- net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
- net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
- rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
- return dev_names
-
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
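For orientation, a hedged sketch of how these renderer_configs values reach the sysconfig renderer (wiring per the Renderer.__init__ change later in this diff; paths are the RHEL ones above):

    from cloudinit.net import sysconfig

    renderer = sysconfig.Renderer(config={
        'control': 'etc/sysconfig/network',
        'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
        'route_templates': {
            'ipv4': '%(base)s/network-scripts/route-%(name)s',
            'ipv6': '%(base)s/network-scripts/route6-%(name)s'},
    })
    # renderer.templates now carries the control/iface/route templates
    # consulted when writing ifcfg-* and route-* files.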
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 82ca34f5..68154104 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -10,12 +10,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.distros import debian
+from cloudinit.distros import PREFERRED_NTP_CLIENTS
from cloudinit import log as logging
+from cloudinit import util
+
+import copy
LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
+
+ @property
+ def preferred_ntp_clients(self):
+ """The preferred ntp client is dependent on the version."""
+ if not self._preferred_ntp_clients:
+ (_name, _version, codename) = util.system_info()['dist']
+ # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
+ if codename == "xenial" and not util.system_is_snappy():
+ self._preferred_ntp_clients = ['ntp']
+ else:
+ self._preferred_ntp_clients = (
+ copy.deepcopy(PREFERRED_NTP_CLIENTS))
+ return self._preferred_ntp_clients
+
pass
+
# vi: ts=4 expandtab
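A small illustrative helper (hypothetical, not part of this diff) showing how a caller might consume the version-dependent ordering; util.which is the existence check cloud-init uses elsewhere:

    from cloudinit import util

    def first_installed_client(distro):
        """Pick the first preferred ntp client present on the system."""
        return next(
            (c for c in distro.preferred_ntp_clients if util.which(c)),
            None)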
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index dc3f0fc3..3b7b17f1 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest',
# NOT_FOUND occurs) and just in that case returning an empty string.
exception_cb = functools.partial(_skip_retry_on_codes,
SKIP_USERDATA_CODES)
- response = util.read_file_or_url(ud_url,
- ssl_details=ssl_details,
- timeout=timeout,
- retries=retries,
- exception_cb=exception_cb)
+ response = url_helper.read_file_or_url(
+ ud_url, ssl_details=ssl_details, timeout=timeout,
+ retries=retries, exception_cb=exception_cb)
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest',
ssl_details=None, timeout=5, retries=5,
leaf_decoder=None):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
- caller = functools.partial(util.read_file_or_url,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries)
+ caller = functools.partial(
+ url_helper.read_file_or_url, ssl_details=ssl_details,
+ timeout=timeout, retries=retries)
def mcaller(url):
return caller(url).contents
diff --git a/cloudinit/event.py b/cloudinit/event.py
new file mode 100644
index 00000000..f7b311fb
--- /dev/null
+++ b/cloudinit/event.py
@@ -0,0 +1,17 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Classes and functions related to event handling."""
+
+
+# Event types which can generate maintenance requests for cloud-init.
+class EventType(object):
+ BOOT = "System boot"
+ BOOT_NEW_INSTANCE = "New instance first boot"
+
+    # TODO: Cloud-init will grow support for the following event types:
+ # UDEV
+ # METADATA_CHANGE
+ # USER_REQUEST
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index d58d73e0..7fe17a2e 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -10,6 +10,8 @@
from cloudinit import log as logging
from cloudinit import util
+import time
+
LOG = logging.getLogger(__name__)
@@ -25,16 +27,46 @@ def export_armour(key):
return armour
-def recv_key(key, keyserver):
- """Receive gpg key from the specified keyserver"""
- LOG.debug('Receive gpg key "%s"', key)
- try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv", key],
- capture=True)
- except util.ProcessExecutionError as error:
- raise ValueError(('Failed to import key "%s" '
- 'from server "%s" - error %s') %
- (key, keyserver, error))
+def recv_key(key, keyserver, retries=(1, 1)):
+ """Receive gpg key from the specified keyserver.
+
+ Retries are done by default because keyservers can be unreliable.
+ Additionally, there is no way to determine the difference between
+    a non-existent key and a failure. In both cases gpg (at least 2.2.4)
+    exits with status 2 and stderr: "keyserver receive failed: No data".
+    It is assumed that a key provided to cloud-init exists on the keyserver,
+    so retrying makes better sense than failing.
+
+ @param key: a string key fingerprint (as passed to gpg --recv-keys).
+ @param keyserver: the keyserver to request keys from.
+ @param retries: an iterable of sleep lengths for retries.
+ Use None to indicate no retries."""
+ LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver)
+ cmd = ["gpg", "--keyserver=%s" % keyserver, "--recv-keys", key]
+ if retries is None:
+ retries = []
+ trynum = 0
+ error = None
+ sleeps = iter(retries)
+ while True:
+ trynum += 1
+ try:
+ util.subp(cmd, capture=True)
+ LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
+ key, keyserver, trynum)
+ return
+ except util.ProcessExecutionError as e:
+ error = e
+ try:
+ naplen = next(sleeps)
+ LOG.debug(
+ "Import failed with exit code %d, will try again in %ss",
+ error.exit_code, naplen)
+ time.sleep(naplen)
+ except StopIteration:
+ raise ValueError(
+ ("Failed to import key '%s' from keyserver '%s' "
+ "after %d tries: %s") % (key, keyserver, trynum, error))
def delete_key(key):
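A short usage sketch of the new retry knob (key fingerprint illustrative):

    from cloudinit import gpg

    # Three attempts total, sleeping 1s then 2s between them; a final
    # failure raises ValueError carrying the last gpg error.
    gpg.recv_key('E4D304DF', 'keyserver.ubuntu.com', retries=(1, 2))
    # retries=None disables retrying entirely (single attempt).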
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index c3576c04..0db75af9 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -41,7 +41,7 @@ PART_HANDLER_FN_TMPL = 'part-handler-%03d'
# For parts without filenames
PART_FN_TPL = 'part-%03d'
-# Different file beginnings to there content type
+# Different file beginnings to their content type
INCLUSION_TYPES_MAP = {
'#include': 'text/x-include-url',
'#include-once': 'text/x-include-once-url',
@@ -52,6 +52,7 @@ INCLUSION_TYPES_MAP = {
'#cloud-boothook': 'text/cloud-boothook',
'#cloud-config-archive': 'text/cloud-config-archive',
'#cloud-config-jsonp': 'text/cloud-config-jsonp',
+ '## template: jinja': 'text/jinja2',
}
# Sorted longest first
@@ -69,9 +70,13 @@ class Handler(object):
def __repr__(self):
return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
- @abc.abstractmethod
def list_types(self):
- raise NotImplementedError()
+ # Each subclass must define the supported content prefixes it handles.
+ if not hasattr(self, 'prefixes'):
+ raise NotImplementedError('Missing prefixes subclass attribute')
+ else:
+ return [INCLUSION_TYPES_MAP[prefix]
+ for prefix in getattr(self, 'prefixes')]
@abc.abstractmethod
def handle_part(self, *args, **kwargs):
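With list_types now derived from a class attribute, a new handler only needs to declare its prefixes. A minimal sketch (hypothetical handler; each prefix must be a key of INCLUSION_TYPES_MAP):

    from cloudinit import handlers
    from cloudinit.settings import PER_ALWAYS

    class ExamplePartHandler(handlers.Handler):
        # inherited list_types() maps these to content types
        prefixes = ['#cloud-boothook']

        def __init__(self, paths, **_kwargs):
            handlers.Handler.__init__(self, PER_ALWAYS)

        def handle_part(self, data, ctype, filename, payload, frequency):
            pass  # process the part here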
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 057b4dbc..dca50a49 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -17,10 +17,13 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
-BOOTHOOK_PREFIX = "#cloud-boothook"
class BootHookPartHandler(handlers.Handler):
+
+ # The content prefixes this handler understands.
+ prefixes = ['#cloud-boothook']
+
def __init__(self, paths, datasource, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.boothook_dir = paths.get_ipath("boothooks")
@@ -28,16 +31,11 @@ class BootHookPartHandler(handlers.Handler):
if datasource:
self.instance_id = datasource.get_instance_id()
- def list_types(self):
- return [
- handlers.type_from_starts_with(BOOTHOOK_PREFIX),
- ]
-
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
filepath = os.path.join(self.boothook_dir, filename)
contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=BOOTHOOK_PREFIX)
+ prefix=self.prefixes[0])
util.write_file(filepath, contents.lstrip(), 0o700)
return filepath
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 178a5b9b..99bf0e61 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -42,14 +42,12 @@ DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
CLOUD_PREFIX = "#cloud-config"
JSONP_PREFIX = "#cloud-config-jsonp"
-# The file header -> content types this module will handle.
-CC_TYPES = {
- JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
- CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
-}
-
class CloudConfigPartHandler(handlers.Handler):
+
+ # The content prefixes this handler understands.
+ prefixes = [CLOUD_PREFIX, JSONP_PREFIX]
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
@@ -58,9 +56,6 @@ class CloudConfigPartHandler(handlers.Handler):
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
- def list_types(self):
- return list(CC_TYPES.values())
-
def _write_cloud_config(self):
if not self.cloud_fn:
return
@@ -138,7 +133,7 @@ class CloudConfigPartHandler(handlers.Handler):
# First time through, merge with an empty dict...
if self.cloud_buf is None or not self.file_names:
self.cloud_buf = {}
- if ctype == CC_TYPES[JSONP_PREFIX]:
+ if ctype == handlers.INCLUSION_TYPES_MAP[JSONP_PREFIX]:
self._merge_patch(payload)
else:
self._merge_part(payload, headers)
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
new file mode 100644
index 00000000..3fa4097e
--- /dev/null
+++ b/cloudinit/handlers/jinja_template.py
@@ -0,0 +1,137 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import re
+
+try:
+ from jinja2.exceptions import UndefinedError as JUndefinedError
+except ImportError:
+ # No jinja2 dependency
+ JUndefinedError = Exception
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
+from cloudinit.util import b64d, load_file, load_json, json_dumps
+
+from cloudinit.settings import PER_ALWAYS
+
+LOG = logging.getLogger(__name__)
+
+
+class JinjaTemplatePartHandler(handlers.Handler):
+
+ prefixes = ['## template: jinja']
+
+ def __init__(self, paths, **_kwargs):
+ handlers.Handler.__init__(self, PER_ALWAYS, version=3)
+ self.paths = paths
+ self.sub_handlers = {}
+ for handler in _kwargs.get('sub_handlers', []):
+ for ctype in handler.list_types():
+ self.sub_handlers[ctype] = handler
+
+ def handle_part(self, data, ctype, filename, payload, frequency, headers):
+ if ctype in handlers.CONTENT_SIGNALS:
+ return
+ jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ rendered_payload = render_jinja_payload_from_file(
+ payload, filename, jinja_json_file)
+ if not rendered_payload:
+ return
+ subtype = handlers.type_from_starts_with(rendered_payload)
+ sub_handler = self.sub_handlers.get(subtype)
+ if not sub_handler:
+ LOG.warning(
+ 'Ignoring jinja template for %s. Could not find supported'
+ ' sub-handler for type %s', filename, subtype)
+ return
+ if sub_handler.handler_version == 3:
+ sub_handler.handle_part(
+ data, ctype, filename, rendered_payload, frequency, headers)
+ elif sub_handler.handler_version == 2:
+ sub_handler.handle_part(
+ data, ctype, filename, rendered_payload, frequency)
+
+
+def render_jinja_payload_from_file(
+ payload, payload_fn, instance_data_file, debug=False):
+ """Render a jinja template payload sourcing variables from jinja_vars_path.
+
+ @param payload: String of jinja template content. Should begin with
+ ## template: jinja\n.
+ @param payload_fn: String representing the filename from which the payload
+ was read used in error reporting. Generally in part-handling this is
+ 'part-##'.
+ @param instance_data_file: A path to a json file containing variables that
+ will be used as jinja template variables.
+
+ @return: A string of jinja-rendered content with the jinja header removed.
+ Returns None on error.
+ """
+ instance_data = {}
+ rendered_payload = None
+ if not os.path.exists(instance_data_file):
+ raise RuntimeError(
+ 'Cannot render jinja template vars. Instance data not yet'
+ ' present at %s' % instance_data_file)
+ instance_data = load_json(load_file(instance_data_file))
+ rendered_payload = render_jinja_payload(
+ payload, payload_fn, instance_data, debug)
+ if not rendered_payload:
+ return None
+ return rendered_payload
+
+
+def render_jinja_payload(payload, payload_fn, instance_data, debug=False):
+ instance_jinja_vars = convert_jinja_instance_data(
+ instance_data,
+ decode_paths=instance_data.get('base64-encoded-keys', []))
+ if debug:
+ LOG.debug('Converted jinja variables\n%s',
+ json_dumps(instance_jinja_vars))
+ try:
+ rendered_payload = render_string(payload, instance_jinja_vars)
+ except (TypeError, JUndefinedError) as e:
+ LOG.warning(
+ 'Ignoring jinja template for %s: %s', payload_fn, str(e))
+ return None
+ warnings = [
+ "'%s'" % var.replace(MISSING_JINJA_PREFIX, '')
+ for var in re.findall(
+ r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)]
+ if warnings:
+ LOG.warning(
+ "Could not render jinja template variables in file '%s': %s",
+ payload_fn, ', '.join(warnings))
+ return rendered_payload
+
+
+def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()):
+ """Process instance-data.json dict for use in jinja templates.
+
+ Replace hyphens with underscores for jinja templates and decode any
+ base64_encoded_keys.
+ """
+ result = {}
+ decode_paths = [path.replace('-', '_') for path in decode_paths]
+ for key, value in sorted(data.items()):
+ if '-' in key:
+ # Standardize keys for use in #cloud-config/shell templates
+ key = key.replace('-', '_')
+ key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key
+ if key_path in decode_paths:
+ value = b64d(value)
+ if isinstance(value, dict):
+ result[key] = convert_jinja_instance_data(
+ value, key_path, sep=sep, decode_paths=decode_paths)
+ if re.match(r'v\d+', key):
+ # Copy values to top-level aliases
+ for subkey, subvalue in result[key].items():
+ result[subkey] = subvalue
+ else:
+ result[key] = value
+ return result
+
+# vi: ts=4 expandtab
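convert_jinja_instance_data in action, as a hedged example (note sorted() means 'base64-encoded-keys' is visited before 'v1'):

    from cloudinit.handlers.jinja_template import convert_jinja_instance_data

    data = {'v1': {'instance-id': 'i-123'}, 'base64-encoded-keys': []}
    result = convert_jinja_instance_data(data)
    # Hyphens become underscores and, because 'v1' matches r'v\d+',
    # its values are aliased at the top level:
    # {'base64_encoded_keys': [],
    #  'instance_id': 'i-123',
    #  'v1': {'instance_id': 'i-123'}}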
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index e4945a23..214714bc 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -17,21 +17,18 @@ from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
-SHELL_PREFIX = "#!"
class ShellScriptPartHandler(handlers.Handler):
+
+ prefixes = ['#!']
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
if 'script_path' in _kwargs:
self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
- def list_types(self):
- return [
- handlers.type_from_starts_with(SHELL_PREFIX),
- ]
-
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
# TODO(harlowja): maybe delete existing things here
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 1ca92d4b..83fb0724 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -18,19 +18,16 @@ from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
LOG = logging.getLogger(__name__)
-UPSTART_PREFIX = "#upstart-job"
class UpstartJobPartHandler(handlers.Handler):
+
+ prefixes = ['#upstart-job']
+
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_INSTANCE)
self.upstart_dir = paths.upstart_conf_d
- def list_types(self):
- return [
- handlers.type_from_starts_with(UPSTART_PREFIX),
- ]
-
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
return
@@ -97,7 +94,7 @@ def _has_suitable_upstart():
else:
util.logexc(LOG, "dpkg --compare-versions failed [%s]",
e.exit_code)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "dpkg --compare-versions failed")
return False
else:
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 1979cd96..dcd2645e 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -239,6 +239,10 @@ class ConfigMerger(object):
if cc_fn and os.path.isfile(cc_fn):
try:
i_cfgs.append(util.read_conf(cc_fn))
+ except PermissionError:
+ LOG.debug(
+ 'Skipped loading cloud-config from %s due to'
+ ' non-root.', cc_fn)
except Exception:
util.logexc(LOG, 'Failed loading of cloud-config from %s',
cc_fn)
@@ -449,4 +453,8 @@ class DefaultingConfigParser(RawConfigParser):
contents = '\n'.join([header, contents, ''])
return contents
+
+def identity(obj):
+    return obj
+
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 1d75c9ff..5ae312ba 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -38,10 +38,18 @@ DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
logging.Formatter.converter = time.gmtime
-def setupBasicLogging(level=DEBUG):
+def setupBasicLogging(level=DEBUG, formatter=None):
+ if not formatter:
+ formatter = logging.Formatter(DEF_CON_FORMAT)
root = logging.getLogger()
+ for handler in root.handlers:
+ if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'):
+ if handler.stream.name == '<stderr>':
+ handler.setLevel(level)
+ return
+ # Didn't have an existing stderr handler; create a new handler
console = logging.StreamHandler(sys.stderr)
- console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
+ console.setFormatter(formatter)
console.setLevel(level)
root.addHandler(console)
root.setLevel(level)
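A minimal sketch of the new formatter parameter (assuming it is called during early logging setup):

    import logging
    from cloudinit import log as ci_log

    ci_log.setupBasicLogging(
        level=logging.WARNING,
        formatter=logging.Formatter('%(levelname)s: %(message)s'))
    # A second call only adjusts the level on the existing stderr
    # handler instead of stacking another one.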
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f69c0ef2..f83d3681 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -107,6 +107,21 @@ def is_bond(devname):
return os.path.exists(sys_dev_path(devname, "bonding"))
+def is_renamed(devname):
+ """
+ /* interface name assignment types (sysfs name_assign_type attribute) */
+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
+ #define NET_NAME_USER 3 /* provided by user-space */
+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
+ """
+ name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
+ if name_assign_type and name_assign_type in ['3', '4']:
+ return True
+ return False
+
+
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
return 'DEVTYPE=vlan' in uevent.splitlines()
@@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None):
if not blacklist_drivers:
blacklist_drivers = []
+ if 'net.ifnames=0' in util.get_cmdline():
+ LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
+ else:
+ unstable = [device for device in get_devicelist()
+ if device != 'lo' and not is_renamed(device)]
+ if len(unstable):
+ LOG.debug('Found unstable nic names: %s; calling udevadm settle',
+ unstable)
+ msg = 'Waiting for udev events to settle'
+ util.log_time(LOG.debug, msg, func=util.udevadm_settle)
+
# get list of interfaces that could have connections
invalid_interfaces = set(['lo'])
potential_interfaces = set([device for device in get_devicelist()
@@ -295,7 +321,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
def _version_2(netcfg):
renames = []
- for key, ent in netcfg.get('ethernets', {}).items():
+ for ent in netcfg.get('ethernets', {}).values():
# only rename if configured to do so
name = ent.get('set-name')
if not name:
@@ -333,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False):
1: randomly generated 3: set using dev_set_mac_address"""
assign_type = read_sys_net_int(ifname, "addr_assign_type")
- if strict and assign_type is None:
- raise ValueError("%s had no addr_assign_type.")
+ if assign_type is None:
+ # None is returned if this nic had no 'addr_assign_type' entry.
+ # if strict, raise an error, if not return True.
+ if strict:
+            raise ValueError("%s had no addr_assign_type." % ifname)
+ return True
return assign_type in (0, 1, 3)
@@ -539,6 +569,20 @@ def get_interface_mac(ifname):
return read_sys_net_safe(ifname, path)
+def get_ib_interface_hwaddr(ifname, ethernet_format):
+ """Returns the string value of an Infiniband interface's hardware
+ address. If ethernet_format is True, an Ethernet MAC-style 6 byte
+ representation of the address will be returned.
+ """
+ # Type 32 is Infiniband.
+ if read_sys_net_safe(ifname, 'type') == '32':
+ mac = get_interface_mac(ifname)
+ if mac and ethernet_format:
+ # Use bytes 13-15 and 18-20 of the hardware address.
+ mac = mac[36:-14] + mac[51:]
+ return mac
+
+
def get_interfaces_by_mac():
"""Build a dictionary of tuples {mac: name}.
@@ -550,6 +594,15 @@ def get_interfaces_by_mac():
"duplicate mac found! both '%s' and '%s' have mac '%s'" %
(name, ret[mac], mac))
ret[mac] = name
+ # Try to get an Infiniband hardware address (in 6 byte Ethernet format)
+ # for the interface.
+ ib_mac = get_ib_interface_hwaddr(name, True)
+ if ib_mac:
+ if ib_mac in ret:
+ raise RuntimeError(
+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+ (name, ret[ib_mac], ib_mac))
+ ret[ib_mac] = name
return ret
@@ -577,6 +630,21 @@ def get_interfaces():
return ret
+def get_ib_hwaddrs_by_interface():
+ """Build a dictionary mapping Infiniband interface names to their hardware
+ address."""
+ ret = {}
+ for name, _, _, _ in get_interfaces():
+ ib_mac = get_ib_interface_hwaddr(name, False)
+ if ib_mac:
+ if ib_mac in ret:
+ raise RuntimeError(
+ "duplicate mac found! both '%s' and '%s' have mac '%s'" %
+ (name, ret[ib_mac], ib_mac))
+ ret[name] = ib_mac
+ return ret
+
+
class EphemeralIPv4Network(object):
"""Context manager which sets up temporary static network configuration.
@@ -668,6 +736,13 @@ class EphemeralIPv4Network(object):
self.interface, out.strip())
return
util.subp(
+ ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
+ 'src', self.ip], capture=True)
+ self.cleanup_cmds.insert(
+ 0,
+ ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
+ 'src', self.ip])
+ util.subp(
['ip', '-4', 'route', 'add', 'default', 'via', self.router,
'dev', self.interface], capture=True)
self.cleanup_cmds.insert(
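The 6-byte Ethernet form in get_ib_interface_hwaddr is carved out of the 20-byte Infiniband address by string slicing; a quick check of the arithmetic with an illustrative address:

    ib = ('80:00:00:48:fe:80:00:00:00:00:00:00:'
          '00:11:22:33:44:55:66:77')
    # bytes 13-15 and 18-20 of the colon-separated address:
    mac = ib[36:-14] + ib[51:]
    assert mac == '00:11:22:55:66:77'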
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 9e9fe0fe..f89a0f73 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None):
iface['mac_address'] = mac_addrs[name]
# Handle both IPv4 and IPv6 values
- for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')):
+ for pre in ('IPV4', 'IPV6'):
# if no IPV4ADDR or IPV6ADDR, then go on.
if pre + "ADDR" not in data:
continue
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 087c0c03..12cf5097 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
if leases_d is None:
leases_d = NETWORKD_LEASES_DIR
leases = networkd_load_leases(leases_d=leases_d)
- for ifindex, data in sorted(leases.items()):
+ for _ifindex, data in sorted(leases.items()):
if data.get(keyname):
return data[keyname]
return None
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index c6a71d16..c6f631a9 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -10,9 +10,12 @@ from . import ParserError
from . import renderer
from .network_state import subnet_is_ipv6
+from cloudinit import log as logging
from cloudinit import util
+LOG = logging.getLogger(__name__)
+
NET_CONFIG_COMMANDS = [
"pre-up", "up", "post-up", "down", "pre-down", "post-down",
]
@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):
# TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
# If the index is non-zero, this is an alias interface. Alias interfaces
# represent additional interface addresses, and should not have additional
# attributes. (extra attributes here are almost always either incorrect,
@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
value = 'on' if iface[key] else 'off'
if not value or key in ignore_map:
continue
+ if key == 'mtu' and ipv4_subnet_mtu:
+ if value != ipv4_subnet_mtu:
+ LOG.warning(
+ "Network config: ignoring %s device-level mtu:%s because"
+ " ipv4 subnet-level mtu:%s provided.",
+ iface['name'], value, ipv4_subnet_mtu)
+ continue
if key in multiline_keys:
for v in value:
content.append(" {0} {1}".format(renames.get(key, key), v))
@@ -237,8 +247,15 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[currif]['bridge']['ports'] = []
for iface in split[1:]:
ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
+ elif option == "bridge_hw":
+                    # The docs are confusing, so some write a literal 'MAC':
+                    #    bridge_hw MAC <address>
+                    # but the correct form is:
+                    #    bridge_hw <address>
+ if split[1].lower() == "mac":
+ ifaces[currif]['bridge']['mac'] = split[2]
+ else:
+ ifaces[currif]['bridge']['mac'] = split[1]
elif option == "bridge_pathcost":
if 'pathcost' not in ifaces[currif]['bridge']:
ifaces[currif]['bridge']['pathcost'] = {}
@@ -377,12 +394,15 @@ class Renderer(renderer.Renderer):
subnets = iface.get('subnets', {})
if subnets:
for index, subnet in enumerate(subnets):
+ ipv4_subnet_mtu = None
iface['index'] = index
iface['mode'] = subnet['type']
iface['control'] = subnet.get('control', 'auto')
subnet_inet = 'inet'
if subnet_is_ipv6(subnet):
subnet_inet += '6'
+ else:
+ ipv4_subnet_mtu = subnet.get('mtu')
iface['inet'] = subnet_inet
if subnet['type'].startswith('dhcp'):
iface['mode'] = 'dhcp'
@@ -397,7 +417,7 @@ class Renderer(renderer.Renderer):
_iface_start_entry(
iface, index, render_hwaddress=render_hwaddress) +
_iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index)
+ _iface_add_attrs(iface, index, ipv4_subnet_mtu)
)
for route in subnet.get('routes', []):
lines.extend(self._render_route(route, indent=" "))
@@ -409,7 +429,8 @@ class Renderer(renderer.Renderer):
if 'bond-master' in iface or 'bond-slaves' in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
- lines.extend(_iface_add_attrs(iface, index=0))
+ lines.extend(
+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
sections.append(lines)
return sections
@@ -459,7 +480,7 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
fpeni = util.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 63443484..bc1087f9 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
if key.startswith(match))
-def _extract_addresses(config, entry):
+def _extract_addresses(config, entry, ifname):
"""This method parse a cloudinit.net.network_state dictionary (config) and
maps netstate keys/values into a dictionary (entry) to represent
netplan yaml.
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):
addresses.append(addr)
+ if 'mtu' in config:
+ entry_mtu = entry.get('mtu')
+ if entry_mtu and config['mtu'] != entry_mtu:
+ LOG.warning(
+ "Network config: ignoring %s device-level mtu:%s because"
+ " ipv4 subnet-level mtu:%s provided.",
+ ifname, config['mtu'], entry_mtu)
+ else:
+ entry['mtu'] = config['mtu']
if len(addresses) > 0:
entry.update({'addresses': addresses})
if len(routes) > 0:
@@ -180,7 +189,7 @@ class Renderer(renderer.Renderer):
self._postcmds = config.get('postcmds', False)
self.clean_default = config.get('clean_default', True)
- def render_network_state(self, network_state, target):
+ def render_network_state(self, network_state, templates=None, target=None):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
else:
del eth['match']
del eth['set-name']
- if 'mtu' in ifcfg:
- eth['mtu'] = ifcfg.get('mtu')
-
- _extract_addresses(ifcfg, eth)
+ _extract_addresses(ifcfg, eth, ifname)
ethernets.update({ifname: eth})
elif if_type == 'bond':
@@ -285,10 +291,12 @@ class Renderer(renderer.Renderer):
if len(bond_config) > 0:
bond.update({'parameters': bond_config})
+ if ifcfg.get('mac_address'):
+ bond['macaddress'] = ifcfg.get('mac_address').lower()
slave_interfaces = ifcfg.get('bond-slaves')
if slave_interfaces == 'none':
_extract_bond_slaves_by_name(interfaces, bond, ifname)
- _extract_addresses(ifcfg, bond)
+ _extract_addresses(ifcfg, bond, ifname)
bonds.update({ifname: bond})
elif if_type == 'bridge':
@@ -321,7 +329,9 @@ class Renderer(renderer.Renderer):
if len(br_config) > 0:
bridge.update({'parameters': br_config})
- _extract_addresses(ifcfg, bridge)
+ if ifcfg.get('mac_address'):
+ bridge['macaddress'] = ifcfg.get('mac_address').lower()
+ _extract_addresses(ifcfg, bridge, ifname)
bridges.update({ifname: bridge})
elif if_type == 'vlan':
@@ -333,7 +343,7 @@ class Renderer(renderer.Renderer):
macaddr = ifcfg.get('mac_address', None)
if macaddr is not None:
vlan['macaddress'] = macaddr.lower()
- _extract_addresses(ifcfg, vlan)
+ _extract_addresses(ifcfg, vlan, ifname)
vlans.update({ifname: vlan})
# inject global nameserver values under each all interface which
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 6d63e5c5..f76e508a 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -7,6 +7,8 @@
import copy
import functools
import logging
+import socket
+import struct
import six
@@ -481,6 +483,10 @@ class NetworkStateInterpreter(object):
interfaces.update({iface['name']: iface})
+ @ensure_command_keys(['name'])
+ def handle_infiniband(self, command):
+ self.handle_physical(command)
+
@ensure_command_keys(['address'])
def handle_nameserver(self, command):
dns = self._network_state.get('dns')
@@ -886,12 +892,9 @@ def net_prefix_to_ipv4_mask(prefix):
This is the inverse of ipv4_mask_to_net_prefix.
24 -> "255.255.255.0"
Also supports input as a string."""
-
- mask = [0, 0, 0, 0]
- for i in list(range(0, int(prefix))):
- idx = int(i / 8)
- mask[idx] = mask[idx] + (1 << (7 - i % 8))
- return ".".join([str(x) for x in mask])
+ mask = socket.inet_ntoa(
+ struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
+ return mask
def ipv4_mask_to_net_prefix(mask):
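The struct/socket one-liner replaces the manual bit loop; a quick sanity check of the expression:

    import socket
    import struct

    prefix = 24
    mask = socket.inet_ntoa(
        struct.pack('>I', (0xffffffff << (32 - prefix)) & 0xffffffff))
    assert mask == '255.255.255.0'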
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 57652e27..5f32e90f 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -45,11 +45,14 @@ class Renderer(object):
return content.getvalue()
@abc.abstractmethod
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None,
+ target=None):
"""Render network state."""
- def render_network_config(self, network_config, target=None):
+ def render_network_config(self, network_config, templates=None,
+ target=None):
return self.render_network_state(
- network_state=parse_net_config_data(network_config), target=target)
+ network_state=parse_net_config_data(network_config),
+ templates=templates, target=target)
# vi: ts=4 expandtab
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 39d89c46..9c16d3a7 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -91,19 +91,20 @@ class ConfigMap(object):
class Route(ConfigMap):
"""Represents a route configuration."""
- route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s'
- route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s'
-
- def __init__(self, route_name, base_sysconf_dir):
+ def __init__(self, route_name, base_sysconf_dir,
+ ipv4_tpl, ipv6_tpl):
super(Route, self).__init__()
self.last_idx = 1
self.has_set_default_ipv4 = False
self.has_set_default_ipv6 = False
self._route_name = route_name
self._base_sysconf_dir = base_sysconf_dir
+ self.route_fn_tpl_ipv4 = ipv4_tpl
+ self.route_fn_tpl_ipv6 = ipv6_tpl
def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir)
+ r = Route(self._route_name, self._base_sysconf_dir,
+ self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6)
r._conf = self._conf.copy()
r.last_idx = self.last_idx
r.has_set_default_ipv4 = self.has_set_default_ipv4
@@ -169,18 +170,23 @@ class Route(ConfigMap):
class NetInterface(ConfigMap):
"""Represents a sysconfig/networking-script (and its config + children)."""
- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s'
-
iface_types = {
'ethernet': 'Ethernet',
'bond': 'Bond',
'bridge': 'Bridge',
+ 'infiniband': 'InfiniBand',
}
- def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'):
+ def __init__(self, iface_name, base_sysconf_dir, templates,
+ kind='ethernet'):
super(NetInterface, self).__init__()
self.children = []
- self.routes = Route(iface_name, base_sysconf_dir)
+ self.templates = templates
+ route_tpl = self.templates.get('route_templates')
+ self.routes = Route(iface_name, base_sysconf_dir,
+ ipv4_tpl=route_tpl.get('ipv4'),
+ ipv6_tpl=route_tpl.get('ipv6'))
+ self.iface_fn_tpl = self.templates.get('iface_templates')
self.kind = kind
self._iface_name = iface_name
@@ -213,7 +219,8 @@ class NetInterface(ConfigMap):
'name': self.name})
def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind)
+ c = NetInterface(self.name, self._base_sysconf_dir,
+ self.templates, kind=self._kind)
c._conf = self._conf.copy()
if copy_children:
c.children = list(self.children)
@@ -251,6 +258,8 @@ class Renderer(renderer.Renderer):
('bridge_bridgeprio', 'PRIO'),
])
+ templates = {}
+
def __init__(self, config=None):
if not config:
config = {}
@@ -261,6 +270,11 @@ class Renderer(renderer.Renderer):
nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
self.networkmanager_conf_path = config.get('networkmanager_conf_path',
nm_conf_path)
+ self.templates = {
+ 'control': config.get('control'),
+ 'iface_templates': config.get('iface_templates'),
+ 'route_templates': config.get('route_templates'),
+ }
@classmethod
def _render_iface_shared(cls, iface, iface_cfg):
@@ -287,7 +301,6 @@ class Renderer(renderer.Renderer):
if subnet_type == 'dhcp6':
iface_cfg['IPV6INIT'] = True
iface_cfg['DHCPV6C'] = True
- iface_cfg['BOOTPROTO'] = 'dhcp'
elif subnet_type in ['dhcp4', 'dhcp']:
iface_cfg['BOOTPROTO'] = 'dhcp'
elif subnet_type == 'static':
@@ -305,6 +318,13 @@ class Renderer(renderer.Renderer):
mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
if 'mtu' in subnet:
+ mtu_mismatch = bool(mtu_key in iface_cfg and
+ subnet['mtu'] != iface_cfg[mtu_key])
+ if mtu_mismatch:
+ LOG.warning(
+ 'Network config: ignoring %s device-level mtu:%s'
+ ' because ipv4 subnet-level mtu:%s provided.',
+ iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
iface_cfg[mtu_key] = subnet['mtu']
elif subnet_type == 'manual':
# If the subnet has an MTU setting, then ONBOOT=True
@@ -364,7 +384,7 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
- for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+ for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
for route in subnet.get('routes', []):
is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
@@ -506,7 +526,7 @@ class Renderer(renderer.Renderer):
return content_str
@staticmethod
- def _render_networkmanager_conf(network_state):
+ def _render_networkmanager_conf(network_state, templates=None):
content = networkmanager_conf.NetworkManagerConf("")
# If DNS server information is provided, configure
@@ -550,20 +570,36 @@ class Renderer(renderer.Renderer):
cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state):
+ def _render_ib_interfaces(cls, network_state, iface_contents):
+ ib_filter = renderer.filter_by_type('infiniband')
+ for iface in network_state.iter_interfaces(ib_filter):
+ iface_name = iface['name']
+ iface_cfg = iface_contents[iface_name]
+ iface_cfg.kind = 'infiniband'
+ iface_subnets = iface.get("subnets", [])
+ route_cfg = iface_cfg.routes
+ cls._render_subnets(iface_cfg, iface_subnets)
+ cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+
+ @classmethod
+ def _render_sysconfig(cls, base_sysconf_dir, network_state,
+ templates=None):
'''Given state, return /etc/sysconfig files + contents'''
+ if not templates:
+ templates = cls.templates
iface_contents = {}
for iface in network_state.iter_interfaces():
if iface['type'] == "loopback":
continue
iface_name = iface['name']
- iface_cfg = NetInterface(iface_name, base_sysconf_dir)
+ iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
cls._render_iface_shared(iface, iface_cfg)
iface_contents[iface_name] = iface_cfg
cls._render_physical_interfaces(network_state, iface_contents)
cls._render_bond_interfaces(network_state, iface_contents)
cls._render_vlan_interfaces(network_state, iface_contents)
cls._render_bridge_interfaces(network_state, iface_contents)
+ cls._render_ib_interfaces(network_state, iface_contents)
contents = {}
for iface_name, iface_cfg in iface_contents.items():
if iface_cfg or iface_cfg.children:
@@ -572,17 +608,21 @@ class Renderer(renderer.Renderer):
if iface_cfg:
contents[iface_cfg.path] = iface_cfg.to_string()
if iface_cfg.routes:
- contents[iface_cfg.routes.path_ipv4] = \
- iface_cfg.routes.to_string("ipv4")
- contents[iface_cfg.routes.path_ipv6] = \
- iface_cfg.routes.to_string("ipv6")
+ for cpath, proto in zip([iface_cfg.routes.path_ipv4,
+ iface_cfg.routes.path_ipv6],
+ ["ipv4", "ipv6"]):
+ if cpath not in contents:
+ contents[cpath] = iface_cfg.routes.to_string(proto)
return contents
- def render_network_state(self, network_state, target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
+ if not templates:
+ templates = self.templates
file_mode = 0o644
base_sysconf_dir = util.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state).items():
+ network_state,
+ templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
dns_path = util.target_path(target, self.dns_path)
@@ -592,7 +632,8 @@ class Renderer(renderer.Renderer):
if self.networkmanager_conf_path:
nm_conf_path = util.target_path(target,
self.networkmanager_conf_path)
- nm_conf_content = self._render_networkmanager_conf(network_state)
+ nm_conf_content = self._render_networkmanager_conf(network_state,
+ templates)
if nm_conf_content:
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
@@ -600,13 +641,16 @@ class Renderer(renderer.Renderer):
netrules_path = util.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
- # always write /etc/sysconfig/network configuration
- sysconfig_path = util.target_path(target, "etc/sysconfig/network")
- netcfg = [_make_header(), 'NETWORKING=yes']
- if network_state.use_ipv6:
- netcfg.append('NETWORKING_IPV6=yes')
- netcfg.append('IPV6_AUTOCONF=no')
- util.write_file(sysconfig_path, "\n".join(netcfg) + "\n", file_mode)
+ sysconfig_path = util.target_path(target, templates.get('control'))
+ # Distros configuring /etc/sysconfig/network as a file e.g. Centos
+ if sysconfig_path.endswith('network'):
+ util.ensure_dir(os.path.dirname(sysconfig_path))
+ netcfg = [_make_header(), 'NETWORKING=yes']
+ if network_state.use_ipv6:
+ netcfg.append('NETWORKING_IPV6=yes')
+ netcfg.append('IPV6_AUTOCONF=no')
+ util.write_file(sysconfig_path,
+ "\n".join(netcfg) + "\n", file_mode)
def available(target=None):
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 276556ee..58e0a591 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -199,6 +199,9 @@ class TestGenerateFallbackConfig(CiTestCase):
self.sysdir = self.tmp_dir() + '/'
self.m_sys_path.return_value = self.sysdir
self.addCleanup(sys_mock.stop)
+ self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
+ return_value=False)
+ self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
def test_generate_fallback_finds_connected_eth_with_mac(self):
"""generate_fallback_config finds any connected device with a mac."""
@@ -512,12 +515,17 @@ class TestEphemeralIPV4Network(CiTestCase):
capture=True),
mock.call(
['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
+ mock.call(['ip', '-4', 'route', 'add', '192.168.2.1',
+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
mock.call(
['ip', '-4', 'route', 'add', 'default', 'via',
'192.168.2.1', 'dev', 'eth0'], capture=True)]
- expected_teardown_calls = [mock.call(
- ['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
- capture=True)]
+ expected_teardown_calls = [
+ mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
+ capture=True),
+ mock.call(['ip', '-4', 'route', 'del', '192.168.2.1',
+ 'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
+ ]
with net.EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 993b26cf..9ff929c2 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -8,9 +8,11 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from copy import copy, deepcopy
import re
from cloudinit import log as logging
+from cloudinit.net.network_state import net_prefix_to_ipv4_mask
from cloudinit import util
from cloudinit.simpletable import SimpleTable
@@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable
LOG = logging.getLogger()
-def netdev_info(empty=""):
- fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+DEFAULT_NETDEV_INFO = {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "",
+ "up": False
+}
+
+
+def _netdev_info_iproute(ipaddr_out):
+ """
+ Get network device dicts from ip route and ip link info.
+
+ @param ipaddr_out: Output string from 'ip addr show' command.
+
+ @returns: A dict of device info keyed by network device name containing
+ device configuration values.
+ @raise: TypeError if ipaddr_out isn't a string.
+ """
+ devs = {}
+ dev_name = None
+ for num, line in enumerate(ipaddr_out.splitlines()):
+ m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
+ if m:
+ dev_name = m.group('dev').lower().split('@')[0]
+ flags = m.group('flags').split(',')
+ devs[dev_name] = {
+ 'ipv4': [], 'ipv6': [], 'hwaddr': '',
+ 'up': bool('UP' in flags and 'LOWER_UP' in flags),
+ }
+ elif 'inet6' in line:
+ m = re.match(
+ r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ devs[dev_name]['ipv6'].append(m.groupdict())
+ elif 'inet' in line:
+ m = re.match(
+ r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
+ r'(?P<scope>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ match = m.groupdict()
+ cidr4 = match.pop('cidr4')
+ addr, _, prefix = cidr4.partition('/')
+ if not prefix:
+ prefix = '32'
+ devs[dev_name]['ipv4'].append({
+ 'ip': addr,
+ 'bcast': match['bcast'] if match['bcast'] else '',
+ 'mask': net_prefix_to_ipv4_mask(prefix),
+ 'scope': match['scope']})
+ elif 'link' in line:
+ m = re.match(
+ r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ if m.group('link_type') == 'ether':
+ devs[dev_name]['hwaddr'] = m.group('hwaddr')
+ else:
+ devs[dev_name]['hwaddr'] = ''
+ else:
+ continue
+ return devs
+
+
+def _netdev_info_ifconfig(ifconfig_data):
+ # fields that need to be returned in devs for each dev
devs = {}
- for line in str(ifcfg_out).splitlines():
+ for line in ifconfig_data.splitlines():
if len(line) == 0:
continue
if line[0] not in ("\t", " "):
curdev = line.split()[0]
- devs[curdev] = {"up": False}
- for field in fields:
- devs[curdev][field] = ""
+            # current ifconfig appends a ':' to the device name
+ if curdev.endswith(':'):
+ curdev = curdev[:-1]
+ if curdev not in devs:
+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
@@ -39,59 +113,164 @@ def netdev_info(empty=""):
if re.search(r"flags=\d+<up,", toks[1]):
devs[curdev]['up'] = True
- fieldpost = ""
- if toks[0] == "inet6":
- fieldpost = "6"
-
for i in range(len(toks)):
- # older net-tools (ubuntu) show 'inet addr:xx.yy',
- # newer (freebsd and fedora) show 'inet xx.yy'
- # just skip this 'inet' entry. (LP: #1285185)
- try:
- if ((toks[i] in ("inet", "inet6") and
- toks[i + 1].startswith("addr:"))):
- continue
- except IndexError:
- pass
-
- # Couple the different items we're interested in with the correct
- # field since FreeBSD/CentOS/Fedora differ in the output.
- ifconfigfields = {
- "addr:": "addr", "inet": "addr",
- "bcast:": "bcast", "broadcast": "bcast",
- "mask:": "mask", "netmask": "mask",
- "hwaddr": "hwaddr", "ether": "hwaddr",
- "scope": "scope",
- }
- for origfield, field in ifconfigfields.items():
- target = "%s%s" % (field, fieldpost)
- if devs[curdev].get(target, ""):
- continue
- if toks[i] == "%s" % origfield:
- try:
- devs[curdev][target] = toks[i + 1]
- except IndexError:
- pass
- elif toks[i].startswith("%s" % origfield):
- devs[curdev][target] = toks[i][len(field) + 1:]
-
- if empty != "":
- for (_devname, dev) in devs.items():
- for field in dev:
- if dev[field] == "":
- dev[field] = empty
+ if toks[i] == "inet": # Create new ipv4 addr entry
+ devs[curdev]['ipv4'].append(
+ {'ip': toks[i + 1].lstrip("addr:")})
+ elif toks[i].startswith("bcast:"):
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
+ elif toks[i] == "broadcast":
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ elif toks[i].startswith("mask:"):
+ devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
+ elif toks[i] == "netmask":
+ devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
+ elif toks[i] == "hwaddr" or toks[i] == "ether":
+ devs[curdev]['hwaddr'] = toks[i + 1]
+ elif toks[i] == "inet6":
+ if toks[i + 1] == "addr:":
+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ else:
+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+ devs[curdev]['ipv6'][-1]['ip'] = addr6
+ elif toks[i].startswith("scope:"):
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ elif toks[i] == "scopeid":
+ res = re.match(r'.*<(\S+)>', toks[i + 1])
+ if res:
+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ return devs
+
+
+def netdev_info(empty=""):
+ devs = {}
+ if util.which('ip'):
+ # Try iproute first of all
+ (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+ devs = _netdev_info_iproute(ipaddr_out)
+ elif util.which('ifconfig'):
+ # Fall back to net-tools if iproute2 is not present
+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ devs = _netdev_info_ifconfig(ifcfg_out)
+ else:
+ LOG.warning(
+ "Could not print networks: missing 'ip' and 'ifconfig' commands")
+ if empty == "":
+ return devs
+
+ recurse_types = (dict, tuple, list)
+
+ def fill(data, new_val="", empty_vals=("", b"")):
+ """Recursively replace 'empty_vals' in data (dict, tuple, list)
+ with new_val"""
+ if isinstance(data, dict):
+ myiter = data.items()
+ elif isinstance(data, (tuple, list)):
+ myiter = enumerate(data)
+ else:
+ raise TypeError("Unexpected input to fill")
+
+ for key, val in myiter:
+ if val in empty_vals:
+ data[key] = new_val
+ elif isinstance(val, recurse_types):
+ fill(val, new_val)
+
+ fill(devs, new_val=empty)
return devs
-def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
+def _netdev_route_info_iproute(iproute_data):
+ """
+ Get network route dicts from ip route info.
+
+ @param iproute_data: Output string from ip route command.
+
+ @returns: A dict containing ipv4 and ipv6 route entries as lists. Each
+ item in the list is a route dictionary representing destination,
+ gateway, flags, genmask and interface information.
+ """
+
+ routes = {}
+ routes['ipv4'] = []
+ routes['ipv6'] = []
+ entries = iproute_data.splitlines()
+ default_route_entry = {
+ 'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
+ 'iface': '', 'metric': ''}
+ for line in entries:
+ entry = copy(default_route_entry)
+ if not line:
+ continue
+ toks = line.split()
+ flags = ['U']
+ if toks[0] == "default":
+ entry['destination'] = "0.0.0.0"
+ entry['genmask'] = "0.0.0.0"
+ else:
+ if '/' in toks[0]:
+ (addr, cidr) = toks[0].split("/")
+ else:
+ addr = toks[0]
+ cidr = '32'
+ flags.append("H")
+            entry['destination'] = addr
+            entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
+ entry['gateway'] = "0.0.0.0"
+ for i in range(len(toks)):
+ if toks[i] == "via":
+ entry['gateway'] = toks[i + 1]
+ flags.insert(1, "G")
+ if toks[i] == "dev":
+ entry["iface"] = toks[i + 1]
+ if toks[i] == "metric":
+ entry['metric'] = toks[i + 1]
+ entry['flags'] = ''.join(flags)
+ routes['ipv4'].append(entry)
+ try:
+ (iproute_data6, _err6) = util.subp(
+ ["ip", "--oneline", "-6", "route", "list", "table", "all"],
+ rcs=[0, 1])
+ except util.ProcessExecutionError:
+ pass
+ else:
+ entries6 = iproute_data6.splitlines()
+ for line in entries6:
+ entry = {}
+ if not line:
+ continue
+ toks = line.split()
+ if toks[0] == "default":
+ entry['destination'] = "::/0"
+ entry['flags'] = "UG"
+ else:
+ entry['destination'] = toks[0]
+ entry['gateway'] = "::"
+ entry['flags'] = "U"
+ for i in range(len(toks)):
+ if toks[i] == "via":
+ entry['gateway'] = toks[i + 1]
+ entry['flags'] = "UG"
+ if toks[i] == "dev":
+ entry["iface"] = toks[i + 1]
+ if toks[i] == "metric":
+ entry['metric'] = toks[i + 1]
+ if toks[i] == "expires":
+ entry['flags'] = entry['flags'] + 'e'
+ routes['ipv6'].append(entry)
+ return routes
+
+def _netdev_route_info_netstat(route_data):
routes = {}
routes['ipv4'] = []
routes['ipv6'] = []
- entries = route_out.splitlines()[1:]
+ entries = route_data.splitlines()
for line in entries:
if not line:
continue
@@ -101,8 +280,8 @@ def route_info():
# default 10.65.0.1 UGS 0 34920 vtnet0
#
# Linux netstat shows 2 more:
- # Destination Gateway Genmask Flags MSS Window irtt Iface
- # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
+ # Destination Gateway Genmask Flags Metric Ref Use Iface
+ # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
if (len(toks) < 6 or toks[0] == "Kernel" or
toks[0] == "Destination" or toks[0] == "Internet" or
toks[0] == "Internet6" or toks[0] == "Routing"):
@@ -125,31 +304,57 @@ def route_info():
routes['ipv4'].append(entry)
try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
- rcs=[0, 1])
+ (route_data6, _err6) = util.subp(
+ ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
except util.ProcessExecutionError:
pass
else:
- entries6 = route_out6.splitlines()[1:]
+ entries6 = route_data6.splitlines()
for line in entries6:
if not line:
continue
toks = line.split()
- if (len(toks) < 6 or toks[0] == "Kernel" or
+ if (len(toks) < 7 or toks[0] == "Kernel" or
+ toks[0] == "Destination" or toks[0] == "Internet" or
toks[0] == "Proto" or toks[0] == "Active"):
continue
entry = {
- 'proto': toks[0],
- 'recv-q': toks[1],
- 'send-q': toks[2],
- 'local address': toks[3],
- 'foreign address': toks[4],
- 'state': toks[5],
+ 'destination': toks[0],
+ 'gateway': toks[1],
+ 'flags': toks[2],
+ 'metric': toks[3],
+ 'ref': toks[4],
+ 'use': toks[5],
+ 'iface': toks[6],
}
+ # skip lo interface on ipv6
+ if entry['iface'] == "lo":
+ continue
+ # strip /128 from address if it's included
+ if entry['destination'].endswith('/128'):
+ entry['destination'] = re.sub(
+ r'\/128$', '', entry['destination'])
routes['ipv6'].append(entry)
return routes
+def route_info():
+ routes = {}
+ if util.which('ip'):
+ # Try iproute2 first
+ (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+ routes = _netdev_route_info_iproute(iproute_out)
+ elif util.which('netstat'):
+ # Fall back to net-tools if iproute2 is not present
+ (route_out, _err) = util.subp(
+ ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
+ routes = _netdev_route_info_netstat(route_out)
+ else:
+ LOG.warning(
+ "Could not print routes: missing 'ip' and 'netstat' commands")
+ return routes
+
+
def getgateway():
try:
routes = route_info()
@@ -164,23 +369,36 @@ def getgateway():
def netdev_pformat():
lines = []
+ empty = "."
try:
- netdev = netdev_info(empty=".")
- except Exception:
- lines.append(util.center("Net device info failed", '!', 80))
+ netdev = netdev_info(empty=empty)
+ except Exception as e:
+ lines.append(
+ util.center(
+ "Net device info failed ({error})".format(error=str(e)),
+ '!', 80))
else:
+ if not netdev:
+ return '\n'
fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
tbl = SimpleTable(fields)
- for (dev, d) in sorted(netdev.items()):
- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
- if d.get('addr6'):
- tbl.add_row([dev, d["up"],
- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
+ for (dev, data) in sorted(netdev.items()):
+ for addr in data.get('ipv4'):
+ tbl.add_row(
+ (dev, data["up"], addr["ip"], addr["mask"],
+ addr.get('scope', empty), data["hwaddr"]))
+ for addr in data.get('ipv6'):
+ tbl.add_row(
+ (dev, data["up"], addr["ip"], empty, addr["scope6"],
+ data["hwaddr"]))
+ if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
+ tbl.add_row((dev, data["up"], empty, empty, empty,
+ data["hwaddr"]))
netdev_s = tbl.get_string()
max_len = len(max(netdev_s.splitlines(), key=len))
header = util.center("Net device info", "+", max_len)
lines.extend([header, netdev_s])
- return "\n".join(lines)
+ return "\n".join(lines) + "\n"
def route_pformat():
@@ -188,7 +406,10 @@ def route_pformat():
try:
routes = route_info()
except Exception as e:
- lines.append(util.center('Route info failed', '!', 80))
+ lines.append(
+ util.center(
+ 'Route info failed ({error})'.format(error=str(e)),
+ '!', 80))
util.logexc(LOG, "Route info failed: %s" % e)
else:
if routes.get('ipv4'):
@@ -205,20 +426,20 @@ def route_pformat():
header = util.center("Route IPv4 info", "+", max_len)
lines.extend([header, route_s])
if routes.get('ipv6'):
- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
- 'Local Address', 'Foreign Address', 'State']
+ fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
+ 'Flags']
tbl_v6 = SimpleTable(fields_v6)
for (n, r) in enumerate(routes.get('ipv6')):
route_id = str(n)
- tbl_v6.add_row([route_id, r['proto'],
- r['recv-q'], r['send-q'],
- r['local address'], r['foreign address'],
- r['state']])
+ if r['iface'] == 'lo':
+ continue
+ tbl_v6.add_row([route_id, r['destination'],
+ r['gateway'], r['iface'], r['flags']])
route_s = tbl_v6.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv6 info", "+", max_len)
lines.extend([header, route_s])
- return "\n".join(lines)
+ return "\n".join(lines) + "\n"
def debug_info(prefix='ci-info: '):
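# A minimal standalone sketch of the tool-discovery pattern the refactor
# above introduces in netdev_info() and route_info(): prefer iproute2 and
# fall back to net-tools only when 'ip' is absent. shutil.which and
# subprocess stand in for cloudinit's util.which and util.subp (an
# assumption made only for this illustration; the command lists mirror
# the ones in the hunks above).
import shutil
import subprocess

def get_route_dump():
    if shutil.which('ip'):
        # Prefer iproute2; -o prints one route per line.
        return subprocess.check_output(
            ['ip', '-o', 'route', 'list'], universal_newlines=True)
    if shutil.which('netstat'):
        # Fall back to net-tools; netstat may exit 0 or 1 here.
        proc = subprocess.run(
            ['netstat', '--route', '--numeric', '--extend'],
            stdout=subprocess.PIPE, universal_newlines=True)
        return proc.stdout
    raise RuntimeError("missing 'ip' and 'netstat' commands")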
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 1ed2b487..ed5c7038 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_CONFIG = {
def update_configuration(config):
- """Update the instanciated_handler_registry.
+ """Update the instantiated_handler_registry.
:param config:
The dictionary containing changes to apply. If a key is given
@@ -37,6 +37,12 @@ def update_configuration(config):
instantiated_handler_registry.register_item(handler_name, instance)
+def flush_events():
+ for _, handler in instantiated_handler_registry.registered_items.items():
+ if hasattr(handler, 'flush'):
+ handler.flush()
+
+
instantiated_handler_registry = DictRegistry()
update_configuration(DEFAULT_CONFIG)
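# A small sketch of the contract behind the new flush_events() helper
# above: every registered handler that exposes a flush() method is
# drained, and handlers without one are skipped by the hasattr() check.
# The plain dict below stands in for DictRegistry (an assumption made
# only for this illustration).
class BufferingHandler(object):
    def __init__(self):
        self.buffer = ['event-a', 'event-b']

    def flush(self):
        self.buffer = []

registered_items = {'buffered': BufferingHandler(), 'plain': object()}
for _name, handler in registered_items.items():
    if hasattr(handler, 'flush'):
        handler.flush()
assert registered_items['buffered'].buffer == []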
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index 4f62d2f9..e5dfab33 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -192,7 +192,7 @@ class ReportEventStack(object):
def _childrens_finish_info(self):
for cand_result in (status.FAIL, status.WARN):
- for name, (value, msg) in self.children.items():
+ for _name, (value, _msg) in self.children.items():
if value == cand_result:
return (value, self.message)
return (self.result, self.message)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 4066076c..6d23558e 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,17 +1,32 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
+import fcntl
import json
import six
+import os
+import re
+import struct
+import threading
+import time
from cloudinit import log as logging
from cloudinit.registry import DictRegistry
from cloudinit import (url_helper, util)
+from datetime import datetime
+if six.PY2:
+ from multiprocessing.queues import JoinableQueue as JQueue
+else:
+ from queue import Queue as JQueue
LOG = logging.getLogger(__name__)
+class ReportException(Exception):
+ pass
+
+
@six.add_metaclass(abc.ABCMeta)
class ReportingHandler(object):
"""Base class for report handlers.
@@ -24,6 +39,10 @@ class ReportingHandler(object):
def publish_event(self, event):
"""Publish an event."""
+ def flush(self):
+ """Ensure ReportingHandler has published all events"""
+ pass
+
class LogHandler(ReportingHandler):
"""Publishes events to the cloud-init log at the ``DEBUG`` log level."""
@@ -85,9 +104,236 @@ class WebHookHandler(ReportingHandler):
LOG.warning("failed posting event: %s", event.as_string())
+class HyperVKvpReportingHandler(ReportingHandler):
+ """
+ Reports events to a Hyper-V host using Key-Value-Pair exchange protocol
+ and can be used to obtain high level diagnostic information from the host.
+
+ To use this facility, the KVP user-space daemon (hv_kvp_daemon) has to be
+ running. It reads the kvp_file when the host requests the guest to
+ enumerate the KVP's.
+
+ This reporter collates all events for a module (origin|name) in a single
+ json string in the dictionary.
+
+ For more information, see
+ https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
+ """
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
+ HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
+ EVENT_PREFIX = 'CLOUD_INIT'
+ MSG_KEY = 'msg'
+ RESULT_KEY = 'result'
+ DESC_IDX_KEY = 'msg_i'
+ JSON_SEPARATORS = (',', ':')
+ KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+
+ def __init__(self,
+ kvp_file_path=KVP_POOL_FILE_GUEST,
+ event_types=None):
+ super(HyperVKvpReportingHandler, self).__init__()
+ self._kvp_file_path = kvp_file_path
+ self._event_types = event_types
+ self.q = JQueue()
+ self.kvp_file = None
+ self.incarnation_no = self._get_incarnation_no()
+ self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
+ self.incarnation_no)
+ self._current_offset = 0
+ self.publish_thread = threading.Thread(
+ target=self._publish_event_routine)
+ self.publish_thread.daemon = True
+ self.publish_thread.start()
+
+ def _get_incarnation_no(self):
+ """
+ Use the boot time (current time minus uptime) as the incarnation
+ number. The incarnation number is used to distinguish the old data
+ stored in KVP from the new data.
+ """
+ uptime_str = util.uptime()
+ try:
+ return int(time.time() - float(uptime_str))
+ except ValueError:
+ LOG.warning("uptime '%s' not in correct format.", uptime_str)
+ return 0
+
+ def _iterate_kvps(self, offset):
+ """iterate the kvp file from the current offset."""
+ try:
+ with open(self._kvp_file_path, 'rb+') as f:
+ self.kvp_file = f
+ fcntl.flock(f, fcntl.LOCK_EX)
+ f.seek(offset)
+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
+ while len(record_data) == self.HV_KVP_RECORD_SIZE:
+ self._current_offset += self.HV_KVP_RECORD_SIZE
+ kvp_item = self._decode_kvp_item(record_data)
+ yield kvp_item
+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
+ fcntl.flock(f, fcntl.LOCK_UN)
+ finally:
+ self.kvp_file = None
+
+ def _event_key(self, event):
+ """
+ the event key format is:
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
+ """
+ return u"{0}|{1}|{2}".format(self.event_key_prefix,
+ event.event_type, event.name)
+
+ def _encode_kvp_item(self, key, value):
+ data = (struct.pack("%ds%ds" % (
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
+ key.encode('utf-8'), value.encode('utf-8')))
+ return data
+
+ def _decode_kvp_item(self, record_data):
+ record_data_len = len(record_data)
+ if record_data_len != self.HV_KVP_RECORD_SIZE:
+ raise ReportException(
+ "record_data len not correct {0} {1}."
+ .format(record_data_len, self.HV_KVP_RECORD_SIZE))
+ k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
+ .strip('\x00'))
+ v = (
+ record_data[
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
+ ].decode('utf-8').strip('\x00'))
+
+ return {'key': k, 'value': v}
+
+ def _update_kvp_item(self, record_data):
+ if self.kvp_file is None:
+ raise ReportException(
+ "kvp file '{0}' not opened."
+ .format(self._kvp_file_path))
+ self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
+ self.kvp_file.write(record_data)
+
+ def _append_kvp_item(self, record_data):
+ with open(self._kvp_file_path, 'rb+') as f:
+ fcntl.flock(f, fcntl.LOCK_EX)
+ # seek to end of the file
+ f.seek(0, 2)
+ f.write(record_data)
+ f.flush()
+ fcntl.flock(f, fcntl.LOCK_UN)
+ self._current_offset = f.tell()
+
+ def _break_down(self, key, meta_data, description):
+ del meta_data[self.MSG_KEY]
+ des_in_json = json.dumps(description)
+ des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+ i = 0
+ result_array = []
+ message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+ while True:
+ meta_data[self.DESC_IDX_KEY] = i
+ meta_data[self.MSG_KEY] = ''
+ data_without_desc = json.dumps(meta_data,
+ separators=self.JSON_SEPARATORS)
+ room_for_desc = (
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+ len(data_without_desc) - 8)
+ value = data_without_desc.replace(
+ message_place_holder,
+ '"{key}":"{desc}"'.format(
+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+ result_array.append(self._encode_kvp_item(key, value))
+ i += 1
+ des_in_json = des_in_json[room_for_desc:]
+ if len(des_in_json) == 0:
+ break
+ return result_array
+
+ def _encode_event(self, event):
+ """
+ Encode the event into KVP data bytes.
+ If the event content exceeds the maximum KVP value length,
+ it is split into multiple slices.
+ """
+ key = self._event_key(event)
+ meta_data = {
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
+ if hasattr(event, self.RESULT_KEY):
+ meta_data[self.RESULT_KEY] = event.result
+ meta_data[self.MSG_KEY] = event.description
+ value = json.dumps(meta_data, separators=self.JSON_SEPARATORS)
+ # If it exceeds the maximum KVP value length, break it down
+ # into slices; this should be a rare corner case.
+ if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+ return self._break_down(key, meta_data, event.description)
+ else:
+ data = self._encode_kvp_item(key, value)
+ return [data]
+
+ def _publish_event_routine(self):
+ while True:
+ try:
+ event = self.q.get(block=True)
+ need_append = True
+ try:
+ if not os.path.exists(self._kvp_file_path):
+ LOG.warning(
+ "skip writing events %s to %s. file not present.",
+ event.as_string(),
+ self._kvp_file_path)
+ encoded_event = self._encode_event(event)
+ # write each encoded chunk of this event
+ for encoded_data in (encoded_event):
+ for kvp in self._iterate_kvps(self._current_offset):
+ match = (
+ re.match(
+ r"^{0}\|(\d+)\|.+"
+ .format(self.EVENT_PREFIX),
+ kvp['key']
+ ))
+ if match:
+ match_groups = match.groups(0)
+ if int(match_groups[0]) < self.incarnation_no:
+ need_append = False
+ self._update_kvp_item(encoded_data)
+ continue
+ if need_append:
+ self._append_kvp_item(encoded_data)
+ except IOError as e:
+ LOG.warning(
+ "failed posting event to kvp: %s e:%s",
+ event.as_string(), e)
+ finally:
+ self.q.task_done()
+
+ # when the main process exits, q.get() will raise EOFError,
+ # indicating we should exit this thread.
+ except EOFError:
+ return
+
+ # Saving to the KVP pool can be time-consuming when the pool
+ # already contains a chunk of data, so defer it to another thread.
+ def publish_event(self, event):
+ if (not self._event_types or event.event_type in self._event_types):
+ self.q.put(event)
+
+ def flush(self):
+ LOG.debug('HyperVReportingHandler flushing remaining events')
+ self.q.join()
+
+
available_handlers = DictRegistry()
available_handlers.register_item('log', LogHandler)
available_handlers.register_item('print', PrintHandler)
available_handlers.register_item('webhook', WebHookHandler)
+available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
# vi: ts=4 expandtab
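# A self-contained sketch of the fixed-size KVP record layout the
# handler above relies on: a 512-byte NUL-padded key followed by a
# 2048-byte NUL-padded value, 2560 bytes per record. struct.pack with
# the '512s2048s' format pads both fields with b'\x00' automatically;
# the sample key follows the CLOUD_INIT|<incarnation>|<type>|<name>
# format documented above (its contents are made up).
import struct

KEY_SIZE, VALUE_SIZE = 512, 2048
RECORD_SIZE = KEY_SIZE + VALUE_SIZE

def encode_kvp(key, value):
    return struct.pack('%ds%ds' % (KEY_SIZE, VALUE_SIZE),
                       key.encode('utf-8'), value.encode('utf-8'))

def decode_kvp(record):
    key = record[:KEY_SIZE].decode('utf-8').strip('\x00')
    value = record[KEY_SIZE:].decode('utf-8').strip('\x00')
    return key, value

record = encode_kvp('CLOUD_INIT|0|start|init-local', '{"msg":""}')
assert len(record) == RECORD_SIZE
assert decode_kvp(record) == ('CLOUD_INIT|0|start|init-local', '{"msg":""}')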
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index dde5749d..b1ebaade 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -38,12 +38,13 @@ CFG_BUILTIN = {
'Scaleway',
'Hetzner',
'IBMCloud',
+ 'Oracle',
# At the end to act as a 'catch' when none of the above work...
'None',
],
'def_log_file': '/var/log/cloud-init.log',
'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
+ 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
'system_info': {
'paths': {
'cloud_dir': '/var/lib/cloud',
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 22279d09..858e0827 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -45,7 +45,7 @@ def _is_aliyun():
def parse_public_keys(public_keys):
keys = []
- for key_id, key_body in public_keys.items():
+ for _key_id, key_body in public_keys.items():
if isinstance(key_body, str):
keys.append(key_body.strip())
elif isinstance(key_body, list):
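# A simplified sketch of the behavior visible in the hunk above: the key
# ids are ignored (hence the _key_id rename to satisfy pylint), and each
# key body may be a bare string or a list of strings. Any dict-valued
# case handled by the full function is omitted here.
def parse_public_keys(public_keys):
    keys = []
    for _key_id, key_body in public_keys.items():
        if isinstance(key_body, str):
            keys.append(key_body.strip())
        elif isinstance(key_body, list):
            keys.extend(k.strip() for k in key_body)
    return keys

assert parse_public_keys({
    'skp-0': 'ssh-rsa AAAB example ',
    'skp-1': ['ssh-ed25519 AAAC example'],
}) == ['ssh-rsa AAAB example', 'ssh-ed25519 AAAC example']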
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index e1d0055b..8cd312d0 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
@@ -182,29 +181,18 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
- cmd = CMD_PROBE_FLOPPY
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ modprobe_floppy()
+ except ProcessExecutionError as e:
+ util.logexc(LOG, 'Failed modprobe: %s', e)
return False
floppy_dev = '/dev/fd0'
# udevadm settle for floppy device
try:
- cmd = CMD_UDEVADM_SETTLE
- cmd.append('--exit-if-exists=' + floppy_dev)
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ util.udevadm_settle(exists=floppy_dev, timeout=5)
+ except (ProcessExecutionError, OSError) as e:
+ util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
try:
@@ -261,6 +249,11 @@ class DataSourceAltCloud(sources.DataSource):
return False
+def modprobe_floppy():
+ out, _err = util.subp(CMD_PROBE_FLOPPY)
+ LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
+
+
# Used to match classes to dependencies
# Source DataSourceAltCloud does not really depend on networking.
# In the future 'dsmode' like behavior can be added to offer user
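# The refactor above replaces a hand-built command list with
# util.udevadm_settle(exists=floppy_dev, timeout=5). A rough standalone
# equivalent of what that call runs, reconstructed from the removed
# CMD_UDEVADM_SETTLE lines (a sketch, not cloudinit's implementation):
import subprocess

def udevadm_settle(exists=None, timeout=None):
    cmd = ['udevadm', 'settle']
    if timeout is not None:
        cmd.append('--timeout=%s' % timeout)
    if exists is not None:
        # return as soon as the given device node exists
        cmd.append('--exit-if-exists=%s' % exists)
    subprocess.check_call(cmd)

# e.g. udevadm_settle(exists='/dev/fd0', timeout=5)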
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b66da647..629f006f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,6 +8,7 @@ import base64
import contextlib
import crypt
from functools import partial
+import json
import os
import os.path
import re
@@ -18,6 +19,7 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
@@ -49,7 +51,18 @@ DEFAULT_FS = 'ext4'
# DMI chassis-asset-tag is set static for all azure instances
AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
-IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
+AGENT_SEED_DIR = '/var/lib/waagent'
+IMDS_URL = "http://169.254.169.254/metadata/"
+
+# List of static scripts and network config artifacts created by
+# stock Ubuntu supported images.
+UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
+ '/etc/netplan/90-azure-hotplug.yaml',
+ '/usr/local/sbin/ephemeral_eth.sh',
+ '/etc/udev/rules.d/10-net-device-added.rules',
+ '/run/network/interfaces.ephemeral.d',
+]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -108,31 +121,24 @@ def find_dev_from_busdev(camcontrol_out, busdev):
return None
-def get_dev_storvsc_sysctl():
+def execute_or_debug(cmd, fail_ret=None):
try:
- sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
+ return util.subp(cmd)[0]
except util.ProcessExecutionError:
- LOG.debug("Fail to execute sysctl dev.storvsc")
- sysctl_out = ""
- return sysctl_out
+ LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ return fail_ret
+
+
+def get_dev_storvsc_sysctl():
+ return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="")
def get_camcontrol_dev_bus():
- try:
- camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b'])
- except util.ProcessExecutionError:
- LOG.debug("Fail to execute camcontrol devlist -b")
- return None
- return camcontrol_b_out
+ return execute_or_debug(['camcontrol', 'devlist', '-b'])
def get_camcontrol_dev():
- try:
- camcontrol_out, err = util.subp(['camcontrol', 'devlist'])
- except util.ProcessExecutionError:
- LOG.debug("Fail to execute camcontrol devlist")
- return None
- return camcontrol_out
+ return execute_or_debug(['camcontrol', 'devlist'])
def get_resource_disk_on_freebsd(port_id):
@@ -192,7 +198,7 @@ if util.is_FreeBSD():
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
- 'data_dir': "/var/lib/waagent",
+ 'data_dir': AGENT_SEED_DIR,
'set_hostname': True,
'hostname_bounce': {
'interface': DEFAULT_PRIMARY_NIC,
@@ -215,6 +221,7 @@ BUILTIN_CLOUD_CONFIG = {
}
DS_CFG_PATH = ['datasource', DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
# The redacted password fails to meet password complexity requirements
@@ -258,6 +265,7 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
+ _metadata_imds = sources.UNSET
process_name = 'dhclient'
tmpps = os.popen("ps -Af").read()
@@ -274,6 +282,8 @@ class DataSourceAzure(sources.DataSource):
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
self._network_config = None
+ # Regenerate network config new_instance boot and every boot
+ self.update_events['network'].add(EventType.BOOT)
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -347,15 +357,17 @@ class DataSourceAzure(sources.DataSource):
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
return metadata
- def _get_data(self):
+ def crawl_metadata(self):
+ """Walk all instance metadata sources returning a dict on success.
+
+ @return: A dictionary of any metadata content for this instance.
+ @raise: InvalidMetaDataException when the expected metadata service is
+ unavailable, broken or disabled.
+ """
+ crawled_data = {}
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- asset_tag = util.read_dmi_data('chassis-asset-tag')
- if asset_tag != AZURE_CHASSIS_ASSET_TAG:
- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
- return False
-
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
@@ -384,46 +396,84 @@ class DataSourceAzure(sources.DataSource):
except NonAzureDataSource:
continue
except BrokenAzureDataSource as exc:
- raise exc
+ msg = 'BrokenAzureDataSource: %s' % exc
+ raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
LOG.warning("%s was not mountable", cdev)
continue
if reprovision or self._should_reprovision(ret):
ret = self._reprovision()
- (md, self.userdata_raw, cfg, files) = ret
+ imds_md = get_metadata_from_imds(
+ self.fallback_interface, retries=3)
+ (md, userdata_raw, cfg, files) = ret
self.seed = cdev
- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
+ crawled_data.update({
+ 'cfg': cfg,
+ 'files': files,
+ 'metadata': util.mergemanydict(
+ [md, {'imds': imds_md}]),
+ 'userdata_raw': userdata_raw})
found = cdev
LOG.debug("found datasource in %s", cdev)
break
if not found:
- return False
+ raise sources.InvalidMetaDataException('No Azure metadata found')
if found == ddir:
LOG.debug("using files cached in %s", ddir)
- # azure / hyper-v provides random data here
- # TODO. find the seed on FreeBSD platform
- # now update ds_cfg to reflect contents pass in config
- if not util.is_FreeBSD():
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
- if seed:
- self.metadata['random_seed'] = seed
+ seed = _get_random_seed()
+ if seed:
+ crawled_data['metadata']['random_seed'] = seed
+ crawled_data['metadata']['instance-id'] = util.read_dmi_data(
+ 'system-uuid')
+ return crawled_data
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable(self.seed_dir)
+
+ def clear_cached_attrs(self, attr_defaults=()):
+ """Reset any cached class attributes to defaults."""
+ super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
+ self._metadata_imds = sources.UNSET
+
+ def _get_data(self):
+ """Crawl and process datasource metadata caching metadata as attrs.
+
+ @return: True on success, False on error, invalid or disabled
+ datasource.
+ """
+ if not self._is_platform_viable():
+ return False
+ try:
+ crawled_data = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata)
+ except sources.InvalidMetaDataException as e:
+ LOG.warning('Could not crawl Azure metadata: %s', e)
+ return False
+ if self.distro and self.distro.name == 'ubuntu':
+ maybe_remove_ubuntu_network_config_scripts()
+
+ # Process crawled data and augment with various config defaults
+ self.cfg = util.mergemanydict(
+ [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
+ self._metadata_imds = crawled_data['metadata']['imds']
+ self.metadata = util.mergemanydict(
+ [crawled_data['metadata'], DEFAULT_METADATA])
+ self.userdata_raw = crawled_data['userdata_raw']
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(ddir, files, dirmode=0o700)
-
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
-
+ write_files(
+ self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
return True
def device_name_to_device(self, name):
@@ -449,11 +499,12 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("negotiating already done for %s",
self.get_instance_id())
- def _poll_imds(self, report_ready=True):
+ def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = IMDS_URL + "?api-version=2017-04-02"
+ url = IMDS_URL + "reprovisiondata?api-version=2017-04-02"
headers = {"Metadata": "true"}
+ report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
LOG.debug("Start polling IMDS")
def exc_cb(msg, exception):
@@ -463,13 +514,17 @@ class DataSourceAzure(sources.DataSource):
# call DHCP and setup the ephemeral network to acquire the new IP.
return False
- need_report = report_ready
while True:
try:
with EphemeralDHCPv4() as lease:
- if need_report:
+ if report_ready:
+ path = REPORTED_READY_MARKER_FILE
+ LOG.info(
+ "Creating a marker file to report ready: %s", path)
+ util.write_file(path, "{pid}: {time}\n".format(
+ pid=os.getpid(), time=time()))
self._report_ready(lease=lease)
- need_report = False
+ report_ready = False
return readurl(url, timeout=1, headers=headers,
exception_cb=exc_cb, infinite=True).contents
except UrlError:
@@ -480,7 +535,7 @@ class DataSourceAzure(sources.DataSource):
before we go into our polling loop."""
try:
get_metadata_from_fabric(None, lease['unknown-245'])
- except Exception as exc:
+ except Exception:
LOG.warning(
"Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True)
@@ -498,13 +553,15 @@ class DataSourceAzure(sources.DataSource):
jump back into the polling loop in order to retrieve the ovf_env."""
if not ret:
return False
- (md, self.userdata_raw, cfg, files) = ret
+ (_md, _userdata_raw, cfg, _files) = ret
path = REPROVISION_MARKER_FILE
if (cfg.get('PreprovisionedVm') is True or
os.path.isfile(path)):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds")
- util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+ LOG.info("Creating a marker file to poll imds: %s",
+ path)
+ util.write_file(path, "{pid}: {time}\n".format(
+ pid=os.getpid(), time=time()))
return True
return False
@@ -534,37 +591,33 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
- except Exception as exc:
+ except Exception:
LOG.warning(
"Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True)
return False
+ util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
return fabric_data
def activate(self, cfg, is_new_instance):
- address_ephemeral_resize(is_new_instance=is_new_instance)
+ address_ephemeral_resize(is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(
+ DS_CFG_KEY_PRESERVE_NTFS, False))
return
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following execptions.
+ the following exceptions.
1. Probe the drivers of the net-devices present and inject them in
the network configuration under params: driver: <driver> value
2. Generate a fallback network config that does not include any of
the blacklisted devices.
"""
- blacklist = ['mlx4_core']
if not self._network_config:
- LOG.debug('Azure: generating fallback configuration')
- # generate a network config, blacklist picking any mlx4_core devs
- netconfig = net.generate_fallback_config(
- blacklist_drivers=blacklist, config_driver=True)
-
- self._network_config = netconfig
-
+ self._network_config = parse_network_config(self._metadata_imds)
return self._network_config
@@ -587,17 +640,29 @@ def _has_ntfs_filesystem(devpath):
return os.path.realpath(devpath) in ntfs_devices
-def can_dev_be_reformatted(devpath):
- """Determine if block device devpath is newly formatted ephemeral.
+def can_dev_be_reformatted(devpath, preserve_ntfs):
+ """Determine if the ephemeral drive at devpath should be reformatted.
- A newly formatted disk will:
+ A fresh ephemeral disk is formatted by Azure and will:
a.) have a partition table (dos or gpt)
b.) have 1 partition that is ntfs formatted, or
have 2 partitions with the second partition ntfs formatted.
(larger instances with >2TB ephemeral disk have gpt, and will
have a microsoft reserved partition as part 1. LP: #1686514)
c.) the ntfs partition will have no files other than possibly
- 'dataloss_warning_readme.txt'"""
+ 'dataloss_warning_readme.txt'
+
+ The user can indicate that NTFS should never be destroyed by setting
+ DS_CFG_KEY_PRESERVE_NTFS in dscfg.
+ If data is found on NTFS, the user is warned to set DS_CFG_KEY_PRESERVE_NTFS
+ to make sure cloud-init does not accidentally wipe their data.
+ If cloud-init cannot mount the disk to check for data, destruction
+ will be allowed, unless the dscfg key is set."""
+ if preserve_ntfs:
+ msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
+ (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ return False, msg
+
if not os.path.exists(devpath):
return False, 'device %s does not exist' % devpath
@@ -630,18 +695,27 @@ def can_dev_be_reformatted(devpath):
bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
(cand_part, cand_path, devpath))
try:
- file_count = util.mount_cb(cand_path, count_files)
+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+ update_env_for_mount={'LANG': 'C'})
except util.MountFailedError as e:
+ if "mount: unknown filesystem type 'ntfs'" in str(e):
+ return True, (bmsg + ' but this system cannot mount NTFS,'
+ ' assuming there are no important files.'
+ ' Formatting allowed.')
return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
if file_count != 0:
+ LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
+ 'to ensure that filesystem does not get wiped, set '
+ '%s.%s in config', '.'.join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS)
return False, bmsg + ' but had %d files on it.' % file_count
return True, bmsg + ' and had no important files. Safe for reformatting.'
def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
- is_new_instance=False):
+ is_new_instance=False, preserve_ntfs=False):
# wait for ephemeral disk to come up
naplen = .2
missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
@@ -657,7 +731,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
if is_new_instance:
result, msg = (True, "First instance boot.")
else:
- result, msg = can_dev_be_reformatted(devpath)
+ result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)
LOG.debug("reformattable=%s: %s", result, msg)
if not result:
@@ -971,6 +1045,18 @@ def _check_freebsd_cdrom(cdrom_dev):
return False
+def _get_random_seed():
+ """Return content random seed file if available, otherwise,
+ return None."""
+ # azure / hyper-v provides random data here
+ # TODO. find the seed on FreeBSD platform
+ # now update ds_cfg to reflect contents passed in config
+ if util.is_FreeBSD():
+ return None
+ return util.load_file("/sys/firmware/acpi/tables/OEM0",
+ quiet=True, decode=False)
+
+
def list_possible_azure_ds_devs():
devlist = []
if util.is_FreeBSD():
@@ -998,6 +1084,151 @@ def load_azure_ds_dir(source_dir):
return (md, ud, cfg, {'ovf-env.xml': contents})
+def parse_network_config(imds_metadata):
+ """Convert imds_metadata dictionary to network v2 configuration.
+
+ Parses network configuration from IMDS metadata if present, otherwise
+ generates a fallback network config excluding mlx4_core devices.
+
+ @param imds_metadata: Dict of content read from IMDS network service.
+ @return: Dictionary containing network version 2 standard configuration.
+ """
+ if imds_metadata != sources.UNSET and imds_metadata:
+ netconfig = {'version': 2, 'ethernets': {}}
+ LOG.debug('Azure: generating network configuration from IMDS')
+ network_metadata = imds_metadata['network']
+ for idx, intf in enumerate(network_metadata['interface']):
+ nicname = 'eth{idx}'.format(idx=idx)
+ dev_config = {}
+ for addr4 in intf['ipv4']['ipAddress']:
+ privateIpv4 = addr4['privateIpAddress']
+ if privateIpv4:
+ if dev_config.get('dhcp4', False):
+ # Append static address config for nic > 1
+ netPrefix = intf['ipv4']['subnet'][0].get(
+ 'prefix', '24')
+ if not dev_config.get('addresses'):
+ dev_config['addresses'] = []
+ dev_config['addresses'].append(
+ '{ip}/{prefix}'.format(
+ ip=privateIpv4, prefix=netPrefix))
+ else:
+ dev_config['dhcp4'] = True
+ for addr6 in intf['ipv6']['ipAddress']:
+ privateIpv6 = addr6['privateIpAddress']
+ if privateIpv6:
+ dev_config['dhcp6'] = True
+ break
+ if dev_config:
+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
+ dev_config.update(
+ {'match': {'macaddress': mac.lower()},
+ 'set-name': nicname})
+ netconfig['ethernets'][nicname] = dev_config
+ else:
+ blacklist = ['mlx4_core']
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklisting mlx4_core devices
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+ return netconfig
+
+
+def get_metadata_from_imds(fallback_nic, retries):
+ """Query Azure's network metadata service, returning a dictionary.
+
+ If the network is not up, set up ephemeral DHCP on fallback_nic to talk to the
+ IMDS. For more info on IMDS:
+ https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+
+ @param fallback_nic: String. The name of the nic which requires active
+ network in order to query IMDS.
+ @param retries: The number of retries of the IMDS_URL.
+
+ @return: A dict of instance metadata containing compute and network
+ info.
+ """
+ kwargs = {'logfunc': LOG.debug,
+ 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
+ 'func': _get_metadata_from_imds, 'args': (retries,)}
+ if net.is_up(fallback_nic):
+ return util.log_time(**kwargs)
+ else:
+ with EphemeralDHCPv4(fallback_nic):
+ return util.log_time(**kwargs)
+
+
+def _get_metadata_from_imds(retries):
+
+ def retry_on_url_error(msg, exception):
+ if isinstance(exception, UrlError) and exception.code == 404:
+ return True # Continue retries
+ return False # Stop retries on all other exceptions
+
+ url = IMDS_URL + "instance?api-version=2017-12-01"
+ headers = {"Metadata": "true"}
+ try:
+ response = readurl(
+ url, timeout=1, headers=headers, retries=retries,
+ exception_cb=retry_on_url_error)
+ except Exception as e:
+ LOG.debug('Ignoring IMDS instance metadata: %s', e)
+ return {}
+ try:
+ return util.load_json(str(response))
+ except json.decoder.JSONDecodeError:
+ LOG.warning(
+ 'Ignoring non-json IMDS instance metadata: %s', str(response))
+ return {}
+
+
+def maybe_remove_ubuntu_network_config_scripts(paths=None):
+ """Remove Azure-specific ubuntu network config for non-primary nics.
+
+ @param paths: List of networking scripts or directories to remove when
+ present.
+
+ In certain supported ubuntu images, static udev rules or netplan yaml
+ config is delivered in the base ubuntu image to support dhcp on any
+ additional interfaces which get attached by a customer at some point
+ after initial boot. Since the Azure datasource can now regenerate
+ network configuration as metadata reports these new devices, we no longer
+ want the udev rules or netplan's 90-azure-hotplug.yaml to configure
+ networking on eth1 or greater as it might collide with cloud-init's
+ configuration.
+
+ Remove any existing extended network scripts if the datasource is
+ enabled to write network per-boot.
+ """
+ if not paths:
+ paths = UBUNTU_EXTENDED_NETWORK_SCRIPTS
+ logged = False
+ for path in paths:
+ if os.path.exists(path):
+ if not logged:
+ LOG.info(
+ 'Removing Ubuntu extended network scripts because'
+ ' cloud-init updates Azure network configuration on the'
+ ' following event: %s.',
+ EventType.BOOT)
+ logged = True
+ if os.path.isdir(path):
+ util.del_dir(path)
+ else:
+ util.del_file(path)
+
+
+def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+ return True
+ LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ return True
+ return False
+
+
class BrokenAzureDataSource(Exception):
pass
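# A hypothetical IMDS payload shaped like the one parse_network_config()
# walks above, shown only to illustrate the netplan v2 dict it produces.
# The field names are taken from the code in the hunk; the values are
# made up for this sketch.
imds_metadata = {
    'network': {'interface': [{
        'macAddress': '000D3A047598',
        'ipv4': {'subnet': [{'prefix': '24'}],
                 'ipAddress': [{'privateIpAddress': '10.0.0.4'}]},
        'ipv6': {'ipAddress': []},
    }]}
}
# With a single private IPv4 address, the first nic is configured for
# DHCP and matched by its lowercased, colon-separated MAC:
expected = {'version': 2, 'ethernets': {'eth0': {
    'dhcp4': True,
    'match': {'macaddress': '00:0d:3a:04:75:98'},
    'set-name': 'eth0'}}}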
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0df545fc..d4b758f2 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):
dsname = 'CloudStack'
+ # Setup read_url parameters per get_url_params.
+ url_max_wait = 120
+ url_timeout = 50
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
self.metadata_address = "http://%s/" % (self.vr_addr,)
self.cfg = {}
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+ def wait_for_metadata_service(self):
+ url_params = self.get_url_params()
- if max_wait == 0:
+ if url_params.max_wait_seconds <= 0:
return False
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- (max_wait, timeout) = self._get_url_settings()
-
urls = [uhelp.combine_url(self.metadata_address,
'latest/meta-data/instance-id')]
start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ url = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c7b5fe5f..664dc4b7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self.version = None
self.ec2_metadata = None
self._network_config = None
- self.network_json = None
+ self.network_json = sources.UNSET
self.network_eni = None
self.known_macs = None
self.files = {}
@@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- for dev in find_candidate_devs():
+ dslist = self.sys_cfg.get('datasource_list')
+ for dev in find_candidate_devs(dslist=dslist):
try:
# Set mtype if freebsd and turn off sync
if dev.startswith("/dev/cd"):
@@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def network_config(self):
if self._network_config is None:
- if self.network_json is not None:
+ if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
self.network_json, known_macs=self.known_macs)
@@ -195,7 +196,7 @@ def on_first_boot(data, distro=None, network=True):
net_conf = data.get("network_config", '')
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
+ distro.apply_network_config(eni.convert_eni_data(net_conf))
write_injected_files(data.get('files'))
@@ -211,7 +212,7 @@ def write_injected_files(files):
util.logexc(LOG, "Failed writing file: %s", filename)
-def find_candidate_devs(probe_optical=True):
+def find_candidate_devs(probe_optical=True, dslist=None):
"""Return a list of devices that may contain the config drive.
The returned list is sorted by search order where the first item has
@@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True):
* either vfat or iso9660 formated
* labeled with 'config-2' or 'CONFIG-2'
"""
+ if dslist is None:
+ dslist = []
+
# query optical drive to get it in blkid cache for 2.6 kernels
if probe_optical:
for device in OPTICAL_DEVICES:
@@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True):
devices = [d for d in candidates
if d in by_label or not util.is_partition(d)]
- if devices:
+ LOG.debug("devices=%s dslist=%s", devices, dslist)
+ if devices and "IBMCloud" in dslist:
# IBMCloud uses config-2 label, but limited to a single UUID.
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index efb29f88..98ea7bbc 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -29,8 +29,6 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
DEFAULT_PRIMARY_NIC = 'eth0'
-_unset = "_unset"
-
class Platforms(object):
# TODO Rename and move to cloudinit.cloud.CloudNames
@@ -67,15 +65,16 @@ class DataSourceEc2(sources.DataSource):
# for extended metadata content. IPv6 support comes in 2016-09-02
extended_metadata_versions = ['2016-09-02']
+ # Setup read_url parameters per get_url_params.
+ url_max_wait = 120
+ url_timeout = 50
+
_cloud_platform = None
- _network_config = _unset # Used for caching calculated network config v1
+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
# Whether we want to get network configuration from the metadata service.
- get_network_metadata = False
-
- # Track the discovered fallback nic for use in configuration generation.
- _fallback_interface = None
+ perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -106,7 +105,7 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_platform == Platforms.NO_EC2_METADATA:
return False
- if self.get_network_metadata: # Setup networking in init-local stage.
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
if util.is_FreeBSD():
LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
@@ -166,27 +165,11 @@ class DataSourceEc2(sources.DataSource):
else:
return self.metadata['instance-id']
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = max(0, int(mcfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
+ url_params = self.get_url_params()
+ if url_params.max_wait_seconds <= 0:
return False
# Remove addresses from the list that wont resolve.
@@ -213,7 +196,8 @@ class DataSourceEc2(sources.DataSource):
start_time = time.time()
url = uhelp.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
if url:
self.metadata_address = url2base[url]
@@ -318,11 +302,11 @@ class DataSourceEc2(sources.DataSource):
@property
def network_config(self):
"""Return a network config dict for rendering ENI or netplan files."""
- if self._network_config != _unset:
+ if self._network_config != sources.UNSET:
return self._network_config
if self.metadata is None:
- # this would happen if get_data hadn't been called. leave as _unset
+ # this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
"Unexpected call to network_config when metadata is None.")
return None
@@ -361,9 +345,7 @@ class DataSourceEc2(sources.DataSource):
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
else:
- self._fallback_interface = net.find_fallback_nic()
- if self._fallback_interface is None:
- LOG.warning("Did not find a fallback interface on EC2.")
+ return super(DataSourceEc2, self).fallback_interface
return self._fallback_interface
def _crawl_metadata(self):
@@ -398,7 +380,7 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
- get_network_metadata = True # Get metadata network config if present
+ perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (Platforms.AWS,)
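# Why sources.UNSET instead of None: a sketch of the caching idiom used
# by network_config above, where None is itself a legitimate cached
# result and so cannot mean "not computed yet". (Simplified; the real
# sentinel lives in cloudinit.sources.)
UNSET = "_unset"

class CachedConfig(object):
    _network_config = UNSET

    def network_config(self):
        if self._network_config != UNSET:
            return self._network_config  # may legitimately be None
        self._network_config = self._compute()
        return self._network_config

    def _compute(self):
        return None  # stands in for the expensive metadata crawl

cfg = CachedConfig()
assert cfg.network_config() is None   # computed once
assert cfg._network_config is None    # cached, even though it is None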
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 02b3d56f..a5358148 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -8,17 +8,11 @@ There are 2 different api exposed launch methods.
* template: This is the legacy method of launching instances.
When booting from an image template, the system boots first into
a "provisioning" mode. There, host <-> guest mechanisms are utilized
- to execute code in the guest and provision it.
+ to execute code in the guest and configure it. The configuration
+ includes configuring the system network and possibly installing
+ packages and other parts of the software stack.
- Cloud-init will disable itself when it detects that it is in the
- provisioning mode. It detects this by the presence of
- a file '/root/provisioningConfiguration.cfg'.
-
- When provided with user-data, the "first boot" will contain a
- ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data
- provided, then there is no data-source.
-
- Cloud-init never does any network configuration in this mode.
+ After the provisioning is finished, the system reboots.
* os_code: Essentially "launch by OS Code" (Operating System Code).
This is a more modern approach. There is no specific "provisioning" boot.
@@ -30,11 +24,73 @@ There are 2 different api exposed launch methods.
mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be
incorrectly identified as IBMCloud.
+The combination of these 2 launch methods, with or without user-data,
+creates 6 boot scenarios.
+ A. os_code with user-data
+ B. os_code without user-data
+ Cloud-init is fully operational in this mode.
+
+ There is a block device attached with label 'config-2'.
+ As it differs from OpenStack's config-2, we have to differentiate.
+ We do so by requiring the UUID on the filesystem to be "9796-932E".
+
+ This disk will have the following files. Specifically note, there
+ is no versioned path to the meta-data, only 'latest':
+ openstack/latest/meta_data.json
+ openstack/latest/network_data.json
+ openstack/latest/user_data [optional]
+ openstack/latest/vendor_data.json
+
+ vendor_data.json as of 2018-04 looks like this:
+ {"cloud-init":"#!/bin/bash\necho 'root:$6$<snip>' | chpasswd -e"}
+
+ The only difference between A and B in this mode is the presence
+ of user_data on the config disk.
+
+ C. template, provisioning boot with user-data
+ D. template, provisioning boot without user-data.
+ With ds-identify, cloud-init is fully disabled in this mode.
+ Without ds-identify, cloud-init None datasource will be used.
+
+ This is currently identified by the presence of
+ /root/provisioningConfiguration.cfg . That file is placed into the
+ system before it is booted.
+
+ The difference between C and D is the presence of the METADATA disk
+ as described in E below. There is no METADATA disk attached unless
+ user-data is provided.
+
+ E. template, post-provisioning boot with user-data.
+ Cloud-init is fully operational in this mode.
+
+ This is identified by a block device with filesystem label "METADATA".
+ This looks similar to a version-1 OpenStack config drive. It will
+ have the following files:
+
+ openstack/latest/user_data
+ openstack/latest/meta_data.json
+ openstack/content/interfaces
+ meta.js
+
+ meta.js contains something similar to user_data. cloud-init ignores it.
+ cloud-init ignores the 'interfaces' style file here.
+ In this mode, cloud-init has networking code disabled. It relies
+ on the provisioning boot to have configured networking.
+
+ F. template, post-provisioning boot without user-data.
+ With ds-identify, cloud-init will be fully disabled.
+ Without ds-identify, cloud-init None datasource will be used.
+
+ There is no information available to identify this scenario.
+
+ The user will be able to ssh in as root with their public keys that
+ have been installed into /root/.ssh/authorized_keys
+ during the provisioning stage.
+
TODO:
* is uuid (/sys/hypervisor/uuid) stable for life of an instance?
it seems it is not the same as data's uuid in the os_code case
but is in the template case.
-
"""
import base64
import json
@@ -138,8 +194,30 @@ def _is_xen():
return os.path.exists("/proc/xen")
-def _is_ibm_provisioning():
- return os.path.exists("/root/provisioningConfiguration.cfg")
+def _is_ibm_provisioning(
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ"):
+ """Return boolean indicating if this boot is ibm provisioning boot."""
+ if os.path.exists(prov_cfg):
+ msg = "config '%s' exists." % prov_cfg
+ result = True
+ if os.path.exists(inst_log):
+ if os.path.exists(boot_ref):
+ result = (os.stat(inst_log).st_mtime >
+ os.stat(boot_ref).st_mtime)
+ msg += (" log '%s' from %s boot." %
+ (inst_log, "current" if result else "previous"))
+ else:
+ msg += (" log '%s' existed, but no reference file '%s'." %
+ (inst_log, boot_ref))
+ result = False
+ else:
+ msg += " log '%s' did not exist." % inst_log
+ else:
+ result, msg = (False, "config '%s' did not exist." % prov_cfg)
+ LOG.debug("ibm_provisioning=%s: %s", result, msg)
+ return result
def get_ibm_platform():
@@ -189,7 +267,7 @@ def get_ibm_platform():
else:
return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path)
elif _is_ibm_provisioning():
- return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+ return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
return not_found
@@ -217,7 +295,7 @@ def read_md():
results = metadata_from_dir(path)
else:
results = util.mount_cb(path, metadata_from_dir)
- except BrokenMetadata as e:
+ except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
(platform, path, e))
@@ -226,10 +304,6 @@ def read_md():
return ret
-class BrokenMetadata(IOError):
- pass
-
-
def metadata_from_dir(source_dir):
"""Walk source_dir extracting standardized metadata.
@@ -274,12 +348,13 @@ def metadata_from_dir(source_dir):
try:
data = transl(raw)
except Exception as e:
- raise BrokenMetadata("Failed decoding %s: %s" % (path, e))
+ raise sources.BrokenMetadata(
+ "Failed decoding %s: %s" % (path, e))
results[name] = data
if results.get('metadata_raw') is None:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"%s missing required file 'meta_data.json'" % source_dir)
results['metadata'] = {}
@@ -290,7 +365,7 @@ def metadata_from_dir(source_dir):
try:
md['random_seed'] = base64.b64decode(md_raw['random_seed'])
except (ValueError, TypeError) as e:
- raise BrokenMetadata(
+ raise sources.BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e)
renames = (
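# The core of the new _is_ibm_provisioning() check above: an install log
# written before PID 1 started belongs to a previous (provisioning)
# boot. /proc/1/environ is created when init starts, so its mtime marks
# the current boot. Minimal standalone form of that comparison:
import os

def log_from_current_boot(inst_log, boot_ref='/proc/1/environ'):
    return os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime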
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 6ac88635..bcb38544 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -198,13 +198,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
If version is None, then <version>/ will not be used.
"""
if read_file_or_url is None:
- read_file_or_url = util.read_file_or_url
+ read_file_or_url = url_helper.read_file_or_url
if seed_url.endswith("/"):
seed_url = seed_url[:-1]
md = {}
- for path, dictname, binary, optional in DS_FIELDS:
+ for path, _dictname, binary, optional in DS_FIELDS:
if version is None:
url = "%s/%s" % (seed_url, path)
else:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5d3a8ddb..2daea59d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded data from %s", path)
mydata = _merge_new_seed(mydata, seeded)
break
- except ValueError as e:
+ except ValueError:
pass
# If the datasource config had a 'seedfrom' entry, then that takes
@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
try:
seeded = util.mount_cb(dev, _pp2d_callback,
pp2d_kwargs)
- except ValueError as e:
+ except ValueError:
if dev in label_list:
LOG.warning("device %s with label=%s not a"
"valid seed.", dev, label)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index dc914a72..178ccb0f 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -556,7 +556,7 @@ def search_file(dirpath, filename):
if not dirpath or not filename:
return None
- for root, dirs, files in os.walk(dirpath):
+ for root, _dirs, files in os.walk(dirpath):
if filename in files:
return os.path.join(root, filename)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index d4a41116..77ccd128 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -232,7 +232,7 @@ class OpenNebulaNetwork(object):
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
- if gateway:
+ if gateway6:
devconf['gateway6'] = gateway6
# Set DNS servers and search domains
@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
if asuser is not None:
try:
pwd.getpwnam(asuser)
- except KeyError as e:
+ except KeyError:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
user=asuser))
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e55a7638..4a015240 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -7,11 +7,13 @@
import time
from cloudinit import log as logging
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources.helpers import openstack
+from cloudinit.sources import DataSourceOracle as oracle
LOG = logging.getLogger(__name__)
@@ -22,51 +24,37 @@ DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
+# OpenStack DMI constants
+DMI_PRODUCT_NOVA = 'OpenStack Nova'
+DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
+DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
dsname = "OpenStack"
+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
+
+ # Whether we want to get network configuration from the metadata service.
+ perform_dhcp_setup = False
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
self.ssl_details = util.fetch_ssl_details(self.paths)
self.version = None
self.files = {}
- self.ec2_metadata = None
+ self.ec2_metadata = sources.UNSET
+ self.network_json = sources.UNSET
def __str__(self):
root = sources.DataSource.__str__(self)
mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
return mstr
- def _get_url_settings(self):
- # TODO(harlowja): this is shared with ec2 datasource, we should just
- # move it to a shared location instead...
- # Note: the defaults here are different though.
-
- # max_wait < 0 indicates do not wait
- max_wait = -1
- timeout = 10
- retries = 5
-
- try:
- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- try:
- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- try:
- retries = int(self.ds_cfg.get("retries", retries))
- except Exception:
- util.logexc(LOG, "Failed to get retries. using %s", retries)
-
- return (max_wait, timeout, retries)
-
def wait_for_metadata_service(self):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
@@ -86,10 +74,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls.append(md_url)
url2base[md_url] = url
- (max_wait, timeout, retries) = self._get_url_settings()
+ url_params = self.get_url_params()
start_time = time.time()
- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
- timeout=timeout)
+ avail_url = url_helper.wait_for_url(
+ urls=md_urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds)
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
@@ -99,38 +88,68 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def _get_data(self):
- try:
- if not self.wait_for_metadata_service():
- return False
- except IOError:
- return False
+ def check_instance_id(self, sys_cfg):
+ # quickly check (local only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
- (max_wait, timeout, retries) = self._get_url_settings()
+ @property
+ def network_config(self):
+ """Return a network config dict for rendering ENI or netplan files."""
+ if self._network_config != sources.UNSET:
+ return self._network_config
+
+ # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
+ # network_config by default unless configured in /etc/cloud/cloud.cfg*.
+ # Patch Xenial and Artful before release to default to False.
+ if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ self._network_config = None
+ return self._network_config
+ if self.network_json == sources.UNSET:
+ # this would happen if get_data hadn't been called. leave as UNSET
+ LOG.warning(
+ 'Unexpected call to network_config when network_json is None.')
+ return None
+
+ LOG.debug('network config provided via network_json')
+ self._network_config = openstack.convert_net_json(
+ self.network_json, known_macs=None)
+ return self._network_config
- try:
- results = util.log_time(LOG.debug,
- 'Crawl of openstack metadata service',
- read_metadata_service,
- args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': retries,
- 'timeout': timeout})
- except openstack.NonReadable:
- return False
- except (openstack.BrokenMetadata, IOError):
- util.logexc(LOG, "Broken metadata address %s",
- self.metadata_address)
+ def _get_data(self):
+ """Crawl metadata, parse and persist that data for this instance.
+
+ @return: True when metadata discovered indicates OpenStack datasource.
+ False when unable to contact metadata service or when metadata
+ format is invalid or disabled.
+ """
+ oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ if not detect_openstack(accept_oracle=not oracle_considered):
return False
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
+ try:
+ with EphemeralDHCPv4(self.fallback_interface):
+ results = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ results = self._crawl_metadata()
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ return False
+
self.dsmode = self._determine_dsmode([results.get('dsmode')])
if self.dsmode == sources.DSMODE_DISABLED:
return False
-
md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
self.ec2_metadata = results.get('ec2-metadata')
+ self.network_json = results.get('networkdata')
self.userdata_raw = results.get('userdata')
self.version = results['version']
self.files.update(results.get('files', {}))
@@ -145,9 +164,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ def _crawl_metadata(self):
+ """Crawl metadata service when available.
+
+ @returns: Dictionary with all metadata discovered for this datasource.
+ @raise: InvalidMetaDataException on unreadable or broken
+ metadata.
+ """
+ try:
+ if not self.wait_for_metadata_service():
+ raise sources.InvalidMetaDataException(
+ 'No active metadata service found')
+ except IOError as e:
+ raise sources.InvalidMetaDataException(
+ 'IOError contacting metadata service: {error}'.format(
+ error=str(e)))
+
+ url_params = self.get_url_params()
+
+ try:
+ result = util.log_time(
+ LOG.debug, 'Crawl of openstack metadata service',
+ read_metadata_service, args=[self.metadata_address],
+ kwargs={'ssl_details': self.ssl_details,
+ 'retries': url_params.num_retries,
+ 'timeout': url_params.timeout_seconds})
+ except openstack.NonReadable as e:
+ raise sources.InvalidMetaDataException(str(e))
+ except (openstack.BrokenMetadata, IOError):
+ msg = 'Broken metadata address {addr}'.format(
+ addr=self.metadata_address)
+ raise sources.InvalidMetaDataException(msg)
+ return result
+
+
+class DataSourceOpenStackLocal(DataSourceOpenStack):
+ """Run in init-local using DHCP discovery prior to the metadata crawl.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration,
+ that configuration is rendered for the instance based on the metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
def read_metadata_service(base_url, ssl_details=None,
@@ -157,8 +217,25 @@ def read_metadata_service(base_url, ssl_details=None,
return reader.read_v2()
+def detect_openstack(accept_oracle=False):
+ """Return True when a potential OpenStack platform is detected."""
+ if not util.is_x86():
+ return True # Non-Intel CPUs don't properly report DMI product names
+ product_name = util.read_dmi_data('system-product-name')
+ if product_name in VALID_DMI_PRODUCT_NAMES:
+ return True
+ elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ return True
+ elif accept_oracle and oracle._is_platform_viable():
+ return True
+ elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ return True
+ return False
+
+
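A quick sketch of how the new DMI-based detection behaves (test harness assumed, not part of this patch; the mock targets are an assumption about import paths):

    from unittest import mock

    from cloudinit.sources import DataSourceOpenStack as ds

    # Pretend to be an x86 guest whose DMI product name is 'OpenStack Nova'.
    with mock.patch.object(ds.util, 'is_x86', return_value=True), \
            mock.patch.object(ds.util, 'read_dmi_data',
                              return_value='OpenStack Nova'):
        assert ds.detect_openstack() is True  # matches VALID_DMI_PRODUCT_NAMES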
# Used to match classes to dependencies
datasources = [
+ (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
(DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
new file mode 100644
index 00000000..fab39af3
--- /dev/null
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -0,0 +1,233 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
+
+OCI provides an OpenStack-like metadata service which provides only
+'2013-10-17' and 'latest' versions.
+
+Notes:
+ * This datasource does not support OCI-Classic. OCI-Classic
+ provides an EC2 lookalike metadata service.
+ * The uuid provided in DMI data is not the same as the meta-data provided
+ instance-id, but has an equivalent lifespan.
+ * We do need to support upgrade from an instance that cloud-init
+ identified as OpenStack.
+ * Both bare-metal and VMs use iSCSI root
+ * Both bare-metal and VMs provide a chassis-asset-tag of OracleCloud.com
+"""
+
+from cloudinit.url_helper import combine_url, readurl, UrlError
+from cloudinit.net import dhcp
+from cloudinit import net
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.net import cmdline
+from cloudinit import log as logging
+
+import json
+import re
+
+LOG = logging.getLogger(__name__)
+
+CHASSIS_ASSET_TAG = "OracleCloud.com"
+METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
+
+
+class DataSourceOracle(sources.DataSource):
+
+ dsname = 'Oracle'
+ system_uuid = None
+ vendordata_pure = None
+ _network_config = sources.UNSET
+
+ def _is_platform_viable(self):
+ """Check platform environment to report if this datasource may run."""
+ return _is_platform_viable()
+
+ def _get_data(self):
+ if not self._is_platform_viable():
+ return False
+
+ # Network may be configured if iSCSI root. If that is the case, then
+ # read_kernel_cmdline_config will return non-None.
+ if _is_iscsi_root():
+ data = self.crawl_metadata()
+ else:
+ with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
+ data = self.crawl_metadata()
+
+ self._crawled_metadata = data
+ vdata = data['2013-10-17']
+
+ self.userdata_raw = vdata.get('user_data')
+ self.system_uuid = vdata['system_uuid']
+
+ vd = vdata.get('vendor_data')
+ if vd:
+ self.vendordata_pure = vd
+ try:
+ self.vendordata_raw = sources.convert_vendordata(vd)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data: %s", e)
+ self.vendordata_raw = None
+
+ mdcopies = ('public_keys',)
+ md = dict([(k, vdata['meta_data'].get(k))
+ for k in mdcopies if k in vdata['meta_data']])
+
+ mdtrans = (
+ # oracle meta_data.json name, cloudinit.datasource.metadata name
+ ('availability_zone', 'availability-zone'),
+ ('hostname', 'local-hostname'),
+ ('launch_index', 'launch-index'),
+ ('uuid', 'instance-id'),
+ )
+ for dsname, ciname in mdtrans:
+ if dsname in vdata['meta_data']:
+ md[ciname] = vdata['meta_data'][dsname]
+
+ self.metadata = md
+ return True
+
+ def crawl_metadata(self):
+ return read_metadata()
+
+ def check_instance_id(self, sys_cfg):
+ """quickly check (local only) if self.instance_id is still valid
+
+ On Oracle, the dmi-provided system uuid differs from the instance-id
+ but has the same life-span."""
+ return sources.instance_id_matches_system_uuid(self.system_uuid)
+
+ def get_public_ssh_keys(self):
+ return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+
+ @property
+ def network_config(self):
+ """Network config is read from initramfs provided files
+ If none is present, then we fall back to fallback configuration.
+
+ One thing to note here is that this method is not currently
+ considered at all if there is kernel/initramfs provided
+ data. In that case, stages considers that the cmdline data
+ overrides datasource provided data and does not consult here.
+
+ We nonetheless return cmdline provided config if present
+ and fall back to the generated fallback config."""
+ if self._network_config == sources.UNSET:
+ cmdline_cfg = cmdline.read_kernel_cmdline_config()
+ if cmdline_cfg:
+ self._network_config = cmdline_cfg
+ else:
+ self._network_config = self.distro.generate_fallback_config()
+ return self._network_config
+
+
+def _read_system_uuid():
+ sys_uuid = util.read_dmi_data('system-uuid')
+ return None if sys_uuid is None else sys_uuid.lower()
+
+
+def _is_platform_viable():
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ return asset_tag == CHASSIS_ASSET_TAG
+
+
+def _is_iscsi_root():
+ return bool(cmdline.read_kernel_cmdline_config())
+
+
+def _load_index(content):
+ """Return a list entries parsed from content.
+
+ OpenStack's metadata service returns a newline delimited list
+ of items. Oracle's implementation has an HTML-formatted list of links.
+ The parser here just grabs targets from <a href="target">
+ and throws away "../".
+
+ Oracle has acknowledged this as buggy and may fix it in the future
+ to instead return a '\n' delimited plain-text list. This function
+ will continue to work if that change is made.
+ if not content.lower().startswith("<html>"):
+ return content.splitlines()
+ items = re.findall(
+ r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
+ return [i for i in items if not i.startswith(".")]
+
+
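Illustrative inputs (assumed, not from this patch) showing _load_index coping with both the HTML index Oracle serves today and a future plain newline-delimited index:

    html = '<html><a href="../">../</a><a href="meta_data.json">x</a></html>'
    plain = 'meta_data.json\nuser_data'

    assert _load_index(html) == ['meta_data.json']   # '../' is discarded
    assert _load_index(plain) == ['meta_data.json', 'user_data']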
+def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
+ version='2013-10-17'):
+ """Read metadata, return a dictionary.
+
+ Each path listed in the index will be represented in the dictionary.
+ If the path ends in .json, then the content will be decoded and
+ populated into the dictionary.
+
+ The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
+ Example: given paths = ('user_data', 'meta_data.json')
+ This would return:
+ {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode()),
+ 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
+ """
+ endpoint = combine_url(endpoint_base, version) + "/"
+ if sys_uuid is None:
+ sys_uuid = _read_system_uuid()
+ if not sys_uuid:
+ raise sources.BrokenMetadata("Failed to read system uuid.")
+
+ try:
+ resp = readurl(endpoint)
+ if not resp.ok():
+ raise sources.BrokenMetadata(
+ "Bad response from %s: %s" % (endpoint, resp.code))
+ except UrlError as e:
+ raise sources.BrokenMetadata(
+ "Failed to read index at %s: %s" % (endpoint, e))
+
+ entries = _load_index(resp.contents.decode('utf-8'))
+ LOG.debug("index url %s contained: %s", endpoint, entries)
+
+ # meta_data.json is required.
+ mdj = 'meta_data.json'
+ if mdj not in entries:
+ raise sources.BrokenMetadata(
+ "Required field '%s' missing in index at %s" % (mdj, endpoint))
+
+ ret = {'system_uuid': sys_uuid}
+ for path in entries:
+ response = readurl(combine_url(endpoint, path))
+ if path.endswith(".json"):
+ ret[path.rpartition(".")[0]] = (
+ json.loads(response.contents.decode('utf-8')))
+ else:
+ ret[path] = response.contents
+
+ return {version: ret}
+
+
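A hedged usage sketch (run as root on an OCI instance so the DMI uuid is readable; the meta_data keys follow the mdtrans table above):

    data = read_metadata()
    vdata = data['2013-10-17']
    print(vdata['system_uuid'])
    md = vdata['meta_data']
    print(md.get('uuid'), md.get('hostname'))  # become instance-id/local-hostname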
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceOracle, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+ import os
+
+ parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
+ parser.add_argument("--endpoint", metavar="URL",
+ help="The url of the metadata service.",
+ default=METADATA_ENDPOINT)
+ args = parser.parse_args()
+ sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
+
+ data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
+ data['is_platform_viable'] = _is_platform_viable()
+ print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index e2502b02..9dc4ab23 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -29,7 +29,9 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -168,8 +170,8 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
-
dsname = "Scaleway"
+ update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
@@ -185,11 +187,10 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self._fallback_interface = None
+ self._network_config = None
- def _get_data(self):
- if not on_scaleway():
- return False
-
+ def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
timeout=self.timeout,
retries=self.retries)
@@ -203,9 +204,48 @@ class DataSourceScaleway(sources.DataSource):
'vendor-data', self.vendordata_address,
self.retries, self.timeout
)
+
+ def _get_data(self):
+ if not on_scaleway():
+ return False
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+ try:
+ with EphemeralDHCPv4(self._fallback_interface):
+ util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except (NoDHCPLeaseError) as e:
+ util.logexc(LOG, str(e))
+ return False
return True
@property
+ def network_config(self):
+ """
+ Configure networking according to data received from the
+ metadata API.
+ """
+ if self._network_config:
+ return self._network_config
+
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+
+ netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
+ subnets = [{'type': 'dhcp4'}]
+ if self.metadata['ipv6']:
+ subnets += [{'type': 'static',
+ 'address': '%s' % self.metadata['ipv6']['address'],
+ 'gateway': '%s' % self.metadata['ipv6']['gateway'],
+ 'netmask': '%s' % self.metadata['ipv6']['netmask'],
+ }]
+ netcfg['subnets'] = subnets
+ self._network_config = {'version': 1, 'config': [netcfg]}
+ return self._network_config
+
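The rendered dict has this shape (nic name and addresses are illustrative only):

    {'version': 1,
     'config': [{'type': 'physical',
                 'name': 'ens2',                 # the discovered fallback nic
                 'subnets': [{'type': 'dhcp4'},
                             {'type': 'static',  # only when metadata has 'ipv6'
                              'address': '2001:db8::2',
                              'gateway': '2001:db8::1',
                              'netmask': '64'}]}]}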
+ @property
def launch_index(self):
return None
@@ -228,7 +268,7 @@ class DataSourceScaleway(sources.DataSource):
datasources = [
- (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceScaleway, (sources.DEP_FILESYSTEM,)),
]
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 86bfa5d8..593ac91a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,4 +1,5 @@
# Copyright (C) 2013 Canonical Ltd.
+# Copyright (c) 2018, Joyent, Inc.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
@@ -10,17 +11,19 @@
# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
-# would send "GET hostname" on /dev/ttyS1.
+# would send "GET sdc:hostname" on /dev/ttyS1.
# For Linux Guests running in LX-Brand Zones on SmartOS hosts
# a socket (/native/.zonecontrol/metadata.sock) is used instead
# of a serial console.
#
# Certain behavior is defined by the DataDictionary
-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+# https://eng.joyent.com/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
import base64
import binascii
+import errno
+import fcntl
import json
import os
import random
@@ -108,7 +111,7 @@ BUILTIN_CLOUD_CONFIG = {
'overwrite': False}
},
'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext3',
+ 'filesystem': 'ext4',
'device': 'ephemeral0'}],
}
@@ -162,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource):
dsname = "Joyent"
- _unset = "_unset"
- smartos_type = _unset
- md_client = _unset
+ smartos_type = sources.UNSET
+ md_client = sources.UNSET
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -186,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource):
return "%s [client=%s]" % (root, self.md_client)
def _init(self):
- if self.smartos_type == self._unset:
+ if self.smartos_type == sources.UNSET:
self.smartos_type = get_smartos_environ()
if self.smartos_type is None:
self.md_client = None
- if self.md_client == self._unset:
+ if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
metadata_sockfile=self.ds_cfg['metadata_sockfile'],
@@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource):
self.md_client)
return False
+ # Open once for many requests, rather than once for each request
+ self.md_client.open_transport()
+
for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
smartos_noun, strip = attribute
md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
@@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource):
for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
md[ci_noun] = self.md_client.get_json(smartos_noun)
+ self.md_client.close_transport()
+
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
@@ -266,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource):
write_boot_content(u_data, u_data_f)
# Handle the cloud-init regular meta
+
+ # The hostname may or may not be qualified with the local domain name.
+ # This follows section 3.14 of RFC 2132.
if not md['local-hostname']:
- md['local-hostname'] = md['instance-id']
+ if md['hostname']:
+ md['local-hostname'] = md['hostname']
+ else:
+ md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
@@ -285,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
self.network_data = md['network-data']
+ self.routes_data = md['routes']
self._set_provisioned()
return True
@@ -308,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
convert_smartos_network_data(
network_data=self.network_data,
dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain']))
+ dns_domain=self.metadata['dns_domain'],
+ routes=self.routes_data))
return self._network_config
@@ -316,6 +331,10 @@ class JoyentMetadataFetchException(Exception):
pass
+class JoyentMetadataTimeoutException(JoyentMetadataFetchException):
+ pass
+
+
class JoyentMetadataClient(object):
"""
A client implementing v2 of the Joyent Metadata Protocol Specification.
@@ -360,6 +379,47 @@ class JoyentMetadataClient(object):
LOG.debug('Value "%s" found.', value)
return value
+ def _readline(self):
+ """
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ASCII string with the trailing newline removed.
+
+ If a timeout (per-byte) is set and it expires, a
+ JoyentMetadataFetchException will be thrown.
+ """
+ response = []
+
+ def as_ascii():
+ return b''.join(response).decode('ascii')
+
+ msg = "Partial response: '%s'"
+ while True:
+ try:
+ byte = self.fp.read(1)
+ if len(byte) == 0:
+ raise JoyentMetadataTimeoutException(msg % as_ascii())
+ if byte == b'\n':
+ return as_ascii()
+ response.append(byte)
+ except OSError as exc:
+ if exc.errno == errno.EAGAIN:
+ raise JoyentMetadataTimeoutException(msg % as_ascii())
+ raise
+
+ def _write(self, msg):
+ self.fp.write(msg.encode('ascii'))
+ self.fp.flush()
+
+ def _negotiate(self):
+ LOG.debug('Negotiating protocol V2')
+ self._write('NEGOTIATE V2\n')
+ response = self._readline()
+ LOG.debug('read "%s"', response)
+ if response != 'V2_OK':
+ raise JoyentMetadataFetchException(
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response)
+ LOG.debug('Negotiation complete')
+
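A minimal harness (assumed, not part of this patch) showing the handshake _negotiate performs over self.fp; the fp= keyword follows the serial client change below:

    import io

    class FakeTransport(object):
        """Pretend host end that answers NEGOTIATE V2 with V2_OK."""
        def __init__(self):
            self._rx = io.BytesIO(b'V2_OK\n')

        def write(self, data):
            assert data == b'NEGOTIATE V2\n'

        def flush(self):
            pass

        def read(self, size):
            return self._rx.read(size)

    client = JoyentMetadataClient(fp=FakeTransport())
    client._negotiate()  # any response other than 'V2_OK' raises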
def request(self, rtype, param=None):
request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
message_body = ' '.join((request_id, rtype,))
@@ -374,18 +434,11 @@ class JoyentMetadataClient(object):
self.open_transport()
need_close = True
- self.fp.write(msg.encode('ascii'))
- self.fp.flush()
-
- response = bytearray()
- response.extend(self.fp.read(1))
- while response[-1:] != b'\n':
- response.extend(self.fp.read(1))
-
+ self._write(msg)
+ response = self._readline()
if need_close:
self.close_transport()
- response = response.rstrip().decode('ascii')
LOG.debug('Read "%s" from metadata transport.', response)
if 'SUCCESS' not in response:
@@ -410,9 +463,9 @@ class JoyentMetadataClient(object):
def list(self):
result = self.request(rtype='KEYS')
- if result:
- result = result.split('\n')
- return result
+ if not result:
+ return []
+ return result.split('\n')
def put(self, key, val):
param = b' '.join([base64.b64encode(i.encode())
@@ -450,6 +503,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
self.fp = sock.makefile('rwb')
+ self._negotiate()
def exists(self):
return os.path.exists(self.socketpath)
@@ -459,8 +513,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
+ def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
+ fp=None):
+ super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -468,10 +523,51 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
return os.path.exists(self.device)
def open_transport(self):
- ser = serial.Serial(self.device, timeout=self.timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.device)
- self.fp = ser
+ if self.fp is None:
+ ser = serial.Serial(self.device, timeout=self.timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.device)
+ self.fp = ser
+ fcntl.lockf(ser, fcntl.LOCK_EX)
+ self._flush()
+ self._negotiate()
+
+ def _flush(self):
+ LOG.debug('Flushing input')
+ # Read any pending data
+ timeout = self.fp.timeout
+ self.fp.timeout = 0.1
+ while True:
+ try:
+ self._readline()
+ except JoyentMetadataTimeoutException:
+ break
+ LOG.debug('Input empty')
+
+ # Send a newline and expect "invalid command". Keep trying until
+ # successful. Retry rather frequently so that the "Is the host
+ # metadata service running" warning appears on the console soon after
+ # someone attaches in an effort to debug.
+ if timeout > 5:
+ self.fp.timeout = 5
+ else:
+ self.fp.timeout = timeout
+ while True:
+ LOG.debug('Writing newline, expecting "invalid command"')
+ self._write('\n')
+ try:
+ response = self._readline()
+ if response == 'invalid command':
+ break
+ if response == 'FAILURE':
+ LOG.debug('Got "FAILURE". Retrying.')
+ continue
+ LOG.warning('Unexpected response "%s" during flush', response)
+ except JoyentMetadataTimeoutException:
+ LOG.warning('Timeout while initializing metadata client. '
+ 'Is the host metadata service running?')
+ LOG.debug('Got "invalid command". Flush complete.')
+ self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
@@ -587,6 +683,18 @@ def jmc_client_factory(
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
+def identify_file(content_f):
+ cmd = ["file", "--brief", "--mime-type", content_f]
+ f_type = None
+ try:
+ (f_type, _err) = util.subp(cmd)
+ LOG.debug("script %s mime type is %s", content_f, f_type)
+ except util.ProcessExecutionError as e:
+ util.logexc(
+ LOG, ("Failed to identify script type for %s" % content_f, e))
+ return None if f_type is None else f_type.strip()
+
+
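For example (mime type assumed to be what file(1) would print; the path is hypothetical):

    mime = identify_file('/var/lib/cloud/instance/scripts/user-script')
    # -> 'text/plain' for a shebang-less shell script, which
    #    write_boot_content below uses to decide to prepend '#!/bin/bash'.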
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0o400):
"""
@@ -619,18 +727,11 @@ def write_boot_content(content, content_f, link=None, shebang=False,
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
- except Exception as e:
- util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ f_type = identify_file(content_f)
+ if f_type == "text/plain":
+ util.write_file(
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ LOG.debug("added shebang to file %s", content_f)
if link:
try:
@@ -650,7 +751,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version.lower() == 'brandz virtual linux':
+ if uname_version == 'BrandZ virtual linux':
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -658,7 +759,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and 'smartdc' in system_type.lower():
+ if system_type and system_type.startswith('SmartDC'):
return SMARTOS_ENV_KVM
return None
@@ -666,7 +767,8 @@ def get_smartos_environ(uname_version=None, product_name=None):
# Convert SMARTOS 'sdc:nics' data to network_config yaml
def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None):
+ dns_servers=None, dns_domain=None,
+ routes=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -684,6 +786,10 @@ def convert_smartos_network_data(network_data=None,
keys are related to ip configuration. For each ip in the 'ips' list
we create a subnet entry under 'subnets' pairing the ip to a one in
the 'gateways' list.
+
+ Each route in sdc:routes is mapped to a route on each interface.
+ The sdc:routes properties 'dst' and 'gateway' map to 'network' and
+ 'gateway'. The 'linklocal' sdc:routes property is ignored.
"""
valid_keys = {
@@ -706,6 +812,10 @@ def convert_smartos_network_data(network_data=None,
'scope',
'type',
],
+ 'route': [
+ 'network',
+ 'gateway',
+ ],
}
if dns_servers:
@@ -720,6 +830,9 @@ def convert_smartos_network_data(network_data=None,
else:
dns_domain = []
+ if not routes:
+ routes = []
+
def is_valid_ipv4(addr):
return '.' in addr
@@ -746,6 +859,7 @@ def convert_smartos_network_data(network_data=None,
if ip == "dhcp":
subnet = {'type': 'dhcp4'}
else:
+ routeents = []
subnet = dict((k, v) for k, v in nic.items()
if k in valid_keys['subnet'])
subnet.update({
@@ -767,6 +881,25 @@ def convert_smartos_network_data(network_data=None,
pgws[proto]['gw'] = gateways[0]
subnet.update({'gateway': pgws[proto]['gw']})
+ for route in routes:
+ rcfg = dict((k, v) for k, v in route.items()
+ if k in valid_keys['route'])
+ # Linux uses the value of 'gateway' to determine
+ # automatically if the route is a forward/next-hop
+ # (non-local IP for gateway) or an interface/resolver
+ # (local IP for gateway). So we can ignore the
+ # 'interface' attribute of sdc:routes, because SDC
+ # guarantees that the gateway is a local IP for
+ # "interface=true".
+ #
+ # Eventually we should be smart and compare "gateway"
+ # to see if it's in the prefix. We can then smartly
+ # add or not-add this route. But for now,
+ # when in doubt, use brute force! Routes for everyone!
+ rcfg.update({'network': route['dst']})
+ routeents.append(rcfg)
+ subnet.update({'routes': routeents})
+
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
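For example (values assumed), one sdc:routes entry maps onto each static subnet as:

    route = {'dst': '10.0.0.0/8', 'gateway': '192.168.1.1', 'linklocal': False}
    # yields, under the subnet built above:
    #   'routes': [{'network': '10.0.0.0/8', 'gateway': '192.168.1.1'}]
    # 'linklocal' is dropped since only 'network' and 'gateway' are valid keys.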
@@ -810,12 +943,14 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+ for depkey in ('network-data', 'dns_servers', 'dns_domain',
+ 'routes'):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
network_data=data['network-data'],
dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'])
+ dns_domain=data['dns_domain'],
+ routes=data['routes'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index df0b374a..5ac98826 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,6 +9,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
+from collections import namedtuple
import copy
import json
import os
@@ -17,6 +18,8 @@ import six
from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import net
+from cloudinit.event import EventType
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
@@ -35,12 +38,23 @@ DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
DS_PREFIX = 'DataSource'
-# File in which instance meta-data, user-data and vendor-data is written
+EXPERIMENTAL_TEXT = (
+ "EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
+ " key may change in subsequent releases of cloud-init.")
+
+
+# File in which publicly available instance meta-data is written
+# security-sensitive key values are redacted from this world-readable file
INSTANCE_JSON_FILE = 'instance-data.json'
+# security-sensitive key values are present in this root-readable file
+INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
+REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
# Key which can be provide a cloud's official product name to cloud-init
METADATA_CLOUD_NAME_KEY = 'cloud-name'
+UNSET = "_unset"
+
LOG = logging.getLogger(__name__)
@@ -48,26 +62,64 @@ class DataSourceNotFoundException(Exception):
pass
-def process_base64_metadata(metadata, key_path=''):
- """Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
+class InvalidMetaDataException(Exception):
+ """Raised when metadata is broken, unavailable or disabled."""
+ pass
+
+
+def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+ """Process all instance metadata, cleaning it up for persisting as JSON.
+
+ Strip the ci-b64 prefix and catalog any base64-encoded keys in the
+ 'base64_encoded_keys' list.
+
+ @return Dict copy of processed metadata.
+ """
md_copy = copy.deepcopy(metadata)
- md_copy['base64-encoded-keys'] = []
+ md_copy['base64_encoded_keys'] = []
+ md_copy['sensitive_keys'] = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
+ if key in sensitive_keys or sub_key_path in sensitive_keys:
+ md_copy['sensitive_keys'].append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64-encoded-keys'].append(sub_key_path)
+ md_copy['base64_encoded_keys'].append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
- return_val = process_base64_metadata(val, sub_key_path)
- md_copy['base64-encoded-keys'].extend(
- return_val.pop('base64-encoded-keys'))
+ return_val = process_instance_metadata(
+ val, sub_key_path, sensitive_keys)
+ md_copy['base64_encoded_keys'].extend(
+ return_val.pop('base64_encoded_keys'))
+ md_copy['sensitive_keys'].extend(
+ return_val.pop('sensitive_keys'))
md_copy[key] = return_val
return md_copy
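A short sketch (input values assumed) of the bookkeeping this function now performs:

    md = {'role': 'ci-b64:d2VicHJvZA==',
          'ds': {'security-credentials': {'secret': 'xyz'}}}
    out = process_instance_metadata(md, sensitive_keys=('security-credentials',))
    # out['base64_encoded_keys'] -> ['role']
    # out['sensitive_keys']      -> ['ds/security-credentials']
    # out['role']                -> 'd2VicHJvZA==' (ci-b64: prefix stripped)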
+def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
+ """Redact any sensitive keys from the provided metadata dictionary.
+
+ Replace the values of any keys listed in 'sensitive_keys' with redact_value.
+ """
+ if not metadata.get('sensitive_keys', []):
+ return metadata
+ md_copy = copy.deepcopy(metadata)
+ for key_path in metadata.get('sensitive_keys'):
+ path_parts = key_path.split('/')
+ obj = md_copy
+ for path in path_parts:
+ if isinstance(obj[path], dict) and path != path_parts[-1]:
+ obj = obj[path]
+ obj[path] = redact_value
+ return md_copy
+
+
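Continuing that sketch, redaction replaces the cataloged values in the deep copy it returns, leaving the input untouched:

    redacted = redact_sensitive_keys(out)
    # redacted['ds']['security-credentials'] -> 'redacted for non-root user'
    # out['ds']['security-credentials']      -> {'secret': 'xyz'} (unchanged)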
+URLParams = namedtuple(
+ 'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+
+
@six.add_metaclass(abc.ABCMeta)
class DataSource(object):
@@ -81,6 +133,37 @@ class DataSource(object):
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
+ # Track the discovered fallback nic for use in configuration generation.
+ _fallback_interface = None
+
+ # read_url_params
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
+
+ # The datasource defines a set of supported EventTypes during which
+ # the datasource can react to changes in metadata and regenerate
+ # network configuration on metadata changes.
+ # A datasource which supports writing network config on each system boot
+ # would call update_events['network'].add(EventType.BOOT).
+
+ # Default: generate network config on new instance id (first boot).
+ update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
+
+ # N-tuple listing default values for any metadata-related class
+ # attributes cached on an instance during a process_data run. These
+ # attribute values are reset via clear_cached_attrs during any
+ # update_metadata call.
+ cached_attr_defaults = (
+ ('ec2_metadata', UNSET), ('network_json', UNSET),
+ ('metadata', {}), ('userdata', None), ('userdata_raw', None),
+ ('vendordata', None), ('vendordata_raw', None))
+
+ _dirty_cache = False
+
+ # N-tuple of keypaths or keynames to redact from instance-data.json for
+ # non-root users
+ sensitive_metadata_keys = ('security-credentials',)
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -106,49 +189,140 @@ class DataSource(object):
def _get_standardized_metadata(self):
"""Return a dictionary of standardized metadata keys."""
- return {'v1': {
- 'local-hostname': self.get_hostname(),
- 'instance-id': self.get_instance_id(),
- 'cloud-name': self.cloud_name,
- 'region': self.region,
- 'availability-zone': self.availability_zone}}
+ local_hostname = self.get_hostname()
+ instance_id = self.get_instance_id()
+ availability_zone = self.availability_zone
+ cloud_name = self.cloud_name
+ # When adding new standard keys prefer underscore-delimited instead
+ # of hyphen-delimited to support simple variable references in jinja
+ # templates.
+ return {
+ 'v1': {
+ 'availability-zone': availability_zone,
+ 'availability_zone': availability_zone,
+ 'cloud-name': cloud_name,
+ 'cloud_name': cloud_name,
+ 'instance-id': instance_id,
+ 'instance_id': instance_id,
+ 'local-hostname': local_hostname,
+ 'local_hostname': local_hostname,
+ 'region': self.region}}
+
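With the underscore aliases in place, jinja user-data (handled by the jinja template support elsewhere in this merge) can use simple variable references; an illustrative snippet:

    ## template: jinja
    #cloud-config
    runcmd:
      - echo "booted {{ v1.instance_id }} in {{ v1.cloud_name }}"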
+ def clear_cached_attrs(self, attr_defaults=()):
+ """Reset any cached metadata attributes to datasource defaults.
+
+ @param attr_defaults: Optional tuple of (attr, value) pairs to
+ set instead of cached_attr_defaults.
+ """
+ if not self._dirty_cache:
+ return
+ if attr_defaults:
+ attr_values = attr_defaults
+ else:
+ attr_values = self.cached_attr_defaults
+
+ for attribute, value in attr_values:
+ if hasattr(self, attribute):
+ setattr(self, attribute, value)
+ if not attr_defaults:
+ self._dirty_cache = False
def get_data(self):
"""Datasources implement _get_data to setup metadata and userdata_raw.
Minimally, the datasource should return a boolean True on success.
"""
+ self._dirty_cache = True
return_value = self._get_data()
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
if not return_value:
return return_value
+ self.persist_instance_data()
+ return return_value
+ def persist_instance_data(self):
+ """Process and write INSTANCE_JSON_FILE with all instance metadata.
+
+ Replace any hyphens with underscores in key names for use in template
+ processing.
+
+ @return True on successful write, False otherwise.
+ """
instance_data = {
- 'ds': {
- 'meta-data': self.metadata,
- 'user-data': self.get_userdata_raw(),
- 'vendor-data': self.get_vendordata_raw()}}
+ 'ds': {'_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': self.metadata}}
+ if hasattr(self, 'network_json'):
+ network_json = getattr(self, 'network_json')
+ if network_json != UNSET:
+ instance_data['ds']['network_json'] = network_json
+ if hasattr(self, 'ec2_metadata'):
+ ec2_metadata = getattr(self, 'ec2_metadata')
+ if ec2_metadata != UNSET:
+ instance_data['ds']['ec2_metadata'] = ec2_metadata
instance_data.update(
self._get_standardized_metadata())
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
- # Strip base64: prefix and return base64-encoded-keys
- processed_data = process_base64_metadata(json.loads(content))
+ # Strip base64: prefix and set base64_encoded_keys list.
+ processed_data = process_instance_metadata(
+ json.loads(content),
+ sensitive_keys=self.sensitive_metadata_keys)
except TypeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
+ return False
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
- return return_value
- write_json(json_file, processed_data, mode=0o600)
- return return_value
+ return False
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ # World readable; sensitive values redacted per the comments above
+ write_json(json_file, redact_sensitive_keys(processed_data))
+ json_sensitive_file = os.path.join(self.paths.run_dir,
+ INSTANCE_JSON_SENSITIVE_FILE)
+ write_json(json_sensitive_file, processed_data, mode=0o600)
+ return True
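Net effect (run_dir is normally /run/cloud-init; the path is an assumption here): two files per boot, one redacted and world-readable, one complete and root-only:

    import json
    # /run/cloud-init/instance-data.json            world-readable, redacted
    # /run/cloud-init/instance-data-sensitive.json  mode 0600, unredacted
    with open('/run/cloud-init/instance-data.json') as stream:
        print(json.load(stream)['v1']['cloud_name'])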
def _get_data(self):
+ """Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
'Subclasses of DataSource must implement _get_data which'
' sets self.metadata, vendordata_raw and userdata_raw.')
+ def get_url_params(self):
+ """Return the Datasource's preferred url_read parameters.
+
+ Subclasses may override url_max_wait, url_timeout, url_retries.
+
+ @return: A URLParams object with max_wait_seconds, timeout_seconds,
+ num_retries.
+ """
+ max_wait = self.url_max_wait
+ try:
+ max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
+ except ValueError:
+ util.logexc(
+ LOG, "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"), max_wait)
+
+ timeout = self.url_timeout
+ try:
+ timeout = max(
+ 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ except ValueError:
+ timeout = self.url_timeout
+ util.logexc(
+ LOG, "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get('timeout'), timeout)
+
+ retries = self.url_retries
+ try:
+ retries = int(self.ds_cfg.get("retries", self.url_retries))
+ except Exception:
+ util.logexc(
+ LOG, "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get('retries'), retries)
+
+ return URLParams(max_wait, timeout, retries)
+
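The overrides come from the datasource's ds_cfg; a hypothetical drop-in equivalent to datasource: {OpenStack: {max_wait: 120, timeout: 10, retries: 5}} would yield:

    sys_cfg = {'datasource': {'OpenStack': {
        'max_wait': '120', 'timeout': '10', 'retries': '5'}}}
    # get_url_params() on such a datasource returns
    # URLParams(max_wait_seconds=120, timeout_seconds=10, num_retries=5)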
def get_userdata(self, apply_filter=False):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -162,6 +336,17 @@ class DataSource(object):
return self.vendordata
@property
+ def fallback_interface(self):
+ """Determine the network interface used during local network config."""
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+ if self._fallback_interface is None:
+ LOG.warning(
+ "Did not find a fallback interface on %s.",
+ self.cloud_name)
+ return self._fallback_interface
+
+ @property
def cloud_name(self):
"""Return lowercase cloud name as determined by the datasource.
@@ -340,6 +525,43 @@ class DataSource(object):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
+ def update_metadata(self, source_event_types):
+ """Refresh cached metadata if the datasource supports this event.
+
+ The datasource has a list of update_events which
+ trigger refreshing all cached metadata as well as refreshing the
+ network configuration.
+
+ @param source_event_types: List of EventTypes which may trigger a
+ metadata update.
+
+ @return True if the datasource did successfully update cached metadata
+ due to source_event_type.
+ """
+ supported_events = {}
+ for event in source_event_types:
+ for update_scope, update_events in self.update_events.items():
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
+ for scope, matched_events in supported_events.items():
+ LOG.debug(
+ "Update datasource metadata and %s config due to events: %s",
+ scope, ', '.join(matched_events))
+ # Each datasource has a cached config property which needs clearing
+ # Once cleared that config property will be regenerated from
+ # current metadata.
+ self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ if supported_events:
+ self.clear_cached_attrs()
+ result = self.get_data()
+ if result:
+ return True
+ LOG.debug("Datasource %s not updated for events: %s", self,
+ ', '.join(source_event_types))
+ return False
+
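A sketch of how a datasource opts in to per-boot refresh, mirroring the Scaleway change above (the subclass is illustrative):

    from cloudinit.event import EventType

    class DataSourceExample(DataSource):  # hypothetical
        update_events = {'network': {EventType.BOOT_NEW_INSTANCE,
                                     EventType.BOOT}}

    # The init stages then call, on every boot:
    #   ds.update_metadata([EventType.BOOT])
    # which clears the cached _network_config and re-runs get_data().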
def check_instance_id(self, sys_cfg):
# quickly check (local only) if self.instance_id is still valid
return False
@@ -444,7 +666,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.get_data():
+ if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
@@ -517,6 +739,10 @@ def convert_vendordata(data, recurse=True):
raise ValueError("Unknown data type for vendordata: %s" % type(data))
+class BrokenMetadata(IOError):
+ pass
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 90c12df1..e5696b1f 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -14,6 +14,7 @@ from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return util.read_file_or_url(url, headers=headers)
+ return url_helper.read_file_or_url(url, headers=headers)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return util.read_file_or_url(url, data=data, headers=headers)
+ return url_helper.read_file_or_url(url, data=data, headers=headers)
class GoalState(object):
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 693f8d5c..0e7cccac 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -41,10 +41,9 @@ def assign_ipv4_link_local(nic=None):
"address")
try:
- (result, _err) = util.subp(ip_addr_cmd)
+ util.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
-
- (result, _err) = util.subp(ip_link_cmd)
+ util.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -75,7 +74,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- (result, _err) = util.subp(ip_addr_cmd)
+ util.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 26f3168d..9c29ceac 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -21,6 +21,8 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.sources import BrokenMetadata
+
# See https://docs.openstack.org/user-guide/cli-config-drive.html
LOG = logging.getLogger(__name__)
@@ -36,21 +38,38 @@ KEY_COPIES = (
('local-hostname', 'hostname', False),
('instance-id', 'uuid', True),
)
+
+# Versions and names taken from nova source nova/api/metadata/base.py
OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
OS_LIBERTY = '2015-10-15'
+# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
+OS_NEWTON_ONE = '2016-06-30'
+# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
+OS_NEWTON_TWO = '2016-10-06'
+# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
+OS_OCATA = '2017-02-22'
+# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
+OS_ROCKY = '2018-08-27'
+
+
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
OS_LIBERTY,
+ OS_NEWTON_ONE,
+ OS_NEWTON_TWO,
+ OS_OCATA,
+ OS_ROCKY,
)
PHYSICAL_TYPES = (
None,
+ 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
'dvs',
'ethernet',
@@ -68,10 +87,6 @@ class NonReadable(IOError):
pass
-class BrokenMetadata(IOError):
- pass
-
-
class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
@@ -441,7 +456,7 @@ class MetadataReader(BaseReader):
return self._versions
found = []
version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
+ content = self._path_read(version_path, decode=True)
for line in content.splitlines():
line = line.strip()
if not line:
@@ -589,6 +604,8 @@ def convert_net_json(network_json=None, known_macs=None):
cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
elif link['type'] in ['bond']:
params = {}
+ if link_mac_addr:
+ params['mac_address'] = link_mac_addr
for k, v in link.items():
if k == 'bond_links':
continue
@@ -638,7 +655,7 @@ def convert_net_json(network_json=None, known_macs=None):
known_macs = net.get_interfaces_by_mac()
# go through and fill out the link_id_info with names
- for link_id, info in link_id_info.items():
+ for _link_id, info in link_id_info.items():
if info.get('name'):
continue
if info.get('mac') in known_macs:
@@ -658,6 +675,17 @@ def convert_net_json(network_json=None, known_macs=None):
else:
cfg[key] = fmt % link_id_info[target]['name']
+ # Infiniband interfaces may be referenced in network_data.json by a
+ # 6-byte Ethernet MAC-style address, and we use that address to look up
+ # the interface name above. Now ensure that the hardware address is set
+ # to the full 20-byte address.
+ ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
+ if ib_known_hwaddrs:
+ for cfg in config:
+ if cfg['name'] in ib_known_hwaddrs:
+ cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
+ cfg['type'] = 'infiniband'
+
for service in services:
cfg = service
cfg.update({'type': 'nameserver'})
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 2d8900e2..e1890e23 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -73,7 +73,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- (output, err) = util.subp(cmd)
+ output, _err = util.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -164,7 +164,7 @@ class NicConfigurator(object):
return ([subnet], route_list)
# Add routes if there is no primary nic
- if not self._primaryNic:
+ if not self._primaryNic and v4.gateways:
route_list.extend(self.gen_ipv4_route(nic,
v4.gateways,
v4.netmask))
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 75cfbaaf..8c91fa41 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -56,10 +56,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- out, err = util.subp(['passwd', '--expire', user])
+ util.subp(['passwd', '--expire', user])
except util.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- out, e = util.subp(['chage', '-d', '0', user])
+ util.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 44075255..a590f323 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -91,7 +91,7 @@ def enable_nics(nics):
for attempt in range(0, enableNicsWaitRetries):
logger.debug("Trying to connect interfaces, attempt %d", attempt)
- (out, err) = set_customization_status(
+ (out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
nics)
@@ -104,7 +104,7 @@ def enable_nics(nics):
return
for count in range(0, enableNicsWaitCount):
- (out, err) = set_customization_status(
+ (out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
nics)
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index e7fda22a..8082019e 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -1,14 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import inspect
import os
import six
import stat
+from cloudinit.event import EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
- INSTANCE_JSON_FILE, DataSource)
+ EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys)
from cloudinit.tests.helpers import CiTestCase, skipIf, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -17,25 +20,32 @@ from cloudinit import util
class DataSourceTestSubclassNet(DataSource):
dsname = 'MyTestSubclass'
+ url_max_wait = 55
- def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
+ def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
+ custom_userdata=None, get_data_retval=True):
super(DataSourceTestSubclassNet, self).__init__(
sys_cfg, distro, paths)
self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
def _get_cloud_name(self):
return 'SubclassCloudName'
def _get_data(self):
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion'}
if self._custom_userdata:
self.userdata_raw = self._custom_userdata
else:
self.userdata_raw = 'userdata_raw'
self.vendordata_raw = 'vendordata_raw'
- return True
+ return self._get_data_retval
class InvalidDataSourceTestSubclassNet(DataSource):
@@ -70,8 +80,7 @@ class TestDataSource(CiTestCase):
"""Init uses DataSource.dsname for sourcing ds_cfg."""
sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
distro = 'distrotest' # generally should be a Distro object
- paths = Paths({})
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths)
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
self.assertEqual({'key2': False}, datasource.ds_cfg)
def test_str_is_classname(self):
@@ -81,6 +90,91 @@ class TestDataSource(CiTestCase):
'DataSourceTestSubclassNet',
str(DataSourceTestSubclassNet('', '', self.paths)))
+ def test_datasource_get_url_params_defaults(self):
+ """get_url_params returns the datasource's default url settings."""
+ params = self.datasource.get_url_params()
+ self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+ self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+ self.assertEqual(params.num_retries, self.datasource.url_retries)
+
+ def test_datasource_get_url_params_subclassed(self):
+ """Subclasses can override get_url_params defaults."""
+ sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
+ distro = 'distrotest' # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ expected = (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(self.datasource.get_url_params(), url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_ds_config_override(self):
+ """Datasource configuration options can override url param defaults."""
+ sys_cfg = {
+ 'datasource': {
+ 'MyTestSubclass': {
+ 'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
+ datasource = DataSourceTestSubclassNet(
+ sys_cfg, self.distro, self.paths)
+ expected = (1, 2, 3)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(
+ (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries),
+ url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_is_zero_or_greater(self):
+ """get_url_params ignores timeouts with a value below 0."""
+ # Set an override that is below 0 which gets ignored.
+ sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ (_max_wait, timeout, _retries) = datasource.get_url_params()
+ self.assertEqual(0, timeout)
+
+ def test_datasource_get_url_uses_defaults_on_errors(self):
+ """On invalid system config values for url_params defaults are used."""
+ # All invalid values should be logged
+ sys_cfg = {'datasource': {
+ '_undef': {
+ 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ url_params = datasource.get_url_params()
+ expected = (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries)
+ self.assertEqual(expected, url_params)
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "Config max_wait 'nope' is not an int, using default '-1'",
+ "Config timeout 'bug' is not an int, using default '10'",
+ "Config retries 'nonint' is not an int, using default '5'",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+ """The fallback_interface is discovered via find_fallback_nic."""
+ m_get_fallback_nic.return_value = 'nic9'
+ self.assertEqual('nic9', self.datasource.fallback_interface)
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+ """Log a warning when fallback_interface can not discover the nic."""
+ self.datasource._cloud_name = 'MySupahCloud'
+ m_get_fallback_nic.return_value = None # Couldn't discover nic
+ self.assertIsNone(self.datasource.fallback_interface)
+ self.assertEqual(
+ 'WARNING: Did not find a fallback interface on MySupahCloud.\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+ """The fallback_interface is cached and won't be rediscovered."""
+ self.datasource._fallback_interface = 'nic10'
+ self.assertEqual('nic10', self.datasource.fallback_interface)
+ m_get_fallback_nic.assert_not_called()
+
def test__get_data_unimplemented(self):
"""Raise an error when _get_data is not implemented."""
with self.assertRaises(NotImplementedError) as context_manager:
@@ -178,8 +272,19 @@ class TestDataSource(CiTestCase):
self.assertEqual('fqdnhostname.domain.com',
datasource.get_hostname(fqdn=True))
- def test_get_data_write_json_instance_data(self):
- """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ get_data_retval=False)
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), 'Found unexpected file %s' % json_file)
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
@@ -187,40 +292,126 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
- 'base64-encoded-keys': [],
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': [],
'v1': {
'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
'region': 'myregion'},
'ds': {
- 'meta-data': {'availability_zone': 'myaz',
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- 'user-data': 'userdata_raw',
- 'vendor-data': 'vendordata_raw'}}
+ 'region': 'myregion'}}}
self.assertEqual(expected, util.load_json(content))
file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ self.assertEqual(
+ ('security-credentials',), datasource.sensitive_metadata_keys)
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'cred1': 'sekret', 'cred2': 'othersekret'},
+ redacted['ds']['meta_data']['some']['security-credentials'])
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'v1': {
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'region': 'myregion'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
+ }
+ self.maxDiff = None
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
def test_get_data_handles_redacted_unserializable_content(self):
"""get_data warns unserializable content in INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
- self.assertTrue(datasource.get_data())
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
- expected_userdata = {
+ expected_metadata = {
'key1': 'val1',
'key2': {
'key2.1': "Warning: redacted unserializable type <class"
" 'cloudinit.helpers.Paths'>"}}
instance_json = util.load_json(content)
self.assertEqual(
- expected_userdata, instance_json['ds']['user-data'])
+ expected_metadata, instance_json['ds']['meta_data'])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('ec2_metadata', instance_data['ds'])
+ datasource.ec2_metadata = {'ec2stuff': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'ec2stuff': 'is good'},
+ instance_data['ds']['ec2_metadata'])
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn('network_json', instance_data['ds'])
+ datasource.network_json = {'network_json': 'is good'}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {'network_json': 'is good'},
+ instance_data['ds']['network_json'])
@skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
@@ -228,17 +419,17 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual(
- ['ds/user-data/key2/key2.1'],
- instance_json['base64-encoded-keys'])
+ self.assertItemsEqual(
+ ['ds/meta_data/key2/key2.1'],
+ instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
def test_get_data_handles_bytes_values(self):
@@ -246,15 +437,15 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64-encoded-keys'])
+ self.assertEqual([], instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['user-data'])
+ instance_json['ds']['meta_data'])
@skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
def test_non_utf8_encoding_logs_warning(self):
@@ -262,7 +453,7 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_userdata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
+ custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
self.assertTrue(datasource.get_data())
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
self.assertFalse(os.path.exists(json_file))
@@ -278,7 +469,7 @@ class TestDataSource(CiTestCase):
base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
- for loc, name in modules.items():
+ for _loc, name in modules.items():
mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], [])
if mod_locs:
importer.import_module(mod_locs[0])
@@ -296,3 +487,116 @@ class TestDataSource(CiTestCase):
get_args(grandchild.get_hostname), # pylint: disable=W1505
'%s does not implement DataSource.get_hostname params'
% grandchild)
+
+ def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, value in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr, value in self.datasource.cached_attr_defaults:
+ self.assertEqual(value, getattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_noops_on_clean_cache(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, _ in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource.clear_cached_attrs()
+ count = 0
+ for attr, _ in self.datasource.cached_attr_defaults:
+ self.assertEqual(count, getattr(self.datasource, attr))
+ count += 1
+
+ def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
+ """Skip any cached_attr_defaults which aren't class attributes."""
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr in ('ec2_metadata', 'network_json'):
+ self.assertFalse(hasattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_of_custom_attrs(self):
+ """Custom attr_values can be passed to clear_cached_attrs."""
+ self.datasource._dirty_cache = True
+ cached_attr_name = self.datasource.cached_attr_defaults[0][0]
+ setattr(self.datasource, cached_attr_name, 'himom')
+ self.datasource.myattr = 'orig'
+ self.datasource.clear_cached_attrs(
+ attr_defaults=(('myattr', 'updated'),))
+ self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
+ self.assertEqual('updated', self.datasource.myattr)
+
+ def test_update_metadata_only_acts_on_supported_update_events(self):
+ """update_metadata won't get_data on unsupported update events."""
+ self.datasource.update_events['network'].discard(EventType.BOOT)
+ self.assertEqual(
+ {'network': set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.update_events)
+
+ def fake_get_data():
+ raise Exception('get_data should not be called')
+
+ self.datasource.get_data = fake_get_data
+ self.assertFalse(
+ self.datasource.update_metadata(
+ source_event_types=[EventType.BOOT]))
+
+ def test_update_metadata_returns_true_on_supported_update_event(self):
+ """update_metadata returns get_data response on supported events."""
+
+ def fake_get_data():
+ return True
+
+ self.datasource.get_data = fake_get_data
+ self.datasource._network_config = 'something'
+ self.datasource._dirty_cache = True
+ self.assertTrue(
+ self.datasource.update_metadata(
+ source_event_types=[
+ EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
+ self.assertEqual(UNSET, self.datasource._network_config)
+ self.assertIn(
+ "DEBUG: Update datasource metadata and network config due to"
+ " events: New instance first boot",
+ self.logs.getvalue())
+
+
+class TestRedactSensitiveData(CiTestCase):
+
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+ """When sensitive_keys is absent or empty from metadata do nothing."""
+ md = {'my': 'data'}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+ md['sensitive_keys'] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md, redact_value='redacted'))
+
+ def test_redact_sensitive_data_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {'sensitive_keys': ['md/secure'],
+ 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
+ secure_md = copy.deepcopy(md)
+ secure_md['md']['secure'] = 'redacted for non-root user'
+ self.assertEqual(
+ secure_md,
+ redact_sensitive_keys(md))
+
+
+# vi: ts=4 expandtab
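A note on the url_params behavior pinned down by the tests above: per-datasource config overrides the class defaults, values that do not parse as integers fall back to those defaults, and a negative timeout is clamped to zero. A minimal sketch of that precedence, using illustrative names and defaults rather than the cloud-init API itself:

    def resolve_url_params(ds_cfg, max_wait=-1, timeout=10, retries=5):
        # Per-datasource config wins over class defaults when it parses as int.
        resolved = {}
        for key, default in (('max_wait', max_wait), ('timeout', timeout),
                             ('retries', retries)):
            try:
                value = int(ds_cfg.get(key, default))
            except ValueError:
                value = default  # non-int config values are logged and ignored
            if key == 'timeout' and value < 0:
                value = 0  # timeouts below zero are treated as zero
            resolved[key] = value
        return resolved

    # resolve_url_params({'max_wait': '1', 'timeout': '2', 'retries': '3'})
    # -> {'max_wait': 1, 'timeout': 2, 'retries': 3}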
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
new file mode 100644
index 00000000..7599126c
--- /dev/null
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -0,0 +1,331 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import BrokenMetadata
+from cloudinit import helpers
+
+from cloudinit.tests import helpers as test_helpers
+
+from textwrap import dedent
+import argparse
+import httpretty
+import json
+import mock
+import os
+import six
+import uuid
+
+DS_PATH = "cloudinit.sources.DataSourceOracle"
+MD_VER = "2013-10-17"
+
+
+class TestDataSourceOracle(test_helpers.CiTestCase):
+ """Test datasource DataSourceOracle."""
+
+ ds_class = oracle.DataSourceOracle
+
+ my_uuid = str(uuid.uuid4())
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def _patch_instance(self, inst, patches):
+ """Patch an instance of a class 'inst'.
+ for each name, kwargs in patches:
+ inst.name = mock.Mock(**kwargs)
+ returns a namespace object that has
+ namespace.name = mock.Mock(**kwargs)
+ Do not bother with cleanup as instance is assumed transient."""
+ mocks = argparse.Namespace()
+ for name, kwargs in patches.items():
+ imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
+ setattr(mocks, name, imock)
+ setattr(inst, name, imock)
+ return mocks
+
+ def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
+ patches=None):
+ if sys_cfg is None:
+ sys_cfg = {}
+ if patches is None:
+ patches = {}
+ if paths is None:
+ tmpd = self.tmp_dir()
+ dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
+ 'run_dir': self.tmp_path('run_dir')}
+ for d in dirs.values():
+ os.mkdir(d)
+ paths = helpers.Paths(dirs)
+
+ ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
+ paths=paths, ud_proc=ud_proc)
+
+ return ds, self._patch_instance(ds, patches)
+
+ def test_platform_not_viable_returns_false(self):
+ ds, mocks = self._get_ds(
+ patches={'_is_platform_viable': {'return_value': False}})
+ self.assertFalse(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_without_userdata(self, m_is_iscsi_root):
+ """If no user-data is provided, it should not be in return dict."""
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(self.my_uuid, ds.system_uuid)
+ self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
+ self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+ self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+ self.assertIsNone(ds.userdata_raw)
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_with_vendordata(self, m_is_iscsi_root):
+ """Test with vendor data."""
+ vd = {'cloud-init': '#cloud-config\nkey: value'}
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md,
+ 'vendor_data': vd}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(vd, ds.vendordata_pure)
+ self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
+
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_with_userdata(self, m_is_iscsi_root):
+ """Ensure user-data is populated if present and is binary."""
+ my_userdata = b'abcdefg'
+ ds, mocks = self._get_ds(patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md,
+ 'user_data': my_userdata}}}})
+ self.assertTrue(ds._get_data())
+ mocks._is_platform_viable.assert_called_once_with()
+ mocks.crawl_metadata.assert_called_once_with()
+ self.assertEqual(self.my_uuid, ds.system_uuid)
+ self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
+ self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
+ self.assertEqual(my_userdata, ds.userdata_raw)
+
+ @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config):
+ """network_config should read kernel cmdline."""
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ncfg = {'version': 1, 'config': [{'a': 'b'}]}
+ m_cmdline_config.return_value = ncfg
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ncfg, ds.network_config)
+ m_cmdline_config.assert_called_once_with()
+ self.assertFalse(distro.generate_fallback_config.called)
+
+ @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config):
+ """test that fallback network is generated if no kernel cmdline."""
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ncfg = {'version': 1, 'config': [{'a': 'b'}]}
+ m_cmdline_config.return_value = None
+ self.assertTrue(ds._get_data())
+ ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
+ distro.generate_fallback_config.return_value = ncfg
+ self.assertEqual(ncfg, ds.network_config)
+ m_cmdline_config.assert_called_once_with()
+ distro.generate_fallback_config.assert_called_once_with()
+ self.assertEqual(1, m_cmdline_config.call_count)
+
+ # Test that the result was cached and the methods were not re-called.
+ self.assertEqual(ncfg, ds.network_config)
+ self.assertEqual(1, m_cmdline_config.call_count)
+
+
+@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
+class TestReadMetaData(test_helpers.HttprettyTestCase):
+ """Test the read_metadata which interacts with http metadata service."""
+
+ mdurl = oracle.METADATA_ENDPOINT
+ my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
+ "name": "ci-vm1", "availability_zone": "phx-ad-3",
+ "hostname": "ci-vm1hostname",
+ "launch_index": 0, "files": [],
+ "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
+ "meta": {}}
+
+ def populate_md(self, data):
+ """call httppretty.register_url for each item dict 'data',
+ including valid indexes. Text values converted to bytes."""
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/",
+ '\n'.join(data.keys()).encode('utf-8'))
+ for k, v in data.items():
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/" + k,
+ v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+
+ def test_broken_no_sys_uuid(self, m_read_system_uuid):
+ """Datasource requires ability to read system_uuid and true return."""
+ m_read_system_uuid.return_value = None
+ self.assertRaises(BrokenMetadata, oracle.read_metadata)
+
+ def test_broken_no_metadata_json(self, m_read_system_uuid):
+ """Datasource requires meta_data.json."""
+ httpretty.register_uri(
+ httpretty.GET, self.mdurl + MD_VER + "/",
+ '\n'.join(['user_data']).encode('utf-8'))
+ with self.assertRaises(BrokenMetadata) as cm:
+ oracle.read_metadata()
+ self.assertIn("Required field 'meta_data.json' missing",
+ str(cm.exception))
+
+ def test_with_userdata(self, m_read_system_uuid):
+ data = {'user_data': b'#!/bin/sh\necho hi world\n',
+ 'meta_data.json': json.dumps(self.my_md)}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertEqual(data['user_data'], result['user_data'])
+ self.assertEqual(self.my_md, result['meta_data'])
+
+ def test_without_userdata(self, m_read_system_uuid):
+ data = {'meta_data.json': json.dumps(self.my_md)}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertNotIn('user_data', result)
+ self.assertEqual(self.my_md, result['meta_data'])
+
+ def test_unknown_fields_included(self, m_read_system_uuid):
+ """Unknown fields listed in index should be included.
+ And those ending in .json should be decoded."""
+ some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
+ some_vendor_data = {'cloud-init': 'foo'}
+ data = {'meta_data.json': json.dumps(self.my_md),
+ 'some_data.json': json.dumps(some_data),
+ 'vendor_data.json': json.dumps(some_vendor_data),
+ 'other_blob': b'this is blob'}
+ self.populate_md(data)
+ result = oracle.read_metadata()[MD_VER]
+ self.assertNotIn('user_data', result)
+ self.assertEqual(self.my_md, result['meta_data'])
+ self.assertEqual(some_data, result['some_data'])
+ self.assertEqual(some_vendor_data, result['vendor_data'])
+ self.assertEqual(data['other_blob'], result['other_blob'])
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+ @mock.patch(DS_PATH + ".util.read_dmi_data",
+ return_value=oracle.CHASSIS_ASSET_TAG)
+ def test_expected_viable(self, m_read_dmi_data):
+ """System with known chassis tag is viable."""
+ self.assertTrue(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+ @mock.patch(DS_PATH + ".util.read_dmi_data", return_value=None)
+ def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+ """System without known chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+ @mock.patch(DS_PATH + ".util.read_dmi_data", return_value="LetsGoCubs")
+ def test_expected_not_viable_other(self, m_read_dmi_data):
+ """System with unnown chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
+
+
+class TestLoadIndex(test_helpers.CiTestCase):
+ """_load_index handles parsing of an index into a proper list.
+ The tests here guarantee correct parsing of html version or
+ a fixed version. See the function docstring for more doc."""
+
+ _known_html_api_versions = dedent("""\
+ <html>
+ <head><title>Index of /openstack/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
+ <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
+ <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
+ </pre><hr></body>
+ </html>""")
+
+ _known_html_contents = dedent("""\
+ <html>
+ <head><title>Index of /openstack/2013-10-17/</title></head>
+ <body bgcolor="white">
+ <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
+ <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
+ <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
+ </pre><hr></body>
+ </html>""")
+
+ def test_parse_html(self):
+ """Test parsing of lower case html."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index(self._known_html_api_versions))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index(self._known_html_contents))
+
+ def test_parse_html_upper(self):
+ """Test parsing of upper case html, although known content is lower."""
+ def _toupper(data):
+ return data.replace("<a", "<A").replace("html>", "HTML>")
+
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index(_toupper(self._known_html_api_versions)))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index(_toupper(self._known_html_contents)))
+
+ def test_parse_newline_list_with_endl(self):
+ """Test parsing of newline separated list with ending newline."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
+
+ def test_parse_newline_list_without_endl(self):
+ """Test parsing of newline separated list with no ending newline.
+
+ Actual openstack implementation does not include trailing newline."""
+ self.assertEqual(
+ ['2013-10-17/', 'latest/'],
+ oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
+ self.assertEqual(
+ ['meta_data.json', 'user_data'],
+ oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+
+
+# vi: ts=4 expandtab
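For reference, the two index formats _load_index must accept per TestLoadIndex above: a plain newline-separated listing, or an nginx-style HTML directory index whose entries sit in <a href="..."> anchors, with the leading "../" parent link skipped. A rough regex-based sketch, illustrative only and not the DataSourceOracle implementation:

    import re

    def load_index_sketch(content):
        if '<html>' in content.lower():
            # Drop the first anchor, which is the "../" parent link.
            return re.findall(r'<a href="([^"]+)"', content, re.I)[1:]
        return [line for line in content.splitlines() if line]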
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 882517f5..3f99b58c 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -41,6 +41,12 @@ VALID_KEY_TYPES = (
)
+DISABLE_USER_OPTS = (
+ "no-port-forwarding,no-agent-forwarding,"
+ "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
+ " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"")
+
+
class AuthKeyLine(object):
def __init__(self, source, keytype=None, base64=None,
comment=None, options=None):
@@ -279,24 +285,28 @@ class SshdConfigLine(object):
def parse_ssh_config(fname):
+ if not os.path.isfile(fname):
+ return []
+ return parse_ssh_config_lines(util.load_file(fname).splitlines())
+
+
+def parse_ssh_config_lines(lines):
# See: man sshd_config
# The file contains keyword-argument pairs, one per line.
# Lines starting with '#' and empty lines are interpreted as comments.
# Note: key-words are case-insensitive and arguments are case-sensitive
- lines = []
- if not os.path.isfile(fname):
- return lines
- for line in util.load_file(fname).splitlines():
+ ret = []
+ for line in lines:
line = line.strip()
if not line or line.startswith("#"):
- lines.append(SshdConfigLine(line))
+ ret.append(SshdConfigLine(line))
continue
try:
key, val = line.split(None, 1)
except ValueError:
key, val = line.split('=', 1)
- lines.append(SshdConfigLine(line, key, val))
- return lines
+ ret.append(SshdConfigLine(line, key, val))
+ return ret
def parse_ssh_config_map(fname):
@@ -310,4 +320,56 @@ def parse_ssh_config_map(fname):
ret[line.key] = line.value
return ret
+
+def update_ssh_config(updates, fname=DEF_SSHD_CFG):
+ """Read fname, and update if changes are necessary.
+
+ @param updates: dictionary of desired values {Option: value}
+ @return: boolean indicating if an update was done."""
+ lines = parse_ssh_config(fname)
+ changed = update_ssh_config_lines(lines=lines, updates=updates)
+ if changed:
+ util.write_file(
+ fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True)
+ return len(changed) != 0
+
+
+def update_ssh_config_lines(lines, updates):
+ """Update the ssh config lines per updates.
+
+ @param lines: array of SshdConfigLine. This array is updated in place.
+ @param updates: dictionary of desired values {Option: value}
+ @return: A list of keys in updates that were changed."""
+ found = set()
+ changed = []
+
+ # Keywords are case-insensitive and arguments are case-sensitive
+ casemap = dict([(k.lower(), k) for k in updates.keys()])
+
+ for (i, line) in enumerate(lines, start=1):
+ if not line.key:
+ continue
+ if line.key in casemap:
+ key = casemap[line.key]
+ value = updates[key]
+ found.add(key)
+ if line.value == value:
+ LOG.debug("line %d: option %s already set to %s",
+ i, key, value)
+ else:
+ changed.append(key)
+ LOG.debug("line %d: option %s updated %s -> %s", i,
+ key, line.value, value)
+ line.value = value
+
+ if len(found) != len(updates):
+ for key, value in updates.items():
+ if key in found:
+ continue
+ changed.append(key)
+ lines.append(SshdConfigLine('', key, value))
+ LOG.debug("line %d: option %s added with %s",
+ len(lines), key, value)
+ return changed
+
# vi: ts=4 expandtab
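Usage of the new helper is straightforward; for example (hypothetical values), disabling password logins while only rewriting sshd_config when something actually changed:

    from cloudinit import ssh_util

    # update_ssh_config returns True only if the file was modified, so
    # callers can decide whether a service restart is needed.
    if ssh_util.update_ssh_config({'PasswordAuthentication': 'no'}):
        print('sshd_config changed; sshd should be restarted')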
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index bc4ebc85..8a064124 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -17,10 +17,13 @@ from cloudinit.settings import (
from cloudinit import handlers
# Default handlers (used if not overridden)
-from cloudinit.handlers import boot_hook as bh_part
-from cloudinit.handlers import cloud_config as cc_part
-from cloudinit.handlers import shell_script as ss_part
-from cloudinit.handlers import upstart_job as up_part
+from cloudinit.handlers.boot_hook import BootHookPartHandler
+from cloudinit.handlers.cloud_config import CloudConfigPartHandler
+from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
+from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.upstart_job import UpstartJobPartHandler
+
+from cloudinit.event import EventType
from cloudinit import cloud
from cloudinit import config
@@ -85,7 +88,7 @@ class Init(object):
# from whatever it was to a new set...
if self.datasource is not NULL_DATA_SOURCE:
self.datasource.distro = self._distro
- self.datasource.sys_cfg = system_config
+ self.datasource.sys_cfg = self.cfg
return self._distro
@property
@@ -362,16 +365,22 @@ class Init(object):
self._store_vendordata()
def setup_datasource(self):
- if self.datasource is None:
- raise RuntimeError("Datasource is None, cannot setup.")
- self.datasource.setup(is_new_instance=self.is_new_instance())
+ with events.ReportEventStack("setup-datasource",
+ "setting up datasource",
+ parent=self.reporter):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot setup.")
+ self.datasource.setup(is_new_instance=self.is_new_instance())
def activate_datasource(self):
- if self.datasource is None:
- raise RuntimeError("Datasource is None, cannot activate.")
- self.datasource.activate(cfg=self.cfg,
- is_new_instance=self.is_new_instance())
- self._write_to_cache()
+ with events.ReportEventStack("activate-datasource",
+ "activating datasource",
+ parent=self.reporter):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot activate.")
+ self.datasource.activate(cfg=self.cfg,
+ is_new_instance=self.is_new_instance())
+ self._write_to_cache()
def _store_userdata(self):
raw_ud = self.datasource.get_userdata_raw()
@@ -405,12 +414,17 @@ class Init(object):
'datasource': self.datasource,
})
# TODO(harlowja) Hmmm, should we dynamically import these??
+ cloudconfig_handler = CloudConfigPartHandler(**opts)
+ shellscript_handler = ShellScriptPartHandler(**opts)
def_handlers = [
- cc_part.CloudConfigPartHandler(**opts),
- ss_part.ShellScriptPartHandler(**opts),
- bh_part.BootHookPartHandler(**opts),
- up_part.UpstartJobPartHandler(**opts),
+ cloudconfig_handler,
+ shellscript_handler,
+ BootHookPartHandler(**opts),
+ UpstartJobPartHandler(**opts),
]
+ opts.update(
+ {'sub_handlers': [cloudconfig_handler, shellscript_handler]})
+ def_handlers.append(JinjaTemplatePartHandler(**opts))
return def_handlers
def _default_userdata_handlers(self):
@@ -502,7 +516,7 @@ class Init(object):
# The default frequency if handlers don't have one
'frequency': frequency,
# This will be used when new handlers are found
- # to help write there contents to files with numbered
+ # to help write their contents to files with numbered
# names...
'handlercount': 0,
'excluded': excluded,
@@ -642,10 +656,14 @@ class Init(object):
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
- if (self.datasource is not NULL_DATA_SOURCE and
- not self.is_new_instance()):
- LOG.debug("not a new instance. network config is not applied.")
- return
+ if self.datasource is not NULL_DATA_SOURCE:
+ if not self.is_new_instance():
+ if not self.datasource.update_metadata([EventType.BOOT]):
+ LOG.debug(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update on '%s' event",
+ EventType.BOOT)
+ return
LOG.info("Applying network configuration from %s bringup=%s: %s",
src, bring_up, netcfg)
@@ -691,7 +709,9 @@ class Modules(object):
module_list = []
if name not in self.cfg:
return module_list
- cfg_mods = self.cfg[name]
+ cfg_mods = self.cfg.get(name)
+ if not cfg_mods:
+ return module_list
# Create 'module_list', an array of hashes
# Where hash['mod'] = module name
# hash['freq'] = frequency
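The reworked apply-network gate above reduces to a small decision, paraphrased here as a sketch (not the actual Init method):

    def should_apply_network_config(init):
        # First boot of a new instance always applies network config.
        if init.is_new_instance():
            return True
        # Otherwise only re-apply when the datasource declares support for
        # the BOOT event; update_metadata returns False for unsupported
        # source event types.
        return init.datasource.update_metadata([EventType.BOOT])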
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index b3ea64e4..b668674b 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -13,6 +13,7 @@
import collections
import re
+
try:
from Cheetah.Template import Template as CTemplate
CHEETAH_AVAILABLE = True
@@ -20,23 +21,44 @@ except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- import jinja2
+ from jinja2.runtime import implements_to_string
from jinja2 import Template as JTemplate
+ from jinja2 import DebugUndefined as JUndefined
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
+ from cloudinit.helpers import identity
+ implements_to_string = identity
JINJA_AVAILABLE = False
+ JUndefined = object
from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
+
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
+MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
+
+
+@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
+class UndefinedJinjaVariable(JUndefined):
+ """Class used to represent any undefined jinja template varible."""
+
+ def __str__(self):
+ return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
+
+ def __sub__(self, other):
+ other = str(other).replace(MISSING_JINJA_PREFIX, '')
+ raise TypeError(
+ 'Undefined jinja variable: "{this}-{other}". Jinja tried'
+ ' subtraction. Perhaps you meant "{this}_{other}"?'.format(
+ this=self._undefined_name, other=other))
def basic_render(content, params):
- """This does simple replacement of bash variable like templates.
+ """This does sumple replacement of bash variable like templates.
It identifies patterns like ${a} or $a and can also identify patterns like
${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
@@ -82,7 +104,7 @@ def detect_template(text):
# keep_trailing_newline is in jinja2 2.7+, not 2.6
add = "\n" if content.endswith("\n") else ""
return JTemplate(content,
- undefined=jinja2.StrictUndefined,
+ undefined=UndefinedJinjaVariable,
trim_blocks=True).render(**params) + add
if text.find("\n") != -1:
@@ -121,7 +143,11 @@ def detect_template(text):
def render_from_file(fn, params):
if not params:
params = {}
- template_type, renderer, content = detect_template(util.load_file(fn))
+ # jinja in python2 uses unicode internally. All py2 str will be decoded.
+ # If it is given a str that has non-ascii then it will raise a
+ # UnicodeDecodeError. So we explicitly convert to unicode type here.
+ template_type, renderer, content = detect_template(
+ util.load_file(fn, decode=False).decode('utf-8'))
LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
return renderer(content, params)
@@ -132,14 +158,18 @@ def render_to_file(fn, outfn, params, mode=0o644):
def render_string_to_file(content, outfn, params, mode=0o644):
+ """Render string (or py2 unicode) to file.
+ Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
contents = render_string(content, params)
util.write_file(outfn, contents, mode=mode)
def render_string(content, params):
+ """Render string (or py2 unicode).
+ Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
if not params:
params = {}
- template_type, renderer, content = detect_template(content)
+ _template_type, renderer, content = detect_template(content)
return renderer(content, params)
# vi: ts=4 expandtab
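The switch from StrictUndefined to a DebugUndefined subclass changes the failure mode: unknown jinja variables no longer abort rendering, they are stamped into the output instead. Assuming jinja2 is installed, the real render_string API behaves roughly like this:

    from cloudinit.templater import render_string

    content = '## template: jinja\nhello {{ who }}'
    print(render_string(content, {}))
    # -> "hello CI_MISSING_JINJA_VAR/who" rather than a rendering error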
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 999b1d7c..2eb7b0cd 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -3,32 +3,43 @@
from __future__ import print_function
import functools
+import httpretty
import logging
import os
import shutil
import sys
import tempfile
-import unittest
+import time
import mock
import six
import unittest2
+from unittest2.util import strclass
try:
- from contextlib import ExitStack
+ from contextlib import ExitStack, contextmanager
except ImportError:
- from contextlib2 import ExitStack
+ from contextlib2 import ExitStack, contextmanager
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
+from cloudinit.config.schema import (
+ SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit import cloud
+from cloudinit import distros
from cloudinit import helpers as ch
+from cloudinit.sources import DataSourceNone
+from cloudinit.templater import JINJA_AVAILABLE
from cloudinit import util
+_real_subp = util.subp
+
# Used for skipping tests
SkipTest = unittest2.SkipTest
+skipIf = unittest2.skipIf
# Used for detecting different python versions
PY2 = False
@@ -108,12 +119,15 @@ class TestCase(unittest2.TestCase):
super(TestCase, self).setUp()
self.reset_global_state()
- def add_patch(self, target, attr, **kwargs):
+ def shortDescription(self):
+ return strclass(self.__class__) + '.' + self._testMethodName
+
+ def add_patch(self, target, attr, *args, **kwargs):
"""Patches specified target object and sets it as attr on test
instance; also schedules cleanup."""
if 'autospec' not in kwargs:
kwargs['autospec'] = True
- m = mock.patch(target, **kwargs)
+ m = mock.patch(target, *args, **kwargs)
p = m.start()
self.addCleanup(m.stop)
setattr(self, attr, p)
@@ -136,6 +150,17 @@ class CiTestCase(TestCase):
# Subclass overrides for specific test behavior
# Whether or not a unit test needs logfile setup
with_logs = False
+ allowed_subp = False
+ SUBP_SHELL_TRUE = "shell=true"
+
+ @contextmanager
+ def allow_subp(self, allowed_subp):
+ orig = self.allowed_subp
+ try:
+ self.allowed_subp = allowed_subp
+ yield
+ finally:
+ self.allowed_subp = orig
def setUp(self):
super(CiTestCase, self).setUp()
@@ -148,11 +173,41 @@ class CiTestCase(TestCase):
handler.setFormatter(formatter)
self.old_handlers = self.logger.handlers
self.logger.handlers = [handler]
+ if self.allowed_subp is True:
+ util.subp = _real_subp
+ else:
+ util.subp = self._fake_subp
+
+ def _fake_subp(self, *args, **kwargs):
+ if 'args' in kwargs:
+ cmd = kwargs['args']
+ else:
+ cmd = args[0]
+
+ if not isinstance(cmd, six.string_types):
+ cmd = cmd[0]
+ pass_through = False
+ if not isinstance(self.allowed_subp, (list, bool)):
+ raise TypeError("self.allowed_subp supports list or bool.")
+ if isinstance(self.allowed_subp, bool):
+ pass_through = self.allowed_subp
+ else:
+ pass_through = (
+ (cmd in self.allowed_subp) or
+ (self.SUBP_SHELL_TRUE in self.allowed_subp and
+ kwargs.get('shell')))
+ if pass_through:
+ return _real_subp(*args, **kwargs)
+ raise Exception(
+ "called subp. set self.allowed_subp=True to allow\n subp(%s)" %
+ ', '.join([str(repr(a)) for a in args] +
+ ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]))
def tearDown(self):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
+ util.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
@@ -183,6 +238,29 @@ class CiTestCase(TestCase):
"""
raise SystemExit(code)
+ def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
+ """Create a cloud with tmp working directory paths.
+
+ @param distro: Name of the distro to attach to the cloud.
+ @param sys_cfg: Optional system configuration dict for the cloud.
+ @param metadata: Optional metadata to set on the datasource.
+
+ @return: The built cloud instance.
+ """
+ self.new_root = self.tmp_dir()
+ if not sys_cfg:
+ sys_cfg = {}
+ tmp_paths = {}
+ for var in ['templates_dir', 'run_dir', 'cloud_dir']:
+ tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
+ util.ensure_dir(tmp_paths[var])
+ self.paths = ch.Paths(tmp_paths)
+ cls = distros.fetch(distro)
+ mydist = cls(distro, sys_cfg, self.paths)
+ myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
+
class ResourceUsingTestCase(CiTestCase):
@@ -190,35 +268,11 @@ class ResourceUsingTestCase(CiTestCase):
super(ResourceUsingTestCase, self).setUp()
self.resource_path = None
- def resourceLocation(self, subname=None):
- if self.resource_path is None:
- paths = [
- os.path.join('tests', 'data'),
- os.path.join('data'),
- os.path.join(os.pardir, 'tests', 'data'),
- os.path.join(os.pardir, 'data'),
- ]
- for p in paths:
- if os.path.isdir(p):
- self.resource_path = p
- break
- self.assertTrue((self.resource_path and
- os.path.isdir(self.resource_path)),
- msg="Unable to locate test resource data path!")
- if not subname:
- return self.resource_path
- return os.path.join(self.resource_path, subname)
-
- def readResource(self, name):
- where = self.resourceLocation(name)
- with open(where, 'r') as fh:
- return fh.read()
-
def getCloudPaths(self, ds=None):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
cp = ch.Paths({'cloud_dir': tmpdir,
- 'templates_dir': self.resourceLocation()},
+ 'templates_dir': resourceLocation()},
ds=ds)
return cp
@@ -234,7 +288,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
ResourceUsingTestCase.tearDown(self)
def replicateTestRoot(self, example_root, target_root):
- real_root = self.resourceLocation()
+ real_root = resourceLocation()
real_root = os.path.join(real_root, 'roots', example_root)
for (dir_path, _dirnames, filenames) in os.walk(real_root):
real_path = dir_path
@@ -285,7 +339,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
os.path: [('isfile', 1), ('exists', 1),
('islink', 1), ('isdir', 1), ('lexists', 1)],
os: [('listdir', 1), ('mkdir', 1),
- ('lstat', 1), ('symlink', 2)]
+ ('lstat', 1), ('symlink', 2),
+ ('stat', 1)]
}
if hasattr(os, 'scandir'):
@@ -319,23 +374,54 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
self.patchOS(root)
return root
+ @contextmanager
+ def reRooted(self, root=None):
+ try:
+ yield self.reRoot(root)
+ finally:
+ self.patched_funcs.close()
+
class HttprettyTestCase(CiTestCase):
# necessary as http_proxy gets in the way of httpretty
# https://github.com/gabrielfalcao/HTTPretty/issues/122
+ # Also make sure that allow_net_connect is set to False.
+ # And make sure reset and enable/disable are done.
def setUp(self):
self.restore_proxy = os.environ.get('http_proxy')
if self.restore_proxy is not None:
del os.environ['http_proxy']
super(HttprettyTestCase, self).setUp()
+ httpretty.HTTPretty.allow_net_connect = False
+ httpretty.reset()
+ httpretty.enable()
def tearDown(self):
+ httpretty.disable()
+ httpretty.reset()
if self.restore_proxy:
os.environ['http_proxy'] = self.restore_proxy
super(HttprettyTestCase, self).tearDown()
+class SchemaTestCaseMixin(unittest2.TestCase):
+
+ def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
+ """Assert the config is valid per self.schema.
+
+ If there is only one top level key in the schema properties, then
+ the cfg will be put under that key."""
+ props = list(self.schema.get('properties'))
+ # put cfg under top level key if there is only one in the schema
+ if len(props) == 1:
+ cfg = {props[0]: cfg}
+ try:
+ validate_cloudconfig_schema(cfg, self.schema, strict=True)
+ except SchemaValidationError:
+ self.fail(msg)
+
+
def populate_dir(path, files):
if not os.path.exists(path):
os.makedirs(path)
@@ -354,11 +440,20 @@ def populate_dir(path, files):
return ret
+def populate_dir_with_ts(path, data):
+ """data is {'file': ('contents', mtime)}. mtime relative to now."""
+ populate_dir(path, dict((k, v[0]) for k, v in data.items()))
+ btime = time.time()
+ for fpath, (_contents, mtime) in data.items():
+ ts = btime + mtime if mtime else btime
+ os.utime(os.path.sep.join((path, fpath)), (ts, ts))
+
+
def dir2dict(startdir, prefix=None):
flist = {}
if prefix is None:
prefix = startdir
- for root, dirs, files in os.walk(startdir):
+ for root, _dirs, files in os.walk(startdir):
for fname in files:
fpath = os.path.join(root, fname)
key = fpath[len(prefix):]
@@ -399,19 +494,16 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs):
p.stop()
-try:
- skipIf = unittest.skipIf
-except AttributeError:
- # Python 2.6. Doesn't have to be high fidelity.
- def skipIf(condition, reason):
- def decorator(func):
- def wrapper(*args, **kws):
- if condition:
- return func(*args, **kws)
- else:
- print(reason, file=sys.stderr)
- return wrapper
- return decorator
+def resourceLocation(subname=None):
+ path = os.path.join('tests', 'data')
+ if not subname:
+ return path
+ return os.path.join(path, subname)
+
+
+def readResource(name, mode='r'):
+ with open(resourceLocation(name), mode) as fh:
+ return fh.read()
try:
@@ -427,6 +519,14 @@ def skipUnlessJsonSchema():
_missing_jsonschema_dep, "No python-jsonschema dependency present.")
+def skipUnlessJinja():
+ return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.")
+
+
+def skipIfJinja():
+ return skipIf(JINJA_AVAILABLE, "Jinja dependency present.")
+
+
# older versions of mock do not have the useful 'assert_not_called'
if not hasattr(mock.Mock, 'assert_not_called'):
def __mock_assert_not_called(mmock):
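With this change CiTestCase fails any test that shells out unless the test opts in, which keeps unit tests hermetic. An illustrative (hypothetical) test showing the opt-in:

    class MyToolTest(CiTestCase):

        def test_lists_root(self):
            # Temporarily whitelist a single binary for this block; any
            # other util.subp call would still raise.
            with self.allow_subp(['ls']):
                out, _err = util.subp(['ls', '/'])
            self.assertIn('etc', out)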
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
new file mode 100644
index 00000000..0562b966
--- /dev/null
+++ b/cloudinit/tests/test_gpg.py
@@ -0,0 +1,54 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test gpg module."""
+
+from cloudinit import gpg
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+import mock
+
+
+@mock.patch("cloudinit.gpg.time.sleep")
+@mock.patch("cloudinit.gpg.util.subp")
+class TestReceiveKeys(CiTestCase):
+ """Test the recv_key method."""
+
+ def test_retries_on_subp_exc(self, m_subp, m_sleep):
+ """retry should be done on gpg receive keys failure."""
+ retries = (1, 2, 4)
+ my_exc = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ m_subp.side_effect = (my_exc, my_exc, ('', ''))
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+ self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list)
+
+ def test_raises_error_after_retries(self, m_subp, m_sleep):
+ """If the final run fails, error should be raised."""
+ naplen = 1
+ keyid, keyserver = ("ABCD", "keyserver.example.com")
+ m_subp.side_effect = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ with self.assertRaises(ValueError) as rcm:
+ gpg.recv_key(keyid, keyserver, retries=(naplen,))
+ self.assertIn(keyid, str(rcm.exception))
+ self.assertIn(keyserver, str(rcm.exception))
+ m_sleep.assert_called_with(naplen)
+
+ def test_no_retries_on_none(self, m_subp, m_sleep):
+ """retry should not be done if retries is None."""
+ m_subp.side_effect = util.ProcessExecutionError(
+ stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+ with self.assertRaises(ValueError):
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+ m_sleep.assert_not_called()
+
+ def test_expected_gpg_command(self, m_subp, m_sleep):
+ """Verify gpg is called with expected args."""
+ key, keyserver = ("DEADBEEF", "keyserver.example.com")
+ retries = (1, 2, 4)
+ m_subp.return_value = ('', '')
+ gpg.recv_key(key, keyserver, retries=retries)
+ m_subp.assert_called_once_with(
+ ['gpg', '--keyserver=%s' % keyserver, '--recv-keys', key],
+ capture=True)
+ m_sleep.assert_not_called()
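The retry contract these tests pin down: each element of 'retries' is a nap length in seconds between attempts, so a tuple of length N allows N+1 attempts before recv_key gives up with a ValueError. In use (hypothetical key and server):

    from cloudinit import gpg

    # Up to four attempts, sleeping 1s, 2s, then 4s between them.
    gpg.recv_key('DEADBEEF', 'keyserver.example.com', retries=(1, 2, 4))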
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 7dea2e41..d76e768e 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -2,105 +2,166 @@
"""Tests netinfo module functions and classes."""
-from cloudinit.netinfo import netdev_pformat, route_pformat
-from cloudinit.tests.helpers import CiTestCase, mock
+from copy import copy
+
+from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat
+from cloudinit.tests.helpers import CiTestCase, mock, readResource
# Example ifconfig and route output
-SAMPLE_IFCONFIG_OUT = """\
-enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91
- inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0
- inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37
- TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
- RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB)
- Interrupt:20 Memory:e1200000-e1220000
-
-lo Link encap:Local Loopback
- inet addr:127.0.0.1 Mask:255.0.0.0
- inet6 addr: ::1/128 Scope:Host
- UP LOOPBACK RUNNING MTU:65536 Metric:1
- RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0
- TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1
-"""
-
-SAMPLE_ROUTE_OUT = '\n'.join([
- '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0'
- ' enp0s25',
- '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0'
- ' wlp3s0',
- '192.168.2.0 0.0.0.0 255.255.255.0 U 0 0 0'
- ' enp0s25'])
-
-
-NETDEV_FORMATTED_OUT = '\n'.join([
- '+++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++'
- '++++++++++++++++++++',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+',
- '| Device | Up | Address | Mask | Scope |'
- ' Hw-Address |',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+',
- '| enp0s25 | True | 192.168.2.18 | 255.255.255.0 | . |'
- ' 50:7b:9d:2c:af:91 |',
- '| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link |'
- ' 50:7b:9d:2c:af:91 |',
- '| lo | True | 127.0.0.1 | 255.0.0.0 | . |'
- ' . |',
- '| lo | True | ::1/128 | . | host |'
- ' . |',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+'])
-
-ROUTE_FORMATTED_OUT = '\n'.join([
- '+++++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++'
- '+++',
- '+-------+-------------+-------------+---------------+-----------+-----'
- '--+',
- '| Route | Destination | Gateway | Genmask | Interface | Flags'
- ' |',
- '+-------+-------------+-------------+---------------+-----------+'
- '-------+',
- '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 |'
- ' UG |',
- '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 |'
- ' U |',
- '+-------+-------------+-------------+---------------+-----------+'
- '-------+',
- '++++++++++++++++++++++++++++++++++++++++Route IPv6 info++++++++++'
- '++++++++++++++++++++++++++++++',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+',
- '| Route | Proto | Recv-Q | Send-Q | Local Address |'
- ' Foreign Address | State |',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+',
- '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | UG |'
- ' 0 | 0 |',
- '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | U |'
- ' 0 | 0 |',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+'])
+SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
+SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
+SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
+SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
+SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
+SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
+SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
+NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
+ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
class TestNetInfo(CiTestCase):
maxDiff = None
+ with_logs = True
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_netdev_pformat(self, m_subp):
- """netdev_pformat properly rendering network device information."""
- m_subp.return_value = (SAMPLE_IFCONFIG_OUT, '')
+ def test_netdev_old_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering old nettools info."""
+ m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_new_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ content = netdev_pformat()
+ self.assertEqual(NETDEV_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_iproute_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering ip route info."""
+ m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ content = netdev_pformat()
+ new_output = copy(NETDEV_FORMATTED_OUT)
+ # ip addr show describes global scopes on ipv4 addresses
+ # whereas ifconfig does not. Add proper global/host scope to output.
+ new_output = new_output.replace('| . | 50:7b', '| global | 50:7b')
+ new_output = new_output.replace(
+ '255.0.0.0 | . |', '255.0.0.0 | host |')
+ self.assertEqual(new_output, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
+ """netdev_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Niether ip nor netstat found
+ content = netdev_pformat()
+ self.assertEqual('\n', content)
+ self.assertEqual(
+ "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
+ " commands\n",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_info_nettools_down(self, m_subp, m_which):
+ """test netdev_info using nettools and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/new-ifconfig-output-down"), "")
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ self.assertEqual(
+ {'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False},
+ 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True}},
+ netdev_info("."))
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_info_iproute_down(self, m_subp, m_which):
+ """Test netdev_info with ip and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/sample-ipaddrshow-output-down"), "")
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ self.assertEqual(
+ {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
+ 'mask': '255.0.0.0', 'scope': 'host'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True},
+ 'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
+ netdev_info("."))
+
+ @mock.patch('cloudinit.netinfo.netdev_info')
+ def test_netdev_pformat_with_down(self, m_netdev_info):
+ """test netdev_pformat when netdev_info returns 'down' interfaces."""
+ m_netdev_info.return_value = (
+ {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
+ 'scope': 'host'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True},
+ 'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
+ self.assertEqual(
+ readResource("netinfo/netdev-formatted-output-down"),
+ netdev_pformat())
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_nettools_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering nettools route info."""
+
+ def subp_netstat_route_selector(*args, **kwargs):
+ if args[0] == ['netstat', '--route', '--numeric', '--extend']:
+ return (SAMPLE_ROUTE_OUT_V4, '')
+ if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
+ return (SAMPLE_ROUTE_OUT_V6, '')
+ raise Exception('Unexpected subp call %s' % args[0])
+
+ m_subp.side_effect = subp_netstat_route_selector
+ m_which.side_effect = lambda x: x if x == 'netstat' else None
+ content = route_pformat()
+ self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_route_pformat(self, m_subp):
- """netdev_pformat properly rendering network device information."""
- m_subp.return_value = (SAMPLE_ROUTE_OUT, '')
+ def test_route_iproute_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering ip route info."""
+
+ def subp_iproute_selector(*args, **kwargs):
+ if ['ip', '-o', 'route', 'list'] == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V4, '')
+ v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
+ if v6cmd == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V6, '')
+ raise Exception('Unexpected subp call %s' % args[0])
+
+ m_subp.side_effect = subp_iproute_selector
+ m_which.side_effect = lambda x: x if x == 'ip' else None
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_warn_on_missing_commands(self, m_subp, m_which):
+ """route_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Niether ip nor netstat found
+ content = route_pformat()
+ self.assertEqual('\n', content)
+ self.assertEqual(
+ "WARNING: Could not print routes: missing 'ip' and 'netstat'"
+ " commands\n",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
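
The pattern used throughout these tests is to steer netdev_pformat toward one
tool by giving util.which a side_effect that answers only for that tool. A
minimal standalone sketch of the same idea (module paths as in the tests
above):

    from unittest import mock

    def fake_which(prog):
        # Pretend only 'ip' is installed; lookups for anything else miss.
        return prog if prog == 'ip' else None

    with mock.patch('cloudinit.netinfo.util.which', side_effect=fake_which):
        pass  # code under test now takes its iproute2 branch
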
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
new file mode 100644
index 00000000..94b6b255
--- /dev/null
+++ b/cloudinit/tests/test_stages.py
@@ -0,0 +1,231 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests related to cloudinit.stages module."""
+
+import os
+
+from cloudinit import stages
+from cloudinit import sources
+
+from cloudinit.event import EventType
+from cloudinit.util import write_file
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+TEST_INSTANCE_ID = 'i-testing'
+
+
+class FakeDataSource(sources.DataSource):
+
+ def __init__(self, paths=None, userdata=None, vendordata=None,
+ network_config=''):
+ super(FakeDataSource, self).__init__({}, None, paths=paths)
+ self.metadata = {'instance-id': TEST_INSTANCE_ID}
+ self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
+ self._network_config = None
+ if network_config: # Permit for None value to setup attribute
+ self._network_config = network_config
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+ def _get_data(self):
+ return True
+
+
+class TestInit(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestInit, self).setUp()
+ self.tmpdir = self.tmp_dir()
+ self.init = stages.Init()
+ # Setup fake Paths for Init to reference
+ self.init._cfg = {'system_info': {
+ 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
+ 'run_dir': self.tmpdir}}}
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+
+ def test_wb__find_networking_config_disabled(self):
+ """find_networking_config returns no config when disabled."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath('data'), 'upgraded-network')
+ write_file(disable_file, '')
+ self.assertEqual(
+ (None, disable_file),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {'config': 'disabled'}
+ self.assertEqual(
+ (None, 'cmdline'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by cmdline\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline):
+ """find_networking_config returns when disabled by datasource cfg."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {}} # system config doesn't disable
+
+ self.init.datasource = FakeDataSource(
+ network_config={'config': 'disabled'})
+ self.assertEqual(
+ (None, 'ds'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by ds\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline):
+ """find_networking_config returns when disabled by system config."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {'config': 'disabled'}}
+ self.assertEqual(
+ (None, 'system_cfg'),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by system_cfg\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_kernel(self, m_cmdline):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {'config': ['fakekernel']}
+ m_cmdline.return_value = expected_cfg
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {'config': ['fakesys_config']}}
+ self.init.datasource = FakeDataSource(
+ network_config={'config': ['fakedatasource']})
+ self.assertEqual(
+ (expected_cfg, 'cmdline'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline):
+ """find_networking_config returns system config when present."""
+ m_cmdline.return_value = {} # No kernel network config
+ expected_cfg = {'config': ['fakesys_config']}
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': expected_cfg}
+ self.init.datasource = FakeDataSource(
+ network_config={'config': ['fakedatasource']})
+ self.assertEqual(
+ (expected_cfg, 'system_cfg'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline):
+ """find_networking_config returns datasource net config if present."""
+ m_cmdline.return_value = {} # No kernel network config
+ # No system config for network in setUp
+ expected_cfg = {'config': ['fakedatasource']}
+ self.init.datasource = FakeDataSource(network_config=expected_cfg)
+ self.assertEqual(
+ (expected_cfg, 'ds'),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_fallback(self, m_cmdline):
+ """find_networking_config returns fallback config if not defined."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ # Neither datasource nor system_info disable or provide network
+
+ fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}],
+ 'version': 1}
+
+ def fake_generate_fallback():
+ return fake_cfg
+
+ # Monkey patch distro which gets cached on self.init
+ distro = self.init.distro
+ distro.generate_fallback_config = fake_generate_fallback
+ self.assertEqual(
+ (fake_cfg, 'fallback'),
+ self.init._find_networking_config())
+ self.assertNotIn('network config disabled', self.logs.getvalue())
+
+ def test_apply_network_config_disabled(self):
+ """Log when network is disabled by upgraded-network."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath('data'), 'upgraded-network')
+
+ def fake_network_config():
+ return (None, disable_file)
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.assertIn(
+ 'INFO: network config is disabled by %s' % disable_file,
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_new_instance(self, m_ubuntu):
+ """Call distro apply_network_config methods on is_new_instance."""
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True)
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_same_instance_id(self, m_ubuntu):
+ """Only call distro.apply_network_config_names on same instance id."""
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath('data'), 'instance-id')
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_not_called()
+ self.assertIn(
+ 'No network config applied. Neither a new instance'
+ " nor datasource network update on '%s' event" % EventType.BOOT,
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ def test_apply_network_on_datasource_allowed_event(self, m_ubuntu):
+ """Apply network if datasource.update_metadata permits BOOT event."""
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath('data'), 'instance-id')
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ 'version': 1, 'config': [
+ {'subnets': [{'type': 'dhcp'}], 'type': 'physical',
+ 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
+
+ def fake_network_config():
+ return net_cfg, 'fallback'
+
+ self.init._find_networking_config = fake_network_config
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self.init.datasource.update_events = {'network': [EventType.BOOT]}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True)
+
+# vi: ts=4 expandtab
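
These tests pin down the source precedence _find_networking_config
implements: kernel cmdline first, then system config, then the datasource,
then distro fallback; a 'disabled' config from any source short-circuits to
None. A rough sketch of that selection order (not the real implementation):

    def find_net_config(cmdline_cfg, system_cfg, ds_cfg, fallback_cfg):
        # First source that provides anything wins; 'disabled' wins as None.
        for cfg, src in ((cmdline_cfg, 'cmdline'), (system_cfg, 'system_cfg'),
                         (ds_cfg, 'ds')):
            if cfg:
                if cfg.get('config') == 'disabled':
                    return (None, src)
                return (cfg, src)
        return (fallback_cfg, 'fallback')
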
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index b778a3a7..113249d9 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.url_helper import oauth_headers
+from cloudinit.url_helper import oauth_headers, read_file_or_url
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+from cloudinit import util
+
+import httpretty
try:
@@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase):
'url', 'consumer_key', 'token_key', 'token_secret',
'consumer_secret')
self.assertEqual('url', return_value)
+
+
+class TestReadFileOrUrl(CiTestCase):
+ def test_read_file_or_url_str_from_file(self):
+ """Test that str(result.contents) on file is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ tmpf = self.tmp_path("myfile1")
+ data = b'This is my file content\n'
+ util.write_file(tmpf, data, omode="wb")
+ result = read_file_or_url("file://%s" % tmpf)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode('utf-8'))
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url(self):
+ """Test that str(result.contents) on url is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ url = 'http://hostname/path'
+ data = b'This is my url content\n'
+ httpretty.register_uri(httpretty.GET, url, data)
+ result = read_file_or_url(url)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode('utf-8'))
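
The contract exercised here is that read_file_or_url returns a response
whose .contents stays bytes while str() yields decoded text; a short usage
sketch (path illustrative):

    from cloudinit.url_helper import read_file_or_url

    resp = read_file_or_url('file:///etc/hostname')
    raw = resp.contents   # bytes
    text = str(resp)      # utf-8 text, not "b'...'"
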
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 3f37dbb6..edb0c18f 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -3,11 +3,12 @@
"""Tests for cloudinit.util"""
import logging
-from textwrap import dedent
+import platform
import cloudinit.util as util
from cloudinit.tests.helpers import CiTestCase, mock
+from textwrap import dedent
LOG = logging.getLogger(__name__)
@@ -16,6 +17,100 @@ MOUNT_INFO = [
'153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
]
+OS_RELEASE_SLES = dedent("""\
+ NAME="SLES"\n
+ VERSION="12-SP3"\n
+ VERSION_ID="12.3"\n
+ PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
+ ID="sles"\nANSI_COLOR="0;32"\n
+ CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+""")
+
+OS_RELEASE_OPENSUSE = dedent("""\
+NAME="openSUSE Leap"
+VERSION="42.3"
+ID=opensuse
+ID_LIKE="suse"
+VERSION_ID="42.3"
+PRETTY_NAME="openSUSE Leap 42.3"
+ANSI_COLOR="0;32"
+CPE_NAME="cpe:/o:opensuse:leap:42.3"
+BUG_REPORT_URL="https://bugs.opensuse.org"
+HOME_URL="https://www.opensuse.org/"
+""")
+
+OS_RELEASE_CENTOS = dedent("""\
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+""")
+
+OS_RELEASE_REDHAT_7 = dedent("""\
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+""")
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+ "Red Hat Enterprise Linux Server release 6.10 (Santiago)")
+REDHAT_RELEASE_REDHAT_7 = (
+ "Red Hat Enterprise Linux Server release 7.5 (Maipo)")
+
+
+OS_RELEASE_DEBIAN = dedent("""\
+ PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+ NAME="Debian GNU/Linux"
+ VERSION_ID="9"
+ VERSION="9 (stretch)"
+ ID=debian
+ HOME_URL="https://www.debian.org/"
+ SUPPORT_URL="https://www.debian.org/support"
+ BUG_REPORT_URL="https://bugs.debian.org/"
+""")
+
+OS_RELEASE_UBUNTU = dedent("""\
+ NAME="Ubuntu"\n
+ # comment test
+ VERSION="16.04.3 LTS (Xenial Xerus)"\n
+ ID=ubuntu\n
+ ID_LIKE=debian\n
+ PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+ VERSION_ID="16.04"\n
+ HOME_URL="http://www.ubuntu.com/"\n
+ SUPPORT_URL="http://help.ubuntu.com/"\n
+ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+ VERSION_CODENAME=xenial\n
+ UBUNTU_CODENAME=xenial\n
+""")
+
class FakeCloud(object):
@@ -135,7 +230,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
"""Calls to cloud.get_hostname pass the metadata_only parameter."""
mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
- hostname, fqdn = util.get_hostname_fqdn(
+ _hn, _fqdn = util.get_hostname_fqdn(
cfg={}, cloud=mycloud, metadata_only=True)
self.assertEqual(
[{'fqdn': True, 'metadata_only': True},
@@ -212,4 +307,175 @@ class TestBlkid(CiTestCase):
capture=True, decode="replace")
+@mock.patch('cloudinit.util.subp')
+class TestUdevadmSettle(CiTestCase):
+ def test_with_no_params(self, m_subp):
+ """called with no parameters."""
+ util.udevadm_settle()
+ m_subp.assert_called_once_with(['udevadm', 'settle'])
+
+ def test_with_exists_and_not_exists(self, m_subp):
+ """with exists=file where file does not exist should invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.udevadm_settle(exists=mydev)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--exit-if-exists=%s' % mydev])
+
+ def test_with_exists_and_file_exists(self, m_subp):
+ """with exists=file where file does exist should not invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.write_file(mydev, "foo\n")
+ util.udevadm_settle(exists=mydev)
+ self.assertIsNone(m_subp.call_args)
+
+ def test_with_timeout_int(self, m_subp):
+ """timeout can be an integer."""
+ timeout = 9
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--timeout=%s' % timeout])
+
+ def test_with_timeout_string(self, m_subp):
+ """timeout can be a string."""
+ timeout = "555"
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--timeout=%s' % timeout])
+
+ def test_with_exists_and_timeout(self, m_subp):
+ """test call with both exists and timeout."""
+ mydev = self.tmp_path("mydev")
+ timeout = "3"
+ util.udevadm_settle(exists=mydev, timeout=timeout)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--exit-if-exists=%s' % mydev,
+ '--timeout=%s' % timeout])
+
+ def test_subp_exception_raises_to_caller(self, m_subp):
+ m_subp.side_effect = util.ProcessExecutionError("BOOM")
+ self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+
+ @classmethod
+ def os_release_exists(cls, path):
+ """Side effect function"""
+ if path == '/etc/os-release':
+ return 1
+
+ @classmethod
+ def redhat_release_exists(cls, path):
+ """Side effect function"""
+ if path == '/etc/redhat-release':
+ return 1
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file has
+ the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_SLES
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file does not
+ have the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_UBUNTU
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_centos6(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS 6."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '6.10', 'Final'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+ """Verify the correct release info on CentOS 7 without os-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+ m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 6 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on COPR CentOS."""
+ m_os_release.return_value = OS_RELEASE_CENTOS
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('centos', '7', 'Core'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_debian(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on Debian."""
+ m_os_release.return_value = OS_RELEASE_DEBIAN
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('debian', '9', 'stretch'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on OpenSUSE."""
+ m_os_release.return_value = OS_RELEASE_OPENSUSE
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ """Verify we get no information if os-release does not exist"""
+ m_platform_dist.return_value = ('', '', '')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ """Verify we get an empty tuple when no information exists and
+ Exceptions are not propagated"""
+ m_platform_dist.side_effect = Exception()
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ """Verify we get the correct platform information"""
+ m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
# vi: ts=4 expandtab
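
These distro tests route get_linux_distro down either the os-release or the
redhat-release path by making os.path.exists answer True for exactly one
file. The same technique outside a TestCase, as a hedged sketch:

    from unittest import mock
    import cloudinit.util as util

    with mock.patch('os.path.exists',
                    side_effect=lambda p: p == '/etc/os-release'), \
            mock.patch('cloudinit.util.load_file',
                       return_value='ID=debian\nVERSION_ID="9"\n'):
        # VERSION_CODENAME and VERSION are absent, so flavor is empty.
        assert util.get_linux_distro() == ('debian', '9', '')
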
diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py
new file mode 100644
index 00000000..a96c2a47
--- /dev/null
+++ b/cloudinit/tests/test_version.py
@@ -0,0 +1,31 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.tests.helpers import CiTestCase
+from cloudinit import version
+
+import mock
+
+
+class TestExportsFeatures(CiTestCase):
+ def test_has_network_config_v1(self):
+ self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+
+ def test_has_network_config_v2(self):
+ self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+
+
+class TestVersionString(CiTestCase):
+ @mock.patch("cloudinit.version._PACKAGED_VERSION",
+ "17.2-3-gb05b9972-0ubuntu1")
+ def test_package_version_respected(self):
+ """If _PACKAGED_VERSION is filled in, then it should be returned."""
+ self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
+
+ @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@")
+ @mock.patch("cloudinit.version.__VERSION__", "17.2")
+ def test_package_version_skipped(self):
+ """If _PACKAGED_VERSION is not modified, then return __VERSION__."""
+ self.assertEqual("17.2", version.version_string())
+
+
+# vi: ts=4 expandtab
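
mock.patch works on module-level constants as well as callables, temporarily
rebinding the attribute for the test's duration; the same pattern standalone:

    from unittest import mock
    from cloudinit import version

    with mock.patch('cloudinit.version._PACKAGED_VERSION', '18.4-0ubuntu1'):
        assert version.version_string() == '18.4-0ubuntu1'
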
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 03a573af..8067979e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -15,6 +15,7 @@ import six
import time
from email.utils import parsedate
+from errno import ENOENT
from functools import partial
from itertools import count
from requests import exceptions
@@ -80,6 +81,32 @@ def combine_url(base, *add_ons):
return url
+def read_file_or_url(url, timeout=5, retries=10,
+ headers=None, data=None, sec_between=1, ssl_details=None,
+ headers_cb=None, exception_cb=None):
+ url = url.lstrip()
+ if url.startswith("/"):
+ url = "file://%s" % url
+ if url.lower().startswith("file://"):
+ if data:
+ LOG.warning("Unable to post data to file resource %s", url)
+ file_path = url[len("file://"):]
+ try:
+ with open(file_path, "rb") as fp:
+ contents = fp.read()
+ except IOError as e:
+ code = e.errno
+ if e.errno == ENOENT:
+ code = NOT_FOUND
+ raise UrlError(cause=e, code=code, headers=None, url=url)
+ return FileResponse(file_path, contents=contents)
+ else:
+ return readurl(url, timeout=timeout, retries=retries, headers=headers,
+ headers_cb=headers_cb, data=data,
+ sec_between=sec_between, ssl_details=ssl_details,
+ exception_cb=exception_cb)
+
+
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
@@ -96,7 +123,7 @@ class StringResponse(object):
return True
def __str__(self):
- return self.contents
+ return self.contents.decode('utf-8')
class FileResponse(StringResponse):
@@ -519,7 +546,7 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
resource_owner_secret=token_secret,
signature_method=oauth1.SIGNATURE_PLAINTEXT,
timestamp=timestamp)
- uri, signed_headers, body = client.sign(url)
+ _uri, signed_headers, _body = client.sign(url)
return signed_headers
# vi: ts=4 expandtab
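
One effect of moving read_file_or_url here is that a missing local file
surfaces as a UrlError carrying a 404-style code, so callers can handle
file:// and http:// misses uniformly; a sketch (path illustrative):

    from cloudinit.url_helper import read_file_or_url, UrlError

    try:
        read_file_or_url('file:///no/such/seed')
    except UrlError as e:
        print(e.code)  # NOT_FOUND (404) when the file does not exist
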
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cc55daf8..ed83d2d8 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -19,7 +19,7 @@ import six
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.url_helper import UrlError
+from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -224,8 +224,8 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = util.read_file_or_url(include_url,
- ssl_details=self.ssl_details)
+ resp = read_file_or_url(include_url,
+ ssl_details=self.ssl_details)
if include_once_on and resp.ok():
util.write_file(include_once_fn, resp.contents,
mode=0o600)
@@ -337,8 +337,10 @@ def is_skippable(part):
# Converts a raw string into a MIME message
def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
+ """convert a string (more likely bytes) or a message into
+ a mime message."""
if not raw_data:
- raw_data = ''
+ raw_data = b''
def create_binmsg(data, content_type):
maintype, subtype = content_type.split("/", 1)
@@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
msg.set_payload(data)
return msg
- try:
- data = util.decode_binary(util.decomp_gzip(raw_data))
- if "mime-version:" in data[0:4096].lower():
- msg = util.message_from_string(data)
- else:
- msg = create_binmsg(data, content_type)
- except UnicodeDecodeError:
- msg = create_binmsg(raw_data, content_type)
+ if isinstance(raw_data, six.text_type):
+ bdata = raw_data.encode('utf-8')
+ else:
+ bdata = raw_data
+ bdata = util.decomp_gzip(bdata, decode=False)
+ if b"mime-version:" in bdata[0:4096].lower():
+ msg = util.message_from_string(bdata.decode('utf-8'))
+ else:
+ msg = create_binmsg(bdata, content_type)
return msg
+
# vi: ts=4 expandtab
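
The reworked convert_string normalizes input to bytes before sniffing for a
MIME header, instead of round-tripping through UnicodeDecodeError; roughly:

    from cloudinit.user_data import convert_string

    msg = convert_string(b'#!/bin/sh\necho hi\n')
    # Non-MIME input is wrapped in a single-part message using the
    # default content type; MIME input is parsed as a message instead.
    print(msg.get_content_type())
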
diff --git a/cloudinit/util.py b/cloudinit/util.py
index acdc0d85..50680960 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -576,6 +576,79 @@ def get_cfg_option_int(yobj, key, default=0):
return int(get_cfg_option_str(yobj, key, default=default))
+def _parse_redhat_release(release_file=None):
+ """Return a dictionary of distro info fields from /etc/redhat-release.
+
+ Dict keys will align with /etc/os-release keys:
+ ID, VERSION_ID, VERSION_CODENAME
+ """
+
+ if not release_file:
+ release_file = '/etc/redhat-release'
+ if not os.path.exists(release_file):
+ return {}
+ redhat_release = load_file(release_file)
+ redhat_regex = (
+ r'(?P<name>.+) release (?P<version>[\d\.]+) '
+ r'\((?P<codename>[^)]+)\)')
+ match = re.match(redhat_regex, redhat_release)
+ if match:
+ group = match.groupdict()
+ group['name'] = group['name'].lower().partition(' linux')[0]
+ if group['name'] == 'red hat enterprise':
+ group['name'] = 'redhat'
+ return {'ID': group['name'], 'VERSION_ID': group['version'],
+ 'VERSION_CODENAME': group['codename']}
+ return {}
+
+
+def get_linux_distro():
+ distro_name = ''
+ distro_version = ''
+ flavor = ''
+ os_release = {}
+ if os.path.exists('/etc/os-release'):
+ os_release = load_shell_content(load_file('/etc/os-release'))
+ if not os_release:
+ os_release = _parse_redhat_release()
+ if os_release:
+ distro_name = os_release.get('ID', '')
+ distro_version = os_release.get('VERSION_ID', '')
+ if 'sles' in distro_name or 'suse' in distro_name:
+ # RELEASE_BLOCKER: We will drop this sles divergent behavior
+ # before 18.4 so that get_linux_distro returns a named tuple
+ # which will include both version codename and architecture
+ # on all distributions.
+ flavor = platform.machine()
+ else:
+ flavor = os_release.get('VERSION_CODENAME', '')
+ if not flavor:
+ match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
+ os_release.get('VERSION', ''))
+ if match:
+ flavor = match.groupdict()['codename']
+ if distro_name == 'rhel':
+ distro_name = 'redhat'
+ else:
+ dist = ('', '', '')
+ try:
+ # Will be removed in 3.7
+ dist = platform.dist() # pylint: disable=W1505
+ except Exception:
+ pass
+ finally:
+ found = None
+ for entry in dist:
+ if entry:
+ found = 1
+ if not found:
+ LOG.warning('Unable to determine distribution, template '
+ 'expansion may have unexpected results')
+ return dist
+
+ return (distro_name, distro_version, flavor)
+
+
def system_info():
info = {
'platform': platform.platform(),
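
As defined above, get_linux_distro returns a (name, version, flavor) tuple,
where flavor is a release codename except on SUSE, which reports the machine
architecture until the RELEASE_BLOCKER above is resolved. Expected shapes,
matching the unit tests earlier in this diff:

    util.get_linux_distro()
    # ('ubuntu', '16.04', 'xenial')   Debian family: codename flavor
    # ('redhat', '7.5', 'Maipo')      via os-release or redhat-release
    # ('sles', '12.3', 'x86_64')      SUSE: architecture, not codename
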
@@ -583,19 +656,19 @@ def system_info():
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.dist(), # pylint: disable=W1505
+ 'dist': get_linux_distro()
}
system = info['system'].lower()
var = 'unknown'
if system == "linux":
linux_dist = info['dist'][0].lower()
- if linux_dist in ('centos', 'fedora', 'debian'):
+ if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
- elif linux_dist == 'suse':
+ elif linux_dist in ('opensuse', 'sles'):
var = 'suse'
else:
var = 'linux'
@@ -857,37 +930,6 @@ def fetch_ssl_details(paths=None):
return ssl_details
-def read_file_or_url(url, timeout=5, retries=10,
- headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None, exception_cb=None):
- url = url.lstrip()
- if url.startswith("/"):
- url = "file://%s" % url
- if url.lower().startswith("file://"):
- if data:
- LOG.warning("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
- try:
- contents = load_file(file_path, decode=False)
- except IOError as e:
- code = e.errno
- if e.errno == ENOENT:
- code = url_helper.NOT_FOUND
- raise url_helper.UrlError(cause=e, code=code, headers=None,
- url=url)
- return url_helper.FileResponse(file_path, contents=contents)
- else:
- return url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- headers_cb=headers_cb,
- data=data,
- sec_between=sec_between,
- ssl_details=ssl_details,
- exception_cb=exception_cb)
-
-
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
blob = decode_binary(blob)
@@ -905,8 +947,20 @@ def load_yaml(blob, default=None, allowed=(dict,)):
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
- except (yaml.YAMLError, TypeError, ValueError):
- logexc(LOG, "Failed loading yaml blob")
+ except (yaml.YAMLError, TypeError, ValueError) as e:
+ msg = 'Failed loading yaml blob'
+ mark = None
+ if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+ mark = getattr(e, 'context_mark')
+ elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+ mark = getattr(e, 'problem_mark')
+ if mark:
+ msg += (
+ '. Invalid format at line {line} column {col}: "{err}"'.format(
+ line=mark.line + 1, col=mark.column + 1, err=e))
+ else:
+ msg += '. {err}'.format(err=e)
+ LOG.warning(msg)
return loaded
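
With the mark handling above, a malformed blob now logs the offending
coordinates rather than a bare traceback; for example (message per the format
string above, exact line/column depend on the blob):

    util.load_yaml('a: b\nc\n')
    # WARNING: Failed loading yaml blob. Invalid format at line N column M: "..."
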
@@ -925,12 +979,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+ md_resp = url_helper.read_file_or_url(md_url, timeout, retries,
+ file_retries)
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
- ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
+ ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries,
+ file_retries)
ud = None
if ud_resp.ok():
ud = ud_resp.contents
@@ -1154,7 +1210,9 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
- return is_resolvable(urlparse.urlparse(url).hostname)
+ return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
+ func=is_resolvable,
+ args=(urlparse.urlparse(url).hostname,))
def search_for_mirror(candidates):
@@ -1446,7 +1504,7 @@ def get_config_logfiles(cfg):
for fmt in get_output_cfg(cfg, None):
if not fmt:
continue
- match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+ match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt)
if not match:
continue
target = match.group('target')
@@ -1608,7 +1666,8 @@ def mounts():
return mounted
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
+def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
+ update_env_for_mount=None):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
@@ -1670,7 +1729,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
- subp(mountcmd)
+ subp(mountcmd, update_env=update_env_for_mount)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
@@ -1857,9 +1916,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
return subp(*args, **kwargs)
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+def subp(args, data=None, rcs=None, env=None, capture=True,
+ combine_capture=False, shell=False,
logstring=False, decode="replace", target=None, update_env=None,
status_cb=None):
+ """Run a subprocess.
+
+ :param args: command to run in a list. [cmd, arg1, arg2...]
+ :param data: input to the command, made available on its stdin.
+ :param rcs:
+ a list of allowed return codes. If the subprocess exits with a value
+ not in this list, a ProcessExecutionError will be raised. Output is
+ returned as strings by default; see the 'decode' parameter.
+ :param env: a dictionary for the command's environment.
+ :param capture:
+ boolean indicating if output should be captured. If True, then stderr
+ and stdout will be returned. If False, they will not be redirected.
+ :param combine_capture:
+ boolean indicating if stderr should be redirected to stdout. When True,
+ interleaved stderr and stdout will be returned as the first element of
+ a tuple, and the second will be an empty string or bytes (per decode).
+ If combine_capture is True, then output is captured independent of
+ the value of capture.
+ :param shell: boolean indicating if this should be run with a shell.
+ :param logstring:
+ the command will be logged to DEBUG. If it contains info that should
+ not be logged, then logstring will be logged instead.
+ :param decode:
+ if False, no decoding will be done and returned stdout and stderr will
+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+ These values are passed through to bytes().decode() as the 'errors'
+ parameter. There is no support for decoding to other than utf-8.
+ :param target:
+ not supported, kwarg present only to make function signature similar
+ to curtin's subp.
+ :param update_env:
+ update the environment for this command with this dictionary.
+ This will not affect the current process's os.environ.
+ :param status_cb:
+ call this function with a single string argument before starting
+ and after finishing.
+
+ :return:
+ if not capturing, return is (None, None)
+ if capturing, stdout and stderr are returned.
+ if decode:
+ entries in tuple will be python2 unicode or python3 string
+ if not decode:
+ entries in tuple will be python2 string or python3 bytes
+ """
# not supported in cloud-init (yet), for now kept in the call signature
# to ease maintaining code shared between cloud-init and curtin
@@ -1885,7 +1990,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
status_cb('Begin run command: {command}\n'.format(command=command))
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ " (shell=%s, capture=%s)"),
+ args, rcs, shell, 'combine' if combine_capture else capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
@@ -1896,6 +2002,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
+ if combine_capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT
if data is None:
# using devnull assures any reads get null, rather
# than possibly waiting on input.
@@ -1934,10 +2043,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
devnull_fp.close()
# Just ensure blank instead of none.
- if not out and capture:
- out = b''
- if not err and capture:
- err = b''
+ if capture or combine_capture:
+ if not out:
+ out = b''
+ if not err:
+ err = b''
if decode:
def ldecode(data, m='utf-8'):
if not isinstance(data, bytes):
@@ -2061,24 +2171,33 @@ def is_container():
return False
-def get_proc_env(pid):
+def get_proc_env(pid, encoding='utf-8', errors='replace'):
"""
Return the environment in a dict that a given process id was started with.
- """
- env = {}
- fn = os.path.join("/proc/", str(pid), "environ")
+ @param encoding: if true, then decoding will be done with
+ .decode(encoding, errors) and text will be returned.
+ if false then binary will be returned.
+ @param errors: only used if encoding is true."""
+ fn = os.path.join("/proc", str(pid), "environ")
+
try:
- contents = load_file(fn)
- toks = contents.split("\x00")
- for tok in toks:
- if tok == "":
- continue
- (name, val) = tok.split("=", 1)
- if name:
- env[name] = val
+ contents = load_file(fn, decode=False)
except (IOError, OSError):
- pass
+ return {}
+
+ env = {}
+ null, equal = (b"\x00", b"=")
+ if encoding:
+ null, equal = ("\x00", "=")
+ contents = contents.decode(encoding, errors)
+
+ for tok in contents.split(null):
+ if not tok:
+ continue
+ (name, val) = tok.split(equal, 1)
+ if name:
+ env[name] = val
return env
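
The reworked get_proc_env lets callers opt out of decoding to inspect the
raw environ bytes:

    import os

    env = util.get_proc_env(os.getpid())                 # str -> str
    raw = util.get_proc_env(os.getpid(), encoding=None)  # bytes -> bytes
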
@@ -2214,7 +2333,7 @@ def parse_mtab(path):
def find_freebsd_part(label_part):
if label_part.startswith("/dev/label/"):
target_label = label_part[5:]
- (label_part, err) = subp(['glabel', 'status', '-s'])
+ (label_part, _err) = subp(['glabel', 'status', '-s'])
for labels in label_part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0].startswith(target_label):
@@ -2275,8 +2394,8 @@ def parse_mount(path):
# the regex is a bit complex. to better understand this regex see:
# https://regex101.com/r/2F6c1k/1
# https://regex101.com/r/T2en7a/1
- regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \
- '(?=(?:type)[\s]+([\S]+)|\(([^,]*))'
+ regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
+ r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
for line in mount_locs:
m = re.search(regex, line)
if not m:
@@ -2545,11 +2664,21 @@ def _call_dmidecode(key, dmidecode_path):
if result.replace(".", "") == "":
return ""
return result
- except (IOError, OSError) as _err:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
+ except (IOError, OSError) as e:
+ LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
return None
+def is_x86(uname_arch=None):
+ """Return True if platform is x86-based"""
+ if uname_arch is None:
+ uname_arch = os.uname()[4]
+ x86_arch_match = (
+ uname_arch == 'x86_64' or
+ (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
+ return x86_arch_match
+
+
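
is_x86 folds the previous inline architecture test into one predicate:

    util.is_x86('x86_64')   # True
    util.is_x86('i686')     # True: 'i' prefix with an '86' suffix
    util.is_x86('aarch64')  # False; handled separately by read_dmi_data
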
def read_dmi_data(key):
"""
Wrapper for reading DMI data.
@@ -2577,8 +2706,7 @@ def read_dmi_data(key):
# running dmidecode can be problematic on some arches (LP: #1243287)
uname_arch = os.uname()[4]
- if not (uname_arch == "x86_64" or
- (uname_arch.startswith("i") and uname_arch[2:] == "86") or
+ if not (is_x86(uname_arch) or
uname_arch == 'aarch64' or
uname_arch == 'amd64'):
LOG.debug("dmidata is not supported on %s", uname_arch)
@@ -2727,4 +2855,19 @@ def mount_is_read_write(mount_point):
mount_opts = result[-1].split(',')
return mount_opts[0] == 'rw'
+
+def udevadm_settle(exists=None, timeout=None):
+ """Invoke udevadm settle with optional exists and timeout parameters"""
+ settle_cmd = ["udevadm", "settle"]
+ if exists:
+ # skip the settle if the requested path already exists
+ if os.path.exists(exists):
+ return
+ settle_cmd.extend(['--exit-if-exists=%s' % exists])
+ if timeout:
+ settle_cmd.extend(['--timeout=%s' % timeout])
+
+ return subp(settle_cmd)
+
+
# vi: ts=4 expandtab
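
udevadm_settle composes its command line from whichever optional parameters
are given, and skips the call entirely when the awaited path already exists;
for example (label illustrative):

    util.udevadm_settle(exists='/dev/disk/by-label/seed', timeout=10)
    # runs: udevadm settle --exit-if-exists=/dev/disk/by-label/seed --timeout=10
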
diff --git a/cloudinit/version.py b/cloudinit/version.py
index ccd0f84e..844a02e0 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "18.2"
+__VERSION__ = "18.4"
+_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
# supports network config version 1
@@ -15,6 +16,9 @@ FEATURES = [
def version_string():
+ """Extract a version string from cloud-init."""
+ if not _PACKAGED_VERSION.startswith('@@'):
+ return _PACKAGED_VERSION
return __VERSION__
# vi: ts=4 expandtab
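
In a source tree the '@@PACKAGED_VERSION@@' sentinel is left untouched and
__VERSION__ is returned; packaging (see the debian/rules note in the
changelog below) rewrites _PACKAGED_VERSION so installed builds report the
full package version:

    from cloudinit import version

    version.version_string()
    # '18.4' from a source checkout
    # '18.4-0ubuntu1~16.04.2' once packaging fills in _PACKAGED_VERSION
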
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index f9f7a63c..1da90c40 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -130,7 +130,7 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
os.path.join(_get_warn_dir(cfg), name),
topline + "\n".join(fmtlines) + "\n" + topline)
- LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)
+ LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)
if sleep:
LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
diff --git a/debian/changelog b/debian/changelog
index 7a26815b..74e8dc2d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,219 @@
+cloud-init (18.4-0ubuntu1~16.04.2) xenial; urgency=medium
+
+ * cherry-pick 1d5e9aef: azure: Add apply_network_config option to
+ disable network (LP: #1798424)
+ * debian/patches/openstack-no-network-config.patch
+ add patch to default Azure apply_network_config to False. Only
+ fallback network config on eth0 is generated by cloud-init. IMDS
+ network_config is ignored.
+
+ -- Chad Smith <chad.smith@canonical.com> Wed, 17 Oct 2018 12:51:09 -0600
+
+cloud-init (18.4-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
+
+ * drop the following cherry-picks now included:
+ + cpick-3cee0bf8-oracle-fix-detect_openstack-to-report-True-on
+ * refresh patches:
+ + debian/patches/azure-use-walinux-agent.patch
+ + debian/patches/openstack-no-network-config.patch
+ * refresh patches:
+ + debian/patches/ds-identify-behavior-xenial.patch
+ * New upstream release. (LP: #1795953)
+ - release 18.4
+ - tests: allow skipping an entire cloud_test without running.
+ - tests: disable lxd tests on cosmic
+ - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps
+ - lxd: adjust to snap installed lxd.
+ - docs: surface experimental doc in instance-data.json
+ - tests: fix ec2 integration tests. process meta_data instead of meta-data
+ - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard]
+ - cli: add cloud-init query subcommand to query instance metadata
+ - tools/tox-venv: update for new features.
+ - pylint: ignore warning assignment-from-no-return for _write_network
+ - stages: Fix bug causing datasource to have incorrect sys_cfg.
+ - Remove dead-code _write_network distro implementations.
+ - net_util: ensure static configs have netmask in translate_network result
+ [Thomas Berger]
+ - Fall back to root:root on syslog permissions if other options fail.
+ [Robert Schweikert]
+ - tests: Add mock for util.get_hostname. [Robert Schweikert]
+ - ds-identify: doc string cleanup.
+ - OpenStack: Support setting mac address on bond. [Fabian Wiesel]
+ - bash_completion/cloud-init: fix shell syntax error.
+ - EphemeralIPv4Network: Be more explicit when adding default route.
+ - OpenStack: support reading of newer versions of metadata.
+ - OpenStack: fix bug causing 'latest' version to be used from network.
+ - user-data: jinja template to render instance-data.json in cloud-config
+ - config: disable ssh access to a configured user account
+ - tests: print failed testname instead of docstring upon failure
+ - tests: Disallow use of util.subp except for where needed.
+ - sysconfig: refactor sysconfig to accept distro specific templates paths
+ - Add unit tests for config/cc_ssh.py [Francis Ginther]
+ - Fix the built-in cloudinit/tests/helpers:skipIf
+ - read-version: enhance error message [Joshua Powers]
+ - hyperv_reporting_handler: simplify threaded publisher
+ - VMWare: Fix a network config bug in vm with static IPv4 and no gateway.
+ [Pengpeng Sun]
+ - logging: Add logging config type hyperv for reporting via Azure KVP
+ [Andy Liu]
+ - tests: disable other snap test as well [Joshua Powers]
+ - tests: disable snap, fix write_files binary [Joshua Powers]
+ - Add datasource Oracle Compute Infrastructure (OCI).
+ - azure: allow azure to generate network configuration from IMDS per boot.
+ - Scaleway: Add network configuration to the DataSource [Louis Bouchard]
+ - docs: Fix example cloud-init analyze command to match output.
+ [Wesley Gao]
+ - netplan: Correctly render macaddress on a bonds and bridges when
+ provided.
+ - tools: Add 'net-convert' subcommand command to 'cloud-init devel'.
+ - redhat: remove ssh keys on new instance.
+ - Use typeset or local in profile.d scripts.
+ - OpenNebula: Fix null gateway6 [Akihiko Ota]
+ - oracle: fix detect_openstack to report True on OracleCloud.com DMI data
+ - tests: improve LXDInstance trying to workaround or catch bug.
+ - update_metadata re-config on every boot comments and tests not quite
+ right [Mike Gerdts]
+ - tests: Collect build_info from system if available.
+ - pylint: Fix pylint warnings reported in pylint 2.0.0.
+ - get_linux_distro: add support for rhel via redhat-release.
+ - get_linux_distro: add support for centos6 and rawhide flavors of redhat
+ - tools: add '--debug' to tools/net-convert.py
+ - tests: bump the version of paramiko to 2.4.1.
+
+ -- Chad Smith <chad.smith@canonical.com> Wed, 03 Oct 2018 12:10:25 -0600
+
+cloud-init (18.3-9-g2e62cb8a-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
+
+ * cherry-pick 3cee0bf8: oracle: fix detect_openstack to report True on
+ (LP: #1784685)
+
+ -- Chad Smith <chad.smith@canonical.com> Tue, 31 Jul 2018 13:57:21 -0600
+
+cloud-init (18.3-9-g2e62cb8a-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
+
+ * New upstream snapshot. (LP: #1777912)
+ - docs: note in rtd about avoiding /tmp when writing files
+ - ubuntu,centos,debian: get_linux_distro to align with platform.dist
+ - Fix boothook docs on environment variable name (INSTANCE_I ->
+ INSTANCE_ID) [Marc Tamsky]
+ - update_metadata: a datasource can support network re-config every boot
+ - tests: drop salt-minion integration test
+ - Retry on failed import of gpg receive keys.
+ - tools: Fix run-container when neither source nor binary package requested.
+ - docs: Fix a small spelling error. [Oz N Tiram]
+ - tox: use simplestreams from git repository rather than bzr.
+
+ -- Chad Smith <chad.smith@canonical.com> Mon, 09 Jul 2018 15:34:52 -0600
+
+cloud-init (18.3-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
+
+ * debian/rules: update version.version_string to contain packaged version.
+ (LP: #1770712)
+ * debian/patches/openstack-no-network-config.patch
+ add patch to ignore Openstack network_config from network_data.json by
+ default
+ * Refresh patches against upstream:
+ + azure-use-walinux-agent.patch
+ + ds-identify-behavior-xenial.patch
+ * New upstream release. (LP: #1777912)
+ - release 18.3
+ - docs: represent sudo:false in docs for user_groups config module
+ - Explicitly prevent `sudo` access for user module [Jacob Bednarz]
+ - lxd: Delete default network and detach device if lxd-init created them.
+ - openstack: avoid unneeded metadata probe on non-openstack platforms
+ - stages: fix tracebacks if a module stage is undefined or empty
+ [Robert Schweikert]
+ - Be more safe on string/bytes when writing multipart user-data to disk.
+ - Fix get_proc_env for pids that have non-utf8 content in environment.
+ - tests: fix salt_minion integration test on bionic and later
+ - tests: provide human-readable integration test summary when --verbose
+ - tests: skip chrony integration tests on lxd running artful or older
+ - test: add optional --preserve-instance arg to integration tests
+ - netplan: fix mtu if provided by network config for all rendered types
+ - tests: remove pip install workarounds for pylxd, take upstream fix.
+ - subp: support combine_capture argument.
+ - tests: ordered tox dependencies for pylxd install
+ - util: add get_linux_distro function to replace platform.dist
+ [Robert Schweikert]
+ - pyflakes: fix unused variable references identified by pyflakes 2.0.0.
+ - Do not use the systemd_prefix macro, not available in this environment
+ [Robert Schweikert]
+ - doc: Add config info to ec2, openstack and cloudstack datasource docs
+ - Enable SmartOS network metadata to work with netplan via per-subnet
+ routes [Dan McDonald]
+ - openstack: Allow discovery in init-local using dhclient in a sandbox.
+ - tests: Avoid using https in httpretty, improve HttPretty test case.
+ - yaml_load/schema: Add invalid line and column nums to error message
+ - Azure: Ignore NTFS mount errors when checking ephemeral drive
+ [Paul Meyer]
+ - packages/brpm: Get proper dependencies for cmdline distro.
+ - packages: Make rpm spec files patch in package version like in debs.
+ - tools/run-container: replace tools/run-centos with more generic.
+ - Update version.version_string to contain packaged version.
+ - cc_mounts: Do not add devices to fstab that are already present.
+ [Lars Kellogg-Stedman]
+ - ds-identify: ensure that we have certain tokens in PATH.
+ - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers]
+ - read_file_or_url: move to url_helper, fix bug in its FileResponse.
+ - cloud_tests: help pylint
+ - flake8: fix flake8 errors in previous commit.
+ - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford]
+ - tests: restructure SSH and initial connections [Joshua Powers]
+ - ds-identify: recognize container-other as a container, test SmartOS.
+ - cloud-config.service: run After snap.seeded.service.
+ - tests: do not rely on host /proc/cmdline in test_net.py
+ [Lars Kellogg-Stedman]
+ - ds-identify: Remove dupe call to is_ds_enabled, improve debug message.
+ - SmartOS: fix get_interfaces for nics that do not have addr_assign_type.
+ - tests: fix package and ca_cert cloud_tests on bionic
+ - ds-identify: make shellcheck 0.4.6 happy with ds-identify.
+ - pycodestyle: Fix deprecated string literals, move away from flake8.
+ - azure: Add reported ready marker file. [Joshua Chan]
+ - tools: Support adding a release suffix through packages/bddeb.
+ - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt.
+ [Harm Weites]
+ - tools: Re-use the orig tarball in packages/bddeb if it is around.
+ - netinfo: fix netdev_pformat when a nic does not have an address assigned.
+ - collect-logs: add -v flag, write to stderr, limit journal to single boot.
+ - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled.
+ - Add reporting events and log_time around early source of blocking time
+ - IBMCloud: recognize provisioning environment during debug boots.
+ - net: detect unstable network names and trigger a settle if needed
+ - IBMCloud: improve documentation in datasource.
+ - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov]
+ - packages/debian/control.in: add missing dependency on iproute2.
+ - DataSourceSmartOS: add locking of serial device. [Mike Gerdts]
+ - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts]
+ - DataSourceSmartOS: list() should always return a list [Mike Gerdts]
+ - schema: in validation, raise ImportError if strict but no jsonschema.
+ - set_passwords: Add newline to end of sshd config, only restart if
+ updated.
+ - pylint: pay attention to unused variable warnings.
+ - doc: Add documentation for AliYun datasource. [Junjie Wang]
+ - Schema: do not warn on duplicate items in commands.
+ - net: Depend on iproute2's ip instead of net-tools ifconfig or route
+ - DataSourceSmartOS: fix hang when metadata service is down [Mike Gerdts]
+ - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to
+ ext4. [Mike Gerdts]
+ - pycodestyle: Fix invalid escape sequences in string literals.
+ - Implement bash completion script for cloud-init command line
+ - tools: Fix make-tarball cli tool usage for development
+ - renderer: support unicode in render_from_file.
+ - Implement ntp client spec with auto support for distro selection
+ - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds.
+ - tests: fix ec2 integration network metadata validation
+
+ -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:32:29 -0600
+
+cloud-init (18.2-4-g05926e48-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
+
+ * cherry-pick 6ef92c98: IBMCloud: recognize provisioning environment
+ during debug (LP: #1767166)
+ * cherry-pick 11172924: IBMCloud: Disable config-drive and nocloud
+ only if IBMCloud (LP: #1766401)
+
+ -- Chad Smith <chad.smith@canonical.com> Mon, 30 Apr 2018 15:52:05 -0600
+
cloud-init (18.2-4-g05926e48-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
* debian/new-upstream-snapshot: Remove script, now maintained elsewhere.
diff --git a/debian/patches/azure-apply-network-config-false.patch b/debian/patches/azure-apply-network-config-false.patch
new file mode 100644
index 00000000..281c19c6
--- /dev/null
+++ b/debian/patches/azure-apply-network-config-false.patch
@@ -0,0 +1,23 @@
+Description: Azure apply_network_config default to False
+ Azure cloud-images on Xenial already contain hotplug network scripts so
+ default behavior should remain to generate only fallback network
+ configuration, which is to dhcp on eth0 and let the image hotplug scripts
+ add network configuration for any additional nics that show up.
+Author: Chad Smith <chad.smith@canonical.com>
+Origin: backport
+Bug: https://bugs.launchpad.net/cloud-init/+bug/1798424
+Forwarded: not-needed
+Last-Update: 2018-10-17
+Index: cloud-init/cloudinit/sources/DataSourceAzure.py
+===================================================================
+--- cloud-init.orig/cloudinit/sources/DataSourceAzure.py
++++ cloud-init/cloudinit/sources/DataSourceAzure.py
+@@ -207,7 +207,7 @@ BUILTIN_DS_CONFIG = {
+ },
+ 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
+ 'dhclient_lease_file': LEASE_FILE,
+- 'apply_network_config': True, # Use IMDS published network configuration
++ 'apply_network_config': False, # Use fallback network config not IMDS
+ }
+ # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
+
diff --git a/debian/patches/azure-use-walinux-agent.patch b/debian/patches/azure-use-walinux-agent.patch
index 2113b75d..3c858824 100644
--- a/debian/patches/azure-use-walinux-agent.patch
+++ b/debian/patches/azure-use-walinux-agent.patch
@@ -6,12 +6,12 @@ Forwarded: not-needed
Author: Scott Moser <smoser@ubuntu.com>
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -190,7 +190,7 @@ if util.is_FreeBSD():
+@@ -196,7 +196,7 @@ if util.is_FreeBSD():
LOG.debug("resource disk is None")
BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START_BUILTIN,
+ 'agent_command': AGENT_START,
- 'data_dir': "/var/lib/waagent",
+ 'data_dir': AGENT_SEED_DIR,
'set_hostname': True,
'hostname_bounce': {
diff --git a/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable b/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable
new file mode 100644
index 00000000..67f9f0e6
--- /dev/null
+++ b/debian/patches/cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable
@@ -0,0 +1,228 @@
+From 1d5e9aefdab06a2574d78e644deed6c6fa1da171 Mon Sep 17 00:00:00 2001
+From: Chad Smith <chad.smith@canonical.com>
+Date: Wed, 17 Oct 2018 18:47:35 +0000
+Subject: [PATCH] azure: Add apply_network_config option to disable network
+ from IMDS
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Azure generates network configuration from the IMDS service and removes
+any preexisting hotplug network scripts which exist in Azure cloud images.
+Add a datasource configuration option which allows for writing a default
+network configuration which sets up dhcp on eth0 and leaves the hotplug
+handling to the cloud-image scripts.
+
+To disable network-config from Azure IMDS, add the following to
+/etc/cloud/cloud.cfg.d/99-azure-no-imds-network.cfg:
+datasource:
+  Azure:
+    apply_network_config: False
+
+LP: #1798424
+---
+ cloudinit/sources/DataSourceAzure.py | 11 +++-
+ doc/rtd/topics/datasources/azure.rst | 46 +++++++++++++++
+ tests/unittests/test_datasource/test_azure.py | 56 +++++++++++++++++--
+ 3 files changed, 107 insertions(+), 6 deletions(-)
+
+--- a/cloudinit/sources/DataSourceAzure.py
++++ b/cloudinit/sources/DataSourceAzure.py
+@@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = {
+ },
+ 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
+ 'dhclient_lease_file': LEASE_FILE,
++ 'apply_network_config': True, # Use IMDS published network configuration
+ }
++# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
+
+ BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+@@ -450,7 +452,8 @@ class DataSourceAzure(sources.DataSource
+ except sources.InvalidMetaDataException as e:
+ LOG.warning('Could not crawl Azure metadata: %s', e)
+ return False
+- if self.distro and self.distro.name == 'ubuntu':
++ if (self.distro and self.distro.name == 'ubuntu' and
++ self.ds_cfg.get('apply_network_config')):
+ maybe_remove_ubuntu_network_config_scripts()
+
+ # Process crawled data and augment with various config defaults
+@@ -611,7 +614,11 @@ class DataSourceAzure(sources.DataSource
+ the blacklisted devices.
+ """
+ if not self._network_config:
+- self._network_config = parse_network_config(self._metadata_imds)
++ if self.ds_cfg.get('apply_network_config'):
++ nc_src = self._metadata_imds
++ else:
++ nc_src = None
++ self._network_config = parse_network_config(nc_src)
+ return self._network_config
+
+
+--- a/doc/rtd/topics/datasources/azure.rst
++++ b/doc/rtd/topics/datasources/azure.rst
+@@ -57,6 +57,52 @@ in order to use waagent.conf with cloud-
+ ResourceDisk.MountPoint=/mnt
+
+
++Configuration
++-------------
++The following configuration can be set for the datasource in system
++configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
++
++The settings that may be configured are:
++
+ * **agent_command**: Either __builtin__ (default) or a command to run to get
++ metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
++ provided command to obtain metadata.
++ * **apply_network_config**: Boolean set to True to use network configuration
++ described by Azure's IMDS endpoint instead of fallback network config of
++ dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False.
++ * **data_dir**: Path used to read metadata files and write crawled data.
++ * **dhclient_lease_file**: The fallback lease file to source when looking for
++ custom DHCP option 245 from Azure fabric.
++ * **disk_aliases**: A dictionary defining which device paths should be
++ interpreted as ephemeral images. See cc_disk_setup module for more info.
+ * **hostname_bounce**: A dictionary describing the Azure hostname bounce
+ behavior used to react to metadata changes. Azure will throttle ifup/down in
+ some cases after metadata has been updated to inform the dhcp server about
+ updated hostnames.
++ * **set_hostname**: Boolean set to True when we want Azure to set the hostname
++ based on metadata.
++
++An example configuration with the default values is provided below:
++
++.. sourcecode:: yaml
++
++ datasource:
++ Azure:
++ agent_command: __builtin__
++ apply_network_config: true
++ data_dir: /var/lib/waagent
++ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
++ disk_aliases:
++ ephemeral0: /dev/disk/cloud/azure_resource
++ hostname_bounce:
++ interface: eth0
++ command: builtin
++ policy: true
++ hostname_command: hostname
++ set_hostname: true
++
++
+ Userdata
+ --------
+ Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
+--- a/tests/unittests/test_datasource/test_azure.py
++++ b/tests/unittests/test_datasource/test_azure.py
+@@ -254,7 +254,8 @@ scbus-1 on xpt0 bus 0
+ ])
+ return dsaz
+
+- def _get_ds(self, data, agent_command=None, distro=None):
++ def _get_ds(self, data, agent_command=None, distro=None,
++ apply_network=None):
+
+ def dsdevs():
+ return data.get('dsdevs', [])
+@@ -310,6 +311,8 @@ scbus-1 on xpt0 bus 0
+ data.get('sys_cfg', {}), distro=distro, paths=self.paths)
+ if agent_command is not None:
+ dsrc.ds_cfg['agent_command'] = agent_command
++ if apply_network is not None:
++ dsrc.ds_cfg['apply_network_config'] = apply_network
+
+ return dsrc
+
+@@ -414,14 +417,26 @@ fdescfs /dev/fd fdes
+
+ def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+ """get_data will remove ubuntu net scripts on Ubuntu distro."""
++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+- 'sys_cfg': {}}
++ 'sys_cfg': sys_cfg}
+
+ dsrc = self._get_ds(data, distro='ubuntu')
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
++ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
++ """When apply_network_config false, do not remove scripts on Ubuntu."""
++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
++ odata = {'HostName': "myhost", 'UserName': "myuser"}
++ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
++ 'sys_cfg': sys_cfg}
++
++ dsrc = self._get_ds(data, distro='ubuntu')
++ dsrc.get_data()
++ self.m_remove_ubuntu_network_scripts.assert_not_called()
++
+ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+ """Return all structured metadata and cache no class attributes."""
+ yaml_cfg = "{agent_command: my_command}\n"
+@@ -503,8 +518,10 @@ fdescfs /dev/fd fdes
+
+ def test_network_config_set_from_imds(self):
+ """Datasource.network_config returns IMDS network data."""
++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {}
+- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
++ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
++ 'sys_cfg': sys_cfg}
+ expected_network_config = {
+ 'ethernets': {
+ 'eth0': {'set-name': 'eth0',
+@@ -783,9 +800,10 @@ fdescfs /dev/fd fdes
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_imds_network_config(self, mock_fallback):
+ """Network config is generated from IMDS network data when present."""
++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+- 'sys_cfg': {}}
++ 'sys_cfg': sys_cfg}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+@@ -803,6 +821,36 @@ fdescfs /dev/fd fdes
+
+ @mock.patch('cloudinit.net.get_interface_mac')
+ @mock.patch('cloudinit.net.get_devicelist')
++ @mock.patch('cloudinit.net.device_driver')
++ @mock.patch('cloudinit.net.generate_fallback_config')
++ def test_imds_network_ignored_when_apply_network_config_false(
++ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac):
++ """When apply_network_config is False, use fallback instead of IMDS."""
++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
++ odata = {'HostName': "myhost", 'UserName': "myuser"}
++ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
++ 'sys_cfg': sys_cfg}
++ fallback_config = {
++ 'version': 1,
++ 'config': [{
++ 'type': 'physical', 'name': 'eth0',
++ 'mac_address': '00:11:22:33:44:55',
++ 'params': {'driver': 'hv_netsvc'},
++ 'subnets': [{'type': 'dhcp'}],
++ }]
++ }
++ mock_fallback.return_value = fallback_config
++
++ mock_devlist.return_value = ['eth0']
++ mock_dd.return_value = ['hv_netsvc']
++ mock_get_mac.return_value = '00:11:22:33:44:55'
++
++ dsrc = self._get_ds(data)
++ self.assertTrue(dsrc.get_data())
++ self.assertEqual(dsrc.network_config, fallback_config)
++
++ @mock.patch('cloudinit.net.get_interface_mac')
++ @mock.patch('cloudinit.net.get_devicelist')
+ @mock.patch('cloudinit.net.device_driver')
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_fallback_network_config(self, mock_fallback, mock_dd,
diff --git a/debian/patches/ds-identify-behavior-xenial.patch b/debian/patches/ds-identify-behavior-xenial.patch
index 0bcfb372..ba7639ab 100644
--- a/debian/patches/ds-identify-behavior-xenial.patch
+++ b/debian/patches/ds-identify-behavior-xenial.patch
@@ -10,7 +10,7 @@ Bug-ubuntu: http://bugs.launchpad.net/bugs/1660385
--- a/tools/ds-identify
+++ b/tools/ds-identify
-@@ -84,8 +84,8 @@ _DI_LOGGED=""
+@@ -93,8 +93,8 @@ _DI_LOGGED=""
DI_MAIN=${DI_MAIN:-main}
DI_BLKID_OUTPUT=""
@@ -21,7 +21,7 @@ Bug-ubuntu: http://bugs.launchpad.net/bugs/1660385
DI_DMI_CHASSIS_ASSET_TAG=""
DI_DMI_PRODUCT_NAME=""
DI_DMI_SYS_VENDOR=""
-@@ -122,7 +122,7 @@ DI_ON_FOUND=""
+@@ -131,7 +131,7 @@ DI_ON_FOUND=""
DI_ON_MAYBE=""
DI_ON_NOTFOUND=""
diff --git a/debian/patches/openstack-no-network-config.patch b/debian/patches/openstack-no-network-config.patch
new file mode 100644
index 00000000..88449d1d
--- /dev/null
+++ b/debian/patches/openstack-no-network-config.patch
@@ -0,0 +1,40 @@
+Description: Fallback network config instead of network_data.json for OpenStack
+ To make this acceptable as an SRU we keep the same behavior as in
+ the stable release, which is to generate network config for the
+ fallback nic only.
+ .
+ In this series, the OpenStack datasource can optionally generate
+ network_config from network_data.json if the datasource is configured
+ with a file like /etc/cloud/cloud.cfg.d/openstack-net.cfg:
+ .
+ datasource:
+ OpenStack:
+ apply_network_config: true
+Forwarded: not-needed
+Author: Chad Smith <chad.smith@canonical.com>
+
+--- a/cloudinit/sources/DataSourceOpenStack.py
++++ b/cloudinit/sources/DataSourceOpenStack.py
+@@ -98,10 +98,9 @@ class DataSourceOpenStack(openstack.Sour
+ if self._network_config != sources.UNSET:
+ return self._network_config
+
+- # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
++ # Xenial, Artful and Bionic will not provide
+ # network_config by default unless configured in /etc/cloud/cloud.cfg*.
+- # Patch Xenial and Artful before release to default to False.
+- if util.is_false(self.ds_cfg.get('apply_network_config', True)):
++ if util.is_false(self.ds_cfg.get('apply_network_config', False)):
+ self._network_config = None
+ return self._network_config
+ if self.network_json == sources.UNSET:
+--- a/tests/unittests/test_datasource/test_openstack.py
++++ b/tests/unittests/test_datasource/test_openstack.py
+@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpe
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
+ 'networks': [], 'services': []}
++ ds_os.ds_cfg = {'apply_network_config': True} # Default is False
+ ds_os.network_json = sample_json
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ m_convert_json.return_value = example_cfg
diff --git a/debian/patches/series b/debian/patches/series
index 7e909afc..0e264119 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,3 +1,6 @@
azure-use-walinux-agent.patch
ds-identify-behavior-xenial.patch
stable-release-no-jsonschema-dep.patch
+openstack-no-network-config.patch
+cpick-1d5e9aef-azure-Add-apply_network_config-option-to-disable
+azure-apply-network-config-false.patch
diff --git a/debian/rules b/debian/rules
index 72754aab..c5b18ebb 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,6 +1,7 @@
#!/usr/bin/make -f
INIT_SYSTEM ?= upstart,systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
+DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
dh $@ --with python3,systemd --buildsystem pybuild
@@ -21,3 +22,4 @@ override_dh_auto_install:
install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
install -m 0644 -D debian/apport-launcher.py debian/cloud-init/usr/share/apport/package-hooks/cloud-init.py
+ flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index dd91477d..43a62a26 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -37,7 +37,7 @@ fs_setup:
# Default disk definitions for SmartOS
# ------------------------------------
-device_aliases: {'ephemeral0': '/dev/sdb'}
+device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
ephemeral0:
table_type: mbr
@@ -46,7 +46,7 @@ disk_setup:
fs_setup:
- label: ephemeral0
- filesystem: ext3
+ filesystem: ext4
device: ephemeral0.0
# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt
index 3bb06864..002398f5 100644
--- a/doc/examples/cloud-config-run-cmds.txt
+++ b/doc/examples/cloud-config-run-cmds.txt
@@ -18,5 +18,8 @@ runcmd:
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
+ # Note: Don't write files to /tmp from cloud-init; use /run/somedir instead.
+ # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
+ - mkdir /run/mydir
+ - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 7bca24a3..6a363b77 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -30,7 +30,14 @@ users:
gecos: Magic Cloud App Daemon User
inactive: true
system: true
+ - name: fizzbuzz
+ sudo: False
+ ssh_authorized_keys:
+ - <ssh pub key 1>
+ - <ssh pub key 2>
- snapuser: joe@joeuser.io
+ - name: nosshlogins
+ ssh_redirect_user: true
# Valid Values:
# name: The user's login name
@@ -71,13 +78,28 @@ users:
# no_log_init: When set to true, do not initialize lastlog and faillog database.
# ssh_import_id: Optional. Import SSH ids
# ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file
-# sudo: Defaults to none. Set to the sudo string you want to use, i.e.
-# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following
-# format.
-# sudo:
-# - ALL=(ALL) NOPASSWD:/bin/mysql
-# - ALL=(ALL) ALL
-# Note: Please double check your syntax and make sure it is valid.
+# ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud
+# ssh public keys and emit a message redirecting logins to
+# use <default_username> instead. This option only disables cloud
+# provided public-keys. An error will be raised if ssh_authorized_keys
+# or ssh_import_id is provided for the same user.
+#
+# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
+# strings or False to explicitly deny sudo usage. Examples:
+#
+# Allow a user unrestricted sudo access.
+# sudo: ALL=(ALL) NOPASSWD:ALL
+#
+# Adding multiple sudo rule strings.
+# sudo:
+# - ALL=(ALL) NOPASSWD:/bin/mysql
+# - ALL=(ALL) ALL
+#
+# Prevent sudo access for a user.
+# sudo: False
+#
+# Note: Please double check your syntax and make sure it is valid.
# cloud-init does not parse/check the syntax of the sudo
# directive.
# system: Create the user as a system user. This means no home directory.
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index bd84c641..eb84dcf5 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -127,7 +127,10 @@ runcmd:
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- - [ wget, "http://slashdot.org", -O, /tmp/index.html ]
+ # Note: Don't write files to /tmp from cloud-init; use /run/somedir instead.
+ # Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
+ - mkdir /run/mydir
+ - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
# boot commands
@@ -229,9 +232,22 @@ disable_root: false
# respective key in /root/.ssh/authorized_keys if disable_root is true
# see 'man authorized_keys' for more information on what you can do here
#
-# The string '$USER' will be replaced with the username of the default user
-#
-# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"root\".';echo;sleep 10"
+# The string '$USER' will be replaced with the username of the default user.
+# The string '$DISABLE_USER' will be replaced with the username to disable.
+#
+# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10"
+
+# disable ssh access for non-root users
+# To disable ssh access for non-root users, ssh_redirect_user: true can be
+# provided for any user in the 'users' list. This will reject ssh login
+# attempts as that user with a message like the one in disable_root_opts,
+# redirecting the person to log in as <default_username> instead.
+# This option cannot be combined with either ssh_authorized_keys or
+# ssh_import_id.
+users:
+ - default
+ - name: blockeduser
+ ssh_redirect_user: true
# set the locale to a given locale
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index de67f361..20a99a30 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -31,6 +31,7 @@ initialization of a cloud instance.
topics/capabilities.rst
topics/availability.rst
topics/format.rst
+ topics/instancedata.rst
topics/dir_layout.rst
topics/examples.rst
topics/boot.rst
diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst
index 3e2c9e31..0d8b8947 100644
--- a/doc/rtd/topics/capabilities.rst
+++ b/doc/rtd/topics/capabilities.rst
@@ -16,13 +16,15 @@ User configurability
`Cloud-init`_ 's behavior can be configured via user-data.
- User-data can be given by the user at instance launch time.
+ User-data can be given by the user at instance launch time. See
+ :ref:`user_data_formats` for acceptable user-data content.
+
This is done via the ``--user-data`` or ``--user-data-file`` argument to
ec2-run-instances for example.
-* Check your local clients documentation for how to provide a `user-data`
- string or `user-data` file for usage by cloud-init on instance creation.
+* Check your local client's documentation for how to provide a `user-data`
+ string or `user-data` file to cloud-init on instance creation.
Feature detection
@@ -51,10 +53,9 @@ system:
% cloud-init --help
usage: cloud-init [-h] [--version] [--file FILES]
-
[--debug] [--force]
- {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
- ...
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
+ ...
optional arguments:
-h, --help show this help message and exit
@@ -66,17 +67,19 @@ system:
your own risk)
Subcommands:
- {init,modules,single,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
init initializes cloud-init and performs initial modules
modules activates modules using a given configuration key
single run a single module
+ query Query instance metadata from the command line
dhclient-hook run the dhclient hook to record network info
features list defined features
analyze Devel tool: Analyze cloud-init logs and data
devel Run development tools
collect-logs Collect and tar all cloud-init debug info
- clean Remove logs and artifacts so cloud-init can re-run.
- status Report cloud-init status or wait on completion.
+ clean Remove logs and artifacts so cloud-init can re-run
+ status Report cloud-init status or wait on completion
+
CLI Subcommand details
======================
@@ -102,8 +105,8 @@ cloud-init status
Report whether cloud-init is running, done, disabled or errored. Exits
non-zero if an error is detected in cloud-init.
- * **--long**: Detailed status information.
- * **--wait**: Block until cloud-init completes.
+* **--long**: Detailed status information.
+* **--wait**: Block until cloud-init completes.
.. code-block:: shell-session
@@ -141,6 +144,68 @@ Logs collected are:
* journalctl output
* /var/lib/cloud/instance/user-data.txt
+.. _cli_query:
+
+cloud-init query
+------------------
+Query standardized cloud instance metadata crawled by cloud-init and stored
+in ``/run/cloud-init/instance-data.json``. This is a convenience command-line
+interface to reference any cached configuration metadata that cloud-init
+crawls when booting the instance. See :ref:`instance_metadata` for more info.
+
+* **--all**: Dump all available instance data as json which can be queried.
+* **--instance-data**: Optional path to a different instance-data.json file to
+ source for queries.
+* **--list-keys**: List available query keys from cached instance data.
+
+.. code-block:: shell-session
+
+ # List all top-level query keys available (includes standardized aliases)
+ % cloud-init query --list-keys
+ availability_zone
+ base64_encoded_keys
+ cloud_name
+ ds
+ instance_id
+ local_hostname
+ region
+ v1
+
+* **<varname>**: A dot-delimited variable path into the instance-data.json
+ object.
+
+.. code-block:: shell-session
+
+ # Query cloud-init standardized metadata on any cloud
+ % cloud-init query v1.cloud_name
+ aws # or openstack, azure, gce etc.
+
+ # Any standardized instance-data under a <v#> key is aliased as a top-level
+ # key for convenience.
+ % cloud-init query cloud_name
+ aws # or openstack, azure, gce etc.
+
+ # Query datasource-specific metadata on EC2
+ % cloud-init query ds.meta_data.public_ipv4
+
+* **--format**: A string in jinja-template syntax, rendered by replacing any
+  referenced instance-data variables with their values.
+
+.. code-block:: shell-session
+
+ # Generate a custom hostname fqdn based on instance-id, cloud and region
+ % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com'
+ custom-i-0e91f69987f37ec74.us-east-2.aws.com
+
+
+.. note::
+ The standardized instance data keys under **v#** are guaranteed not to change
+ behavior or format. If using top-level convenience aliases for any
+ standardized instance data keys, the value of the most recent (highest **v#**)
+ version of that key is what is reported as the top-level value. So these
+ aliases act as a 'latest'.
+
+
.. _cli_analyze:
cloud-init analyze
@@ -148,10 +213,10 @@ cloud-init analyze
Get detailed reports of where cloud-init spends most of its time. See
:ref:`boot_time_analysis` for more info.
- * **blame** Report ordered by most costly operations.
- * **dump** Machine-readable JSON dump of all cloud-init tracked events.
- * **show** show time-ordered report of the cost of operations during each
- boot stage.
+* **blame** Report ordered by most costly operations.
+* **dump** Machine-readable JSON dump of all cloud-init tracked events.
+* **show** show time-ordered report of the cost of operations during each
+ boot stage.
.. _cli_devel:
@@ -166,6 +231,13 @@ likely be promoted to top-level subcommands when stable.
validation is work in progress and supports a subset of cloud-config
modules.
+ * ``cloud-init devel render``: Use cloud-init's jinja template render to
+ process **#cloud-config** or **custom-scripts**, injecting any variables
+ from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
+ containing the jinja template header ``## template: jinja`` and renders
+   that content with any instance-data.json variables present; a short
+   usage sketch follows.
+
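+For example, rendering a jinja-templated user-data file against this
+instance's cached instance-data.json (the user-data path is illustrative):
+
+.. code-block:: shell-session
+
+   % cloud-init devel render /home/ubuntu/jinja-user-data.yaml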
+
.. _cli_clean:
cloud-init clean
@@ -173,8 +245,8 @@ cloud-init clean
Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the
machine so that cloud-init re-runs all stages as it did on first boot.
- * **--logs**: Optionally remove /var/log/cloud-init*log files.
- * **--reboot**: Reboot the system after removing artifacts.
+* **--logs**: Optionally remove /var/log/cloud-init*log files.
+* **--reboot**: Reboot the system after removing artifacts.
.. _cli_init:
@@ -186,7 +258,7 @@ Can be run on the commandline, but is generally gated to run only once
due to semaphores in **/var/lib/cloud/instance/sem/** and
**/var/lib/cloud/sem**.
- * **--local**: Run *init-local* stage instead of *init*.
+* **--local**: Run *init-local* stage instead of *init*.
.. _cli_modules:
@@ -201,8 +273,8 @@ declared to run in various boot stages in the file
commandline, but each module is gated to run only once due to semaphores
in ``/var/lib/cloud/``.
- * **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
- *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
+* **--mode (init|config|final)**: Run *modules:init*, *modules:config* or
+ *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
.. _cli_single:
@@ -212,9 +284,9 @@ Attempt to run a single named cloud config module. The following example
re-runs the cc_set_hostname module ignoring the module default frequency
of once-per-instance:
- * **--name**: The cloud-config module name to run
- * **--frequency**: Optionally override the declared module frequency
- with one of (always|once-per-instance|once)
+* **--name**: The cloud-config module name to run
+* **--frequency**: Optionally override the declared module frequency
+ with one of (always|once-per-instance|once)
.. code-block:: shell-session
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 7e2854de..e34f145c 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -17,6 +17,14 @@ own way) internally a datasource abstract class was created to allow for a
single way to access the different cloud systems methods to provide this data
through the typical usage of subclasses.
+Any metadata processed by cloud-init's datasources is persisted as
+``/run/cloud-init/instance-data.json``. Cloud-init provides tooling
+to quickly introspect some of that data. See :ref:`instance_metadata` for
+more information.
+
+
+Datasource API
+--------------
The current interface that a datasource object must provide is the following:
.. sourcecode:: python
@@ -52,14 +60,14 @@ The current interface that a datasource object must provide is the following:
# or does not exist)
def device_name_to_device(self, name)
- # gets the locale string this instance should be applying
+ # gets the locale string this instance should be applying
 # which is typically used to adjust the instance's locale settings files
def get_locale(self)
@property
def availability_zone(self)
- # gets the instance id that was assigned to this instance by the
+ # gets the instance id that was assigned to this instance by the
# cloud provider or when said instance id does not exist in the backing
# metadata this will return 'iid-datasource'
def get_instance_id(self)
@@ -80,6 +88,7 @@ Follow for more information.
.. toctree::
:maxdepth: 2
+ datasources/aliyun.rst
datasources/altcloud.rst
datasources/azure.rst
datasources/cloudsigma.rst
@@ -91,6 +100,7 @@ Follow for more information.
datasources/nocloud.rst
datasources/opennebula.rst
datasources/openstack.rst
+ datasources/oracle.rst
datasources/ovf.rst
datasources/smartos.rst
datasources/fallback.rst
diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst
new file mode 100644
index 00000000..3f4f40ca
--- /dev/null
+++ b/doc/rtd/topics/datasources/aliyun.rst
@@ -0,0 +1,74 @@
+.. _datasource_aliyun:
+
+Alibaba Cloud (AliYun)
+======================
+The ``AliYun`` datasource reads data from Alibaba Cloud ECS. Support is
+present in cloud-init since 0.7.9.
+
+Metadata Service
+----------------
+The Alibaba Cloud metadata service is available at the well known url
+``http://100.100.100.200/``. For more information see
+Alibaba Cloud ECS on `metadata
+<https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__.
+
+Versions
+^^^^^^^^
+Like the EC2 metadata service, Alibaba Cloud's metadata service provides
+versioned data under specific paths. As of April 2018, there are only
+``2016-01-01`` and ``latest`` versions.
+
+It is expected that the dated version will maintain a stable interface but
+``latest`` may change content at a future date.
+
+Cloud-init uses the ``2016-01-01`` version.
+
+You can list the versions available to your instance with:
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/
+ 2016-01-01
+ latest
+
+Metadata
+^^^^^^^^
+Instance metadata can be queried at
+``http://100.100.100.200/2016-01-01/meta-data``
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/2016-01-01/meta-data
+ dns-conf/
+ eipv4
+ hostname
+ image-id
+ instance-id
+ instance/
+ mac
+ network-type
+ network/
+ ntp-conf/
+ owner-account-id
+ private-ipv4
+ public-keys/
+ region-id
+ serial-number
+ source-address
+ sub-private-ipv4-list
+ vpc-cidr-block
+ vpc-id
+
+Userdata
+^^^^^^^^
+If provided, user-data will appear at
+``http://100.100.100.200/2016-01-01/user-data``.
+If no user-data is provided, this will return a 404.
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/2016-01-01/user-data
+ #!/bin/sh
+ echo "Hello World."
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index 225093a1..a3101ed7 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -4,7 +4,9 @@ CloudStack
==========
`Apache CloudStack`_ exposes user-data, meta-data, user password and account
-sshkey thru the Virtual-Router. For more details on meta-data and user-data,
+sshkey through the Virtual-Router. The datasource obtains the VR address via
+the dhcp lease information given to the instance.
+For more details on meta-data and user-data,
refer to the `CloudStack Administrator Guide`_.
URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
@@ -18,14 +20,26 @@ is the Virtual Router IP:
Configuration
-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-Apache CloudStack datasource can be configured as follows:
+The settings that may be configured are:
-.. code:: yaml
+ * **max_wait**: the maximum amount of clock time in seconds that should be
+ spent searching metadata_urls. A value less than zero will result in only
+ one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+ request. This is used both when selecting a metadata_url and when crawling
+ the metadata service. (default: 50)
- datasource:
- CloudStack: {}
- None: {}
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ CloudStack:
+ max_wait: 120
+ timeout: 50
datasource_list:
- CloudStack
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 3bc66e17..64c325d8 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -60,4 +60,34 @@ To see which versions are supported from your cloud provider use the following U
...
latest
+
+
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **metadata_urls**: This list of urls will be searched for an Ec2
+ metadata service. The first entry that successfully returns a 200 response
+ for <url>/<version>/meta-data/instance-id will be selected.
+ (default: ['http://169.254.169.254', 'http://instance-data:8773']).
+ * **max_wait**: the maximum amount of clock time in seconds that should be
+ spent searching metadata_urls. A value less than zero will result in only
+ one request being made, to the first in the list. (default: 120)
+ * **timeout**: the timeout value provided to urlopen for each individual http
+ request. This is used both when selecting a metadata_url and when crawling
+ the metadata service. (default: 50)
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ Ec2:
+ metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+ max_wait: 120
+ timeout: 50
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 43592dec..421da08f 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -7,6 +7,21 @@ This datasource supports reading data from the
`OpenStack Metadata Service
<https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
+Discovery
+-------------
+To determine whether a platform looks like it may be OpenStack, cloud-init
+checks the following environment attributes (see the inspection sketch after
+this list):
+
+ * Maybe OpenStack if
+
+ * **non-x86 cpu architecture**: because DMI data is buggy on some arches
+ * Is OpenStack **if x86 architecture and ANY** of the following
+
+ * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
+ * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
+ * **DMI chassis_asset_tag** is *OpenTelekomCloud*
+
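+A quick way to inspect two of these attributes on a running guest is via the
+standard sysfs DMI paths (illustrative; not verbatim what ds-identify runs):
+
+.. code-block:: shell-session
+
+   % cat /sys/class/dmi/id/product_name
+   OpenStack Nova
+   % cat /sys/class/dmi/id/chassis_asset_tag
+   OpenTelekomCloud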
+
Configuration
-------------
The following configuration can be set for the datasource in system
@@ -25,18 +40,22 @@ The settings that may be configured are:
the metadata service. (default: 10)
* **retries**: The number of retries that should be done for an http request.
This value is used only after metadata_url is selected. (default: 5)
+ * **apply_network_config**: A boolean specifying whether to configure the
+ network for the instance based on network_data.json provided by the
+ metadata service. When False, only configure dhcp on the primary nic for
+ this instance. (default: True)
-An example configuration with the default values is provided as example below:
+An example configuration with the default values is provided below:
.. sourcecode:: yaml
- #cloud-config
datasource:
OpenStack:
metadata_urls: ["http://169.254.169.254"]
max_wait: -1
timeout: 10
retries: 5
+ apply_network_config: True
Vendor Data
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
new file mode 100644
index 00000000..f2383cee
--- /dev/null
+++ b/doc/rtd/topics/datasources/oracle.rst
@@ -0,0 +1,26 @@
+.. _datasource_oracle:
+
+Oracle
+======
+
+This datasource reads metadata, vendor-data and user-data from
+`Oracle Compute Infrastructure`_ (OCI).
+
+Oracle Platform
+---------------
+OCI provides bare metal and virtual machines. In both cases,
+the platform identifies itself via DMI data in the chassis asset tag
+with the string 'OracleCloud.com'.
+
+Oracle's platform provides a metadata service that mimics the 2013-10-17
+version of OpenStack metadata service. Initially support for Oracle
+was done via the OpenStack datasource.
+
+Cloud-init has a specific datasource for Oracle in order to:
+ a. allow and support future growth of the OCI platform.
+ b. address small differences between OpenStack and Oracle metadata
+ implementation.
+
+
+.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index cacc8a27..51363ea5 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -45,7 +45,7 @@ subcommands default to reading /var/log/cloud-init.log.
.. code-block:: shell-session
- $ cloud-init analyze blame -i my-cloud-init.log
+ $ cloud-init analyze dump -i my-cloud-init.log
[
{
"description": "running config modules",
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index e25289ad..15234d21 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -1,6 +1,8 @@
-*******
-Formats
-*******
+.. _user_data_formats:
+
+*****************
+User-Data Formats
+*****************
User data that will be acted upon by cloud-init must be in one of the following types.
@@ -65,6 +67,11 @@ Typically used by those who just want to execute a shell script.
Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive.
+.. note::
+ New in cloud-init v. 18.4: User-data scripts can also render cloud instance
+ metadata variables using jinja templating. See
+ :ref:`instance_metadata` for more information.
+
Example
-------
@@ -103,12 +110,18 @@ These things include:
- certain ssh keys should be imported
- *and many more...*
-**Note:** The file must be valid yaml syntax.
+.. note::
+ This file must be valid yaml syntax.
See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats.
Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive.
+.. note::
+ New in cloud-init v. 18.4: Cloud config data can also render cloud instance
+ metadata variables using jinja templating. See
+ :ref:`instance_metadata` for more information.
+
Upstart Job
===========
@@ -121,7 +134,7 @@ Cloud Boothook
This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately.
This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself.
-It is provided with the instance id in the environment variable ``INSTANCE_I``. This could be made use of to provide a 'once-per-instance' type of functionality.
+It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality.
Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive.
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
new file mode 100644
index 00000000..634e1807
--- /dev/null
+++ b/doc/rtd/topics/instancedata.rst
@@ -0,0 +1,297 @@
+.. _instance_metadata:
+
+*****************
+Instance Metadata
+*****************
+
+What is instance data?
+========================
+
+Instance data is the collection of all configuration data that cloud-init
+processes to configure the instance. This configuration typically
+comes from any number of sources:
+
+* cloud-provided metadata services (aka metadata)
+* custom config-drive attached to the instance
+* cloud-config seed files in the booted cloud image or distribution
+* vendordata provided from files or cloud metadata services
+* userdata provided at instance creation
+
+Each cloud provider presents unique configuration metadata in different
+formats to the instance. Cloud-init provides a cache of any crawled metadata
+as well as a versioned set of standardized instance data keys which it makes
+available on all platforms.
+
+Cloud-init produces a simple json object in
+``/run/cloud-init/instance-data.json`` which represents standardized and
+versioned representation of the metadata it consumes during initial boot. The
+intent is to provide the following benefits to users or scripts on any system
+deployed with cloud-init:
+
+* simple static object to query to obtain an instance's metadata
+* speed: avoid costly network transactions for metadata that is already cached
+ on the filesystem
+* reduce need to recrawl metadata services for static metadata that is already
+ cached
+* leverage cloud-init's best practices for crawling cloud-metadata services
+* avoid rolling unique metadata crawlers on each cloud platform to get
+ metadata configuration values
+
+Cloud-init stores any instance data processed in the following files:
+
+* ``/run/cloud-init/instance-data.json``: world-readable json containing
+ standardized keys, sensitive keys redacted
+* ``/run/cloud-init/instance-data-sensitive.json``: root-readable unredacted
+ json blob
+* ``/var/lib/cloud/instance/user-data.txt``: root-readable sensitive raw
+ userdata
+* ``/var/lib/cloud/instance/vendor-data.txt``: root-readable sensitive raw
+ vendordata
+
+Cloud-init redacts any security-sensitive content from instance-data.json and
+stores ``/run/cloud-init/instance-data.json`` as a world-readable json file.
+Because user-data and vendor-data can contain passwords, both of those files
+are readable only by *root*. The *root* user can also read
+``/run/cloud-init/instance-data-sensitive.json``, which contains all instance
+data from instance-data.json as well as the unredacted sensitive content.
+
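+A quick way to see this split on a booted instance (sizes and timestamps are
+elided; the modes shown sketch the intent described above):
+
+.. code-block:: shell-session
+
+   % ls -l /run/cloud-init/instance-data*.json
+   -rw-r--r-- 1 root root ... /run/cloud-init/instance-data.json
+   -rw------- 1 root root ... /run/cloud-init/instance-data-sensitive.json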
+
+Format of instance-data.json
+============================
+
+The instance-data.json and instance-data-sensitive.json files are well-formed
+JSON and record the set of keys and values for any metadata processed by
+cloud-init. Cloud-init standardizes the format for this content so that it
+can be generalized across different cloud platforms.
+
+There are three basic top-level keys:
+
+* **base64_encoded_keys**: A list of forward-slash delimited key paths into
+  the instance-data.json object whose values are base64-encoded for json
+  compatibility. Values at these paths should be decoded to get the original
+  value; see the decoding sketch after this list.
+
+* **sensitive_keys**: A list of forward-slash delimited key paths into
+ the instance-data.json object whose value is considered by the datasource as
+ 'security sensitive'. Only the keys listed here will be redacted from
+ instance-data.json for non-root users.
+
+* **ds**: Datasource-specific metadata crawled for the specific cloud
+ platform. It should closely represent the structure of the cloud metadata
+ crawled. The structure of content and details provided are entirely
+ cloud-dependent. Mileage will vary depending on what the cloud exposes.
+ The content exposed under the 'ds' key is currently **experimental** and
+ expected to change slightly in the upcoming cloud-init release.
+
+* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
+ exist on all cloud platforms. They will also retain their current behavior
+ and format and will be carried forward even if cloud-init introduces a new
+ version of standardized keys with **v2**.
+
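+A minimal decoding sketch in Python; the traversal simply follows the
+forward-slash delimited paths described above and is not a cloud-init API:
+
+.. code-block:: python
+
+   import base64
+   import json
+
+   with open('/run/cloud-init/instance-data.json') as fp:
+       data = json.load(fp)
+
+   # Walk each forward-slash delimited path and decode the value in place.
+   for path in data['base64_encoded_keys']:
+       node = data
+       *parents, leaf = path.split('/')
+       for part in parents:
+           node = node[part]
+       node[leaf] = base64.b64decode(node[leaf]).decode('utf-8')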
+The standardized keys present:
+
++----------------------+-----------------------------------------------+---------------------------+
+| Key path | Description | Examples |
++======================+===============================================+===========================+
+| v1.cloud_name | The name of the cloud provided by metadata | aws, openstack, azure, |
+| | key 'cloud-name' or the cloud-init datasource | configdrive, nocloud, |
+| | name which was discovered. | ovf, etc. |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.instance_id | Unique instance_id allocated by the cloud | i-<somehash> |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.local_hostname | The internal or local hostname of the system | ip-10-41-41-70, |
+| | | <user-provided-hostname> |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.region | The physical region/datacenter in which the | us-east-2 |
+| | instance is deployed | |
++----------------------+-----------------------------------------------+---------------------------+
+| v1.availability_zone | The physical availability zone in which the | us-east-2b, nova, null |
+| | instance is deployed | |
++----------------------+-----------------------------------------------+---------------------------+
+
+
+Below is an example of ``/run/cloud-init/instance-data.json`` on an EC2
+instance:
+
+.. sourcecode:: json
+
+ {
+ "base64_encoded_keys": [],
+ "sensitive_keys": [],
+ "ds": {
+ "meta_data": {
+ "ami-id": "ami-014e1416b628b0cbf",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {
+ "ami": "/dev/sda1",
+ "ephemeral0": "sdb",
+ "ephemeral1": "sdc",
+ "root": "/dev/sda1"
+ },
+ "hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+ "instance-action": "none",
+ "instance-id": "i-04fa31cfc55aa7976",
+ "instance-type": "t2.micro",
+ "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+ "local-ipv4": "10.41.41.70",
+ "mac": "06:b6:92:dd:9d:24",
+ "metrics": {
+ "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
+ },
+ "network": {
+ "interfaces": {
+ "macs": {
+ "06:b6:92:dd:9d:24": {
+ "device-number": "0",
+ "interface-id": "eni-08c0c9fdb99b6e6f4",
+ "ipv4-associations": {
+ "18.224.22.43": "10.41.41.70"
+ },
+ "local-hostname": "ip-10-41-41-70.us-east-2.compute.internal",
+ "local-ipv4s": "10.41.41.70",
+ "mac": "06:b6:92:dd:9d:24",
+ "owner-id": "437526006925",
+ "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com",
+ "public-ipv4s": "18.224.22.43",
+ "security-group-ids": "sg-828247e9",
+ "security-groups": "Cloud-init integration test secgroup",
+ "subnet-id": "subnet-282f3053",
+ "subnet-ipv4-cidr-block": "10.41.41.0/24",
+ "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64",
+ "vpc-id": "vpc-252ef24d",
+ "vpc-ipv4-cidr-block": "10.41.0.0/16",
+ "vpc-ipv4-cidr-blocks": "10.41.0.0/16",
+ "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56"
+ }
+ }
+ }
+ },
+ "placement": {
+ "availability-zone": "us-east-2b"
+ },
+ "profile": "default-hvm",
+ "public-hostname": "ec2-18-224-22-43.us-east-2.compute.amazonaws.com",
+ "public-ipv4": "18.224.22.43",
+ "public-keys": {
+ "cloud-init-integration": [
+ "ssh-rsa
+ AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB
+ cloud-init-integration"
+ ]
+ },
+ "reservation-id": "r-06ab75e9346f54333",
+ "security-groups": "Cloud-init integration test secgroup",
+ "services": {
+ "domain": "amazonaws.com",
+ "partition": "aws"
+ }
+ }
+ },
+ "v1": {
+ "availability-zone": "us-east-2b",
+ "availability_zone": "us-east-2b",
+ "cloud-name": "aws",
+ "cloud_name": "aws",
+ "instance-id": "i-04fa31cfc55aa7976",
+ "instance_id": "i-04fa31cfc55aa7976",
+ "local-hostname": "ip-10-41-41-70",
+ "local_hostname": "ip-10-41-41-70",
+ "region": "us-east-2"
+ }
+ }
+
+
+Using instance-data
+===================
+
+As of cloud-init v. 18.4, any variables present in
+``/run/cloud-init/instance-data.json`` can be used in:
+
+* User-data scripts
+* Cloud config data
+* Command line interface via **cloud-init query** or
+ **cloud-init devel render**
+
+Many clouds allow users to provide user-data to an instance at
+the time the instance is launched. Cloud-init supports a number of
+:ref:`user_data_formats`.
+
+Both user-data scripts and **#cloud-config** data support jinja template
+rendering.
+When the first line of the provided user-data begins with
+**## template: jinja**, cloud-init will use jinja to render that file.
+Any instance-data-sensitive.json variables are surfaced as dot-delimited
+jinja template variables because cloud-config modules are run as 'root'
+user.
+
+
+Below are some examples of providing these types of user-data:
+
+* Cloud config calling home with the ec2 public hostname and availability-zone
+
+.. code-block:: shell-session
+
+ ## template: jinja
+ #cloud-config
+ runcmd:
+ - echo 'EC2 public hostname allocated to instance: {{
+ ds.meta_data.public_hostname }}' > /tmp/instance_metadata
+ - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
+ /tmp/instance_metadata
+ - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
+ "availability-zone": "{{ v1.availability_zone }}"}'
+ https://example.com
+
+* Custom user-data script performing different operations based on region
+
+.. code-block:: shell-session
+
+ ## template: jinja
+ #!/bin/bash
+ {% if v1.region == 'us-east-2' -%}
+ echo 'Installing custom proxies for {{ v1.region }}'
+ sudo apt-get install my-xtra-fast-stack
+ {%- endif %}
+ ...
+
+.. note::
+ Trying to reference jinja variables that don't exist in
+ instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+ and the following string in your rendered user-data:
+ ``CI_MISSING_JINJA_VAR/<your_varname>``.
+
+Cloud-init also surfaces a commandline tool **cloud-init query** which can
+assist developers or scripts with obtaining instance metadata easily. See
+:ref:`cli_query` for more information.
+
+To cut down on keystrokes on the command line, cloud-init also provides
+top-level key aliases for any standardized ``v#`` keys present. The preceding
+``v1`` is not required of ``v1.var_name``. These aliases will represent the
+value of the highest versioned standard key. For example, the ``cloud_name``
+value will be that of ``v2.cloud_name`` if both ``v1`` and ``v2`` keys are
+present in instance-data.json.
+The **query** command also publishes ``userdata`` and ``vendordata`` keys to
+the root user which will contain the decoded user and vendor data provided to
+this instance. Non-root users referencing userdata or vendordata keys will
+see only redacted values.
+
+.. code-block:: shell-session
+
+ # List all top-level instance-data keys available
+ % cloud-init query --list-keys
+
+ # Find your EC2 ami-id
+ % cloud-init query ds.meta_data.ami_id
+
+ # Format your cloud_name and region using jinja template syntax
+ % cloud-init query --format 'cloud: {{ v1.cloud_name }} myregion: {{ v1.region }}'
+
+.. note::
+ To save time designing a user-data template for a specific cloud's
+ instance-data.json, use the 'render' cloud-init command on an
+ instance booted on your favorite cloud. See :ref:`cli_devel` for more
+ information.
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 2f8ab54c..3b0148ca 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -130,6 +130,18 @@ the bond interfaces.
The ``bond_interfaces`` key accepts a list of network device ``name`` values
from the configuration. This list may be empty.
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+
+ The possible supported values of a device's MTU are not available at
+ configuration time. It is possible to specify a value too large or too
+ small for a device, and it may be ignored by the device.
+
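+A short sketch of ``mtu`` on a bond follows; the device names and the value
+itself are illustrative, not recommendations::
+
+    network:
+      version: 1
+      config:
+        - type: bond
+          name: bond0
+          mtu: 9000
+          bond_interfaces:
+            - enp0s1
+            - enp0s2
+          params:
+            bond-mode: active-backup
+          subnets:
+            - type: dhcp
+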
**params**: *<Dictionary of key: value bonding parameter pairs>*
The ``params`` key in a bond holds a dictionary of bonding parameters.
@@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys:
- ``vlan_link``: Specify the underlying link via its ``name``.
- ``vlan_id``: Specify the VLAN numeric id.
+The following optional keys are supported:
+
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+
+ The possible supported values of a device's MTU are not available at
+ configuration time. It is possible to specify a value too large or too
+ small for a device, and it may be ignored by the device.
+
+
**VLAN Example**::
network:
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 335d236a..ea370ef5 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -174,6 +174,12 @@ recognized by ``inet_pton(3)``
Example for IPv4: ``gateway4: 172.16.0.1``
Example for IPv6: ``gateway6: 2001:4::1``
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
**nameservers**: *<(mapping)>*
Set DNS servers and search domains, for manual address configuration. There
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index cac4a6e4..b83bd899 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -58,7 +58,8 @@ explaining how to run one or the other independently.
$ tox -e citest -- run --verbose \
--os-name stretch --os-name xenial \
--deb cloud-init_0.7.8~my_patch_all.deb \
- --preserve-data --data-dir ~/collection
+ --preserve-data --data-dir ~/collection \
+ --preserve-instance
The above command will do the following:
@@ -76,6 +77,10 @@ The above command will do the following:
* ``--preserve-data`` always preserve collected data, do not remove data
after successful test run
+* ``--preserve-instance`` do not destroy the instance after test to allow
+ for debugging the stopped instance during integration test development. By
+ default, test instances are destroyed after the test completes.
+
* ``--data-dir ~/collection`` write collected data into `~/collection`,
rather than using a temporary directory
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 6dde191d..b10ff964 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -5,6 +5,7 @@
# the packages/pkg-deps.json file as well.
#
+unittest2
# ec2 backend
boto3==1.5.9
@@ -13,8 +14,7 @@ paramiko==2.4.2
# lxd backend
# 04/03/2018: enables use of lxd 3.0
-git+https://github.com/lxc/pylxd.git@1a85a12a23401de6e96b1aeaf59ecbff2e88f49d
-
+git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779
# finds latest image information
-bzr+lp:simplestreams
+git+https://git.launchpad.net/simplestreams
diff --git a/packages/bddeb b/packages/bddeb
index 4f2e2ddf..95602a02 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -1,11 +1,14 @@
#!/usr/bin/env python3
import argparse
+import csv
import json
import os
import shutil
import sys
+UNRELEASED = "UNRELEASED"
+
def find_root():
# expected path is in <top_dir>/packages/
@@ -28,6 +31,24 @@ if "avoid-pep8-E402-import-not-top-of-file":
DEBUILD_ARGS = ["-S", "-d"]
+def get_release_suffix(release):
+ """Given ubuntu release (xenial), return a suffix for package (~16.04.1)"""
+ csv_path = "/usr/share/distro-info/ubuntu.csv"
+ rels = {}
+ # fields are version, codename, series, created, release, eol, eol-server
+ if os.path.exists(csv_path):
+ with open(csv_path, "r") as fp:
+ # version has "16.04 LTS" or "16.10", so drop "LTS" portion.
+ rels = {row['series']: row['version'].replace(' LTS', '')
+ for row in csv.DictReader(fp)}
+ if release in rels:
+ return "~%s.1" % rels[release]
+ elif release != UNRELEASED:
+ print("missing distro-info-data package, unable to give "
+ "per-release suffix.\n")
+ return ""
+
+
def run_helper(helper, args=None, strip=True):
if args is None:
args = []
@@ -117,7 +138,7 @@ def get_parser():
parser.add_argument("--release", dest="release",
help=("build with changelog referencing RELEASE"),
- default="UNRELEASED")
+ default=UNRELEASED)
for ent in DEBUILD_ARGS:
parser.add_argument(ent, dest="debuild_args", action='append_const',
@@ -148,7 +169,10 @@ def main():
if args.verbose:
capture = False
- templ_data = {'debian_release': args.release}
+ templ_data = {
+ 'debian_release': args.release,
+ 'release_suffix': get_release_suffix(args.release)}
+
with temp_utils.tempdir() as tdir:
# output like 0.7.6-1022-g36e92d3
@@ -157,10 +181,18 @@ def main():
# This is really only a temporary archive
# since we will extract it then add in the debian
# folder, then re-archive it for debian happiness
- print("Creating a temporary tarball using the 'make-tarball' helper")
tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long']
tarball_fp = util.abs_join(tdir, tarball)
- run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
+ path = None
+ for pd in ("./", "../", "../dl/"):
+ if os.path.exists(pd + tarball):
+ path = pd + tarball
+ print("Using existing tarball %s" % path)
+ shutil.copy(path, tarball_fp)
+ break
+ if path is None:
+ print("Creating a temp tarball using the 'make-tarball' helper")
+ run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
print("Extracting temporary tarball %r" % (tarball))
cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
diff --git a/packages/brpm b/packages/brpm
index 3439cf35..a154ef29 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -42,13 +42,13 @@ def run_helper(helper, args=None, strip=True):
return stdout
-def read_dependencies(requirements_file='requirements.txt'):
+def read_dependencies(distro, requirements_file='requirements.txt'):
"""Returns the Python package depedencies from requirements.txt files.
@returns a tuple of (requirements, test_requirements)
"""
pkg_deps = run_helper(
- 'read-dependencies', args=['--distro', 'redhat']).splitlines()
+ 'read-dependencies', args=['--distro', distro]).splitlines()
test_deps = run_helper(
'read-dependencies', args=[
'--requirements-file', 'test-requirements.txt',
@@ -83,7 +83,7 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn):
rpm_upstream_version = version_data['version']
subs['rpm_upstream_version'] = rpm_upstream_version
- deps, test_deps = read_dependencies()
+ deps, test_deps = read_dependencies(distro=args.distro)
subs['buildrequires'] = deps + test_deps
subs['requires'] = deps
diff --git a/packages/debian/changelog.in b/packages/debian/changelog.in
index bdf8d56f..930322f5 100644
--- a/packages/debian/changelog.in
+++ b/packages/debian/changelog.in
@@ -1,5 +1,5 @@
## template:basic
-cloud-init (${version_long}-1~bddeb) ${debian_release}; urgency=low
+cloud-init (${version_long}-1~bddeb${release_suffix}) ${debian_release}; urgency=low
* build
diff --git a/packages/debian/control.in b/packages/debian/control.in
index 46da6dff..e9ed64f3 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -11,6 +11,7 @@ Package: cloud-init
Architecture: all
Depends: ${misc:Depends},
${${python}:Depends},
+ iproute2,
isc-dhcp-client
Recommends: eatmydata, sudo, software-properties-common, gdisk
XB-Python-Version: ${python:Versions}
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
index 4aa907e3..e542c7f1 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules.in
@@ -3,6 +3,7 @@
INIT_SYSTEM ?= systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
PYVER ?= python${pyver}
+DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
dh $@ --with $(PYVER),systemd --buildsystem pybuild
@@ -14,6 +15,7 @@ override_dh_install:
cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
+ flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 6ab0d20b..a3a6d1e0 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -115,6 +115,13 @@ rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests
mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name}
+# patch in the full version to version.py
+version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f)
+[ -n "$version_pys" ] ||
+ { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; }
+( cd "$RPM_BUILD_ROOT" &&
+ sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+
%clean
rm -rf $RPM_BUILD_ROOT
@@ -197,6 +204,7 @@ fi
%dir %{_sysconfdir}/cloud/templates
%config(noreplace) %{_sysconfdir}/cloud/templates/*
%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+%{_sysconfdir}/bash_completion.d/cloud-init
%{_libexecdir}/%{name}
%dir %{_sharedstatedir}/cloud
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 86e18b1b..e781d743 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -5,7 +5,7 @@
# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
Name: cloud-init
-Version: {{version}}
+Version: {{rpm_upstream_version}}
Release: 1{{subrelease}}%{?dist}
Summary: Cloud instance init scripts
@@ -16,22 +16,13 @@ URL: http://launchpad.net/cloud-init
Source0: {{archive_name}}
BuildRoot: %{_tmppath}/%{name}-%{version}-build
-%if 0%{?suse_version} && 0%{?suse_version} <= 1110
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%else
BuildArch: noarch
-%endif
+
{% for r in buildrequires %}
BuildRequires: {{r}}
{% endfor %}
-%if 0%{?suse_version} && 0%{?suse_version} <= 1210
- %define initsys sysvinit
-%else
- %define initsys systemd
-%endif
-
# Install pypi 'dynamic' requirements
{% for r in requires %}
Requires: {{r}}
@@ -39,7 +30,7 @@ Requires: {{r}}
# Custom patches
{% for p in patches %}
-Patch{{loop.index0}: {{p}}
+Patch{{loop.index0}}: {{p}}
{% endfor %}
%description
@@ -63,35 +54,21 @@ end for
%{__python} setup.py install \
--skip-build --root=%{buildroot} --prefix=%{_prefix} \
--record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \
- --init-system=%{initsys}
+ --init-system=systemd
+
+# Move udev rules
+mkdir -p %{buildroot}/usr/lib/udev/rules.d/
+mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/
# Remove non-SUSE templates
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.*
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.*
rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.*
-# Remove cloud-init tests
-rm -r %{buildroot}/%{python_sitelib}/tests
-
-# Move sysvinit scripts to the correct place and create symbolic links
-%if %{initsys} == sysvinit
- mkdir -p %{buildroot}/%{_initddir}
- mv %{buildroot}%{_sysconfdir}/rc.d/init.d/* %{buildroot}%{_initddir}/
- rmdir %{buildroot}%{_sysconfdir}/rc.d/init.d
- rmdir %{buildroot}%{_sysconfdir}/rc.d
-
- mkdir -p %{buildroot}/%{_sbindir}
- pushd %{buildroot}/%{_initddir}
- for file in * ; do
- ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file}
- done
- popd
-%endif
-
# Move documentation
mkdir -p %{buildroot}/%{_defaultdocdir}
mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
-for doc in TODO LICENSE ChangeLog requirements.txt; do
+for doc in LICENSE ChangeLog requirements.txt; do
cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
done
@@ -102,29 +79,35 @@ done
mkdir -p %{buildroot}/var/lib/cloud
+# patch in the full version to version.py
+version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
+[ -n "$version_pys" ] ||
+ { echo "failed to find 'version.py' to patch with version." 1>&2; exit 1; }
+( cd "%{buildroot}" &&
+ sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+
%postun
%insserv_cleanup
%files
-# Sysvinit scripts
-%if %{initsys} == sysvinit
- %attr(0755, root, root) %{_initddir}/cloud-config
- %attr(0755, root, root) %{_initddir}/cloud-final
- %attr(0755, root, root) %{_initddir}/cloud-init-local
- %attr(0755, root, root) %{_initddir}/cloud-init
-
- %{_sbindir}/rccloud-*
-%endif
-
# Program binaries
%{_bindir}/cloud-init*
+# systemd files
+/usr/lib/systemd/system-generators/*
+/usr/lib/systemd/system/*
+
# There doesn't seem to be an agreed upon place for these
# although it appears the standard says /usr/lib but rpmbuild
# will try /usr/lib64 ??
/usr/lib/%{name}/uncloud-init
/usr/lib/%{name}/write-ssh-key-fingerprints
+/usr/lib/%{name}/ds-identify
+
+# udev rules
+/usr/lib/udev/rules.d/66-azure-ephemeral.rules
+
# Docs
%doc %{_defaultdocdir}/cloud-init/*
@@ -136,6 +119,10 @@ mkdir -p %{buildroot}/var/lib/cloud
%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
%dir %{_sysconfdir}/cloud/templates
%config(noreplace) %{_sysconfdir}/cloud/templates/*
+%{_sysconfdir}/bash_completion.d/cloud-init
+
+%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
+%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
# Python code is here...
%{python_sitelib}/*
diff --git a/setup.py b/setup.py
index bc3f52ac..5ed8eae2 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@ from distutils.errors import DistutilsArgError
import subprocess
RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
-
+VARIANT = None
def is_f(p):
return os.path.isfile(p)
@@ -114,10 +114,20 @@ def render_tmpl(template):
atexit.register(shutil.rmtree, tmpd)
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
- tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ if VARIANT:
+ tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
+ VARIANT, template, fpath])
+ else:
+ tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
# return path relative to setup.py
return os.path.join(os.path.basename(tmpd), bname)
+# User can set the variant for template rendering
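+# e.g. (illustrative): python3 setup.py install --distro suse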
+if '--distro' in sys.argv:
+ idx = sys.argv.index('--distro')
+ VARIANT = sys.argv[idx+1]
+ del sys.argv[idx+1]
+ sys.argv.remove('--distro')
INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
@@ -228,6 +238,7 @@ if not in_virtualenv():
INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k]
data_files = [
+ (ETC + '/bash_completion.d', ['bash_completion/cloud-init']),
(ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]),
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
@@ -259,7 +270,7 @@ requirements = read_requires()
setuptools.setup(
name='cloud-init',
version=get_version(),
- description='EC2 initialisation magic',
+ description='Cloud instance initialisation magic',
author='Scott Moser',
author_email='scott.moser@canonical.com',
url='http://launchpad.net/cloud-init/',
@@ -276,4 +287,5 @@ setuptools.setup(
}
)
+
# vi: ts=4 expandtab
diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl
index b2661f63..508d777e 100644
--- a/systemd/cloud-config.service.tmpl
+++ b/systemd/cloud-config.service.tmpl
@@ -3,6 +3,7 @@
Description=Apply the settings specified in cloud-config
Before=vyos-router.service
After=network-online.target cloud-config.target
+After=snapd.seeded.service
Wants=network-online.target cloud-config.target
[Service]
diff --git a/templates/chrony.conf.debian.tmpl b/templates/chrony.conf.debian.tmpl
new file mode 100644
index 00000000..661bf04e
--- /dev/null
+++ b/templates/chrony.conf.debian.tmpl
@@ -0,0 +1,39 @@
+## template:jinja
+# Welcome to the chrony configuration file. See chrony.conf(5) for more
+# information about usable directives.
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# This directive specifies the location of the file containing ID/key pairs for
+# NTP authentication.
+keyfile /etc/chrony/chrony.keys
+
+# This directive specifies the file into which chronyd will store the rate
+# information.
+driftfile /var/lib/chrony/chrony.drift
+
+# Uncomment the following line to turn logging on.
+#log tracking measurements statistics
+
+# Log files location.
+logdir /var/log/chrony
+
+# Stop bad estimates upsetting machine clock.
+maxupdateskew 100.0
+
+# This directive enables kernel synchronisation (every 11 minutes) of the
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
+rtcsync
+
+# Step the system clock instead of slewing it if the adjustment is larger than
+# one second, but only in the first three clock updates.
+makestep 1 3
+
diff --git a/templates/chrony.conf.fedora.tmpl b/templates/chrony.conf.fedora.tmpl
new file mode 100644
index 00000000..8551f793
--- /dev/null
+++ b/templates/chrony.conf.fedora.tmpl
@@ -0,0 +1,48 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Enable hardware timestamping on all interfaces that support it.
+#hwtimestamp *
+
+# Increase the minimum number of selectable sources required to adjust
+# the system clock.
+#minsources 2
+
+# Allow NTP client access from local network.
+#allow 192.168.0.0/16
+
+# Serve time even if not synchronized to a time source.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Get TAI-UTC offset and leap seconds from the system tz database.
+leapsectz right/UTC
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.opensuse.tmpl b/templates/chrony.conf.opensuse.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.opensuse.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.rhel.tmpl b/templates/chrony.conf.rhel.tmpl
new file mode 100644
index 00000000..5b3542ef
--- /dev/null
+++ b/templates/chrony.conf.rhel.tmpl
@@ -0,0 +1,45 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Enable hardware timestamping on all interfaces that support it.
+#hwtimestamp *
+
+# Increase the minimum number of selectable sources required to adjust
+# the system clock.
+#minsources 2
+
+# Allow NTP client access from local network.
+#allow 192.168.0.0/16
+
+# Serve time even if not synchronized to a time source.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.sles.tmpl b/templates/chrony.conf.sles.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.sles.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.ubuntu.tmpl b/templates/chrony.conf.ubuntu.tmpl
new file mode 100644
index 00000000..50a6f518
--- /dev/null
+++ b/templates/chrony.conf.ubuntu.tmpl
@@ -0,0 +1,42 @@
+## template:jinja
+# Welcome to the chrony configuration file. See chrony.conf(5) for more
+# information about usable directives.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# This directive specifies the location of the file containing ID/key pairs for
+# NTP authentication.
+keyfile /etc/chrony/chrony.keys
+
+# This directive specifies the file into which chronyd will store the rate
+# information.
+driftfile /var/lib/chrony/chrony.drift
+
+# Uncomment the following line to turn logging on.
+#log tracking measurements statistics
+
+# Log files location.
+logdir /var/log/chrony
+
+# Stop bad estimates upsetting machine clock.
+maxupdateskew 100.0
+
+# This directive enables kernel synchronisation (every 11 minutes) of the
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
+rtcsync
+
+# Step the system clock instead of slewing it if the adjustment is larger than
+# one second, but only in the first three clock updates.
+makestep 1 3
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index c6c1877b..ab345491 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -62,6 +62,9 @@ ARG_SETS = {
(('-d', '--data-dir'),
{'help': 'directory to store test data in',
'action': 'store', 'metavar': 'DIR', 'required': False}),
+ (('--preserve-instance',),
+ {'help': 'do not destroy the instance under test',
+ 'action': 'store_true', 'default': False, 'required': False}),
(('--preserve-data',),
{'help': 'do not remove collected data after successful run',
'action': 'store_true', 'default': False, 'required': False}),),
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
index b9cfcfa6..f04d0cd4 100644
--- a/tests/cloud_tests/bddeb.py
+++ b/tests/cloud_tests/bddeb.py
@@ -113,7 +113,7 @@ def bddeb(args):
@return_value: fail count
"""
LOG.info('preparing to build cloud-init deb')
- (res, failed) = run_stage('build deb', [partial(setup_build, args)])
+ _res, failed = run_stage('build deb', [partial(setup_build, args)])
return failed
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index d4f9135b..642745d8 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -9,6 +9,7 @@ from cloudinit import util as c_util
from tests.cloud_tests import (config, LOG, setup_image, util)
from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
from tests.cloud_tests import platforms
+from tests.cloud_tests.testcases import base, get_test_class
def collect_script(instance, base_dir, script, script_name):
@@ -25,7 +26,8 @@ def collect_script(instance, base_dir, script, script_name):
script.encode(), rcs=False,
description='collect: {}'.format(script_name))
if err:
- LOG.debug("collect script %s had stderr: %s", script_name, err)
+ LOG.debug("collect script %s exited '%s' and had stderr: %s",
+                  script_name, exit, err)
if not isinstance(out, bytes):
raise util.PlatformError(
"Collection of '%s' returned type %s, expected bytes: %s" %
@@ -41,7 +43,7 @@ def collect_console(instance, base_dir):
@param base_dir: directory to write console log to
"""
logfile = os.path.join(base_dir, 'console.log')
- LOG.debug('getting console log for %s to %s', instance, logfile)
+ LOG.debug('getting console log for %s to %s', instance.name, logfile)
try:
data = instance.console_log()
except NotImplementedError as e:
@@ -62,6 +64,7 @@ def collect_test_data(args, snapshot, os_name, test_name):
res = ({}, 1)
# load test config
+ test_name_in = test_name
test_name = config.path_to_name(test_name)
test_config = config.load_test_config(test_name)
user_data = test_config['cloud_config']
@@ -74,6 +77,16 @@ def collect_test_data(args, snapshot, os_name, test_name):
LOG.warning('test config %s is not enabled, skipping', test_name)
return ({}, 0)
+ test_class = get_test_class(
+ config.name_to_module(test_name_in),
+ test_data={'platform': snapshot.platform_name, 'os_name': os_name},
+ test_conf=test_config['cloud_config'])
+ try:
+ test_class.maybeSkipTest()
+ except base.SkipTest as s:
+ LOG.warning('skipping test config %s: %s', test_name, s)
+ return ({}, 0)
+
# if testcase requires a feature flag that the image does not support,
# skip the testcase with a warning
req_features = test_config.get('required_features', [])
@@ -92,7 +105,8 @@ def collect_test_data(args, snapshot, os_name, test_name):
# create test instance
component = PlatformComponent(
partial(platforms.get_instance, snapshot, user_data,
- block=True, start=False, use_desc=test_name))
+ block=True, start=False, use_desc=test_name),
+ preserve_instance=args.preserve_instance)
LOG.info('collecting test data for test: %s', test_name)
with component as instance:
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
index 3bad021f..529e79cd 100644
--- a/tests/cloud_tests/platforms/instances.py
+++ b/tests/cloud_tests/platforms/instances.py
@@ -87,32 +87,39 @@ class Instance(TargetBase):
self._ssh_client = None
def _ssh_connect(self):
- """Connect via SSH."""
+ """Connect via SSH.
+
+        Attempt an SSH connection to the instance's IP and port. If the
+        connection fails, retry up to two more times (three attempts in
+        total), sleeping a few seconds between attempts.
+ """
if self._ssh_client:
return self._ssh_client
if not self.ssh_ip or not self.ssh_port:
- raise ValueError
+ raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
+ (self.ssh_ip, self.ssh_port))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
- retries = 30
+ retries = 3
while retries:
try:
client.connect(username=self.ssh_username,
hostname=self.ssh_ip, port=self.ssh_port,
- pkey=private_key, banner_timeout=30)
+ pkey=private_key)
self._ssh_client = client
return client
except (ConnectionRefusedError, AuthenticationException,
BadHostKeyException, ConnectionResetError, SSHException,
- OSError) as e:
+ OSError):
retries -= 1
- time.sleep(10)
+ LOG.debug('Retrying ssh connection on connect failure')
+ time.sleep(3)
- ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % (
+ ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
self.ssh_username, self.ssh_ip, self.ssh_port
)
raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
@@ -128,18 +135,31 @@ class Instance(TargetBase):
return ' '.join(l for l in test.strip().splitlines()
if not l.lstrip().startswith('#'))
- time = self.config['boot_timeout']
+ boot_timeout = self.config['boot_timeout']
tests = [self.config['system_ready_script']]
if wait_for_cloud_init:
tests.append(self.config['cloud_init_ready_script'])
formatted_tests = ' && '.join(clean_test(t) for t in tests)
cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
- 'exit 0; sleep 1; done; exit 1').format(time=time,
+ 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
test=formatted_tests)
- if self.execute(cmd, rcs=(0, 1))[-1] != 0:
- raise OSError('timeout: after {}s system not started'.format(time))
-
+ end_time = time.time() + boot_timeout
+ while True:
+ try:
+ return_code = self.execute(
+ cmd, rcs=(0, 1), description='wait for instance start'
+ )[-1]
+ if return_code == 0:
+ break
+ except util.InTargetExecuteError:
+ LOG.warning("failed to connect via SSH")
+
+ if time.time() < end_time:
+ time.sleep(3)
+ else:
+ raise util.PlatformError('ssh', 'after %ss instance is not '
+ 'reachable' % boot_timeout)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
index 0d957bca..83c97ab4 100644
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ b/tests/cloud_tests/platforms/lxd/instance.py
@@ -12,6 +12,8 @@ from tests.cloud_tests.util import PlatformError
from ..instances import Instance
+from pylxd import exceptions as pylxd_exc
+
class LXDInstance(Instance):
"""LXD container backed instance."""
@@ -30,6 +32,9 @@ class LXDInstance(Instance):
@param config: image config
@param features: supported feature flags
"""
+ if not pylxd_container:
+ raise ValueError("Invalid value pylxd_container: %s" %
+ pylxd_container)
self._pylxd_container = pylxd_container
super(LXDInstance, self).__init__(
platform, name, properties, config, features)
@@ -40,9 +45,19 @@ class LXDInstance(Instance):
@property
def pylxd_container(self):
"""Property function."""
+ if self._pylxd_container is None:
+ raise RuntimeError(
+ "%s: Attempted use of pylxd_container after deletion." % self)
self._pylxd_container.sync()
return self._pylxd_container
+ def __str__(self):
+ return (
+ '%s(name=%s) status=%s' %
+ (self.__class__.__name__, self.name,
+ ("deleted" if self._pylxd_container is None else
+ self.pylxd_container.status)))
+
def _execute(self, command, stdin=None, env=None):
if env is None:
env = {}
@@ -152,9 +167,8 @@ class LXDInstance(Instance):
return fp.read()
try:
- stdout, stderr = subp(
- ['lxc', 'console', '--show-log', self.name], decode=False)
- return stdout
+ return subp(['lxc', 'console', '--show-log', self.name],
+ decode=False)[0]
except ProcessExecutionError as e:
raise PlatformError(
"console log",
@@ -166,10 +180,27 @@ class LXDInstance(Instance):
self.shutdown(wait=wait)
self.start(wait=wait)
- def shutdown(self, wait=True):
+ def shutdown(self, wait=True, retry=1):
"""Shutdown instance."""
- if self.pylxd_container.status != 'Stopped':
+ if self.pylxd_container.status == 'Stopped':
+ return
+
+ try:
+ LOG.debug("%s: shutting down (wait=%s)", self, wait)
self.pylxd_container.stop(wait=wait)
+ except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
+ # An exception happens here sometimes (LP: #1783198)
+ # LOG it, and try again.
+ LOG.warning(
+ ("%s: shutdown(retry=%d) caught %s in shutdown "
+ "(response=%s): %s"),
+ self, retry, e.__class__.__name__, e.response, e)
+ if isinstance(e, pylxd_exc.NotFound):
+ LOG.debug("container_exists(%s) == %s",
+ self.name, self.platform.container_exists(self.name))
+ if retry == 0:
+ raise e
+ return self.shutdown(wait=wait, retry=retry - 1)
def start(self, wait=True, wait_for_cloud_init=False):
"""Start instance."""
@@ -190,12 +221,14 @@ class LXDInstance(Instance):
def destroy(self):
"""Clean up instance."""
+ LOG.debug("%s: deleting container.", self)
self.unfreeze()
self.shutdown()
self.pylxd_container.delete(wait=True)
+ self._pylxd_container = None
+
if self.platform.container_exists(self.name):
- raise OSError('container {} was not properly removed'
- .format(self.name))
+ raise OSError('%s: container was not properly removed' % self)
if self._console_log_file and os.path.exists(self._console_log_file):
os.unlink(self._console_log_file)
shutil.rmtree(self.tmpd)
@@ -209,16 +242,15 @@ def _has_proper_console_support():
if 'console' not in info.get('api_extensions', []):
reason = "LXD server does not support console api extension"
else:
- dver = info.get('environment', {}).get('driver_version', "")
+ dver = str(info.get('environment', {}).get('driver_version', ""))
if dver.startswith("2.") or dver.startswith("1."):
reason = "LXD Driver version not 3.x+ (%s)" % dver
else:
try:
- stdout, stderr = subp(['lxc', 'console', '--help'],
- decode=False)
+ stdout = subp(['lxc', 'console', '--help'], decode=False)[0]
if not (b'console' in stdout and b'log' in stdout):
reason = "no '--log' in lxc console --help"
- except ProcessExecutionError as e:
+ except ProcessExecutionError:
reason = "no 'console' command in lxc client"
if reason:
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index c7dcbe83..defae02b 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -129,6 +129,22 @@ features:
releases:
# UBUNTU =================================================================
+ cosmic:
+ # EOL: Jul 2019
+ default:
+ enabled: true
+ release: cosmic
+ version: 18.10
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: cosmic
+ setup_overrides: null
+ override_templates: false
bionic:
# EOL: Apr 2023
default:
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
index 6d242115..39f4517f 100644
--- a/tests/cloud_tests/setup_image.py
+++ b/tests/cloud_tests/setup_image.py
@@ -4,6 +4,7 @@
from functools import partial
import os
+import yaml
from tests.cloud_tests import LOG
from tests.cloud_tests import stage, util
@@ -25,10 +26,9 @@ def installed_package_version(image, package, ensure_installed=True):
else:
raise NotImplementedError
- msg = 'query version for package: {}'.format(package)
- (out, err, exit) = image.execute(
- cmd, description=msg, rcs=(0,) if ensure_installed else range(0, 256))
- return out.strip()
+ return image.execute(
+ cmd, description='query version for package: {}'.format(package),
+ rcs=(0,) if ensure_installed else range(0, 256))[0].strip()
def install_deb(args, image):
@@ -54,7 +54,7 @@ def install_deb(args, image):
remote_path], description=msg)
# check installed deb version matches package
fmt = ['-W', "--showformat=${Version}"]
- (out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path])
+ out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0]
expected_version = out.strip()
found_version = installed_package_version(image, 'cloud-init')
if expected_version != found_version:
@@ -85,7 +85,7 @@ def install_rpm(args, image):
image.execute(['rpm', '-U', remote_path], description=msg)
fmt = ['--queryformat', '"%{VERSION}"']
- (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
+ (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
expected_version = out.strip()
found_version = installed_package_version(image, 'cloud-init')
if expected_version != found_version:
@@ -221,7 +221,14 @@ def setup_image(args, image):
calls = [partial(stage.run_single, desc, partial(func, args, image))
for name, func, desc in handlers if getattr(args, name, None)]
- LOG.info('setting up %s', image)
+ try:
+ data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True))
+ info = ' '.join(["%s=%s" % (k, data.get(k))
+ for k in ("build_name", "serial") if k in data])
+ except Exception as e:
+ info = "N/A (%s)" % e
+
+ LOG.info('setting up %s (%s)', image, info)
res = stage.run_stage(
'set up for {}'.format(image), calls, continue_after_error=False)
return res
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
index 74a7d46d..d64a1dcc 100644
--- a/tests/cloud_tests/stage.py
+++ b/tests/cloud_tests/stage.py
@@ -12,9 +12,15 @@ from tests.cloud_tests import LOG
class PlatformComponent(object):
"""Context manager to safely handle platform components."""
- def __init__(self, get_func):
- """Store get_<platform component> function as partial with no args."""
+ def __init__(self, get_func, preserve_instance=False):
+ """Store get_<platform component> function as partial with no args.
+
+ @param get_func: Callable returning an instance from the platform.
+ @param preserve_instance: Boolean, when True, do not destroy instance
+ after test. Used for test development.
+ """
self.get_func = get_func
+ self.preserve_instance = preserve_instance
def __enter__(self):
"""Create instance of platform component."""
@@ -24,7 +30,10 @@ class PlatformComponent(object):
def __exit__(self, etype, value, trace):
"""Destroy instance."""
if self.instance is not None:
- self.instance.destroy()
+ if self.preserve_instance:
+ LOG.info('Preserving test instance %s', self.instance.name)
+ else:
+ self.instance.destroy()
def run_single(name, call):
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
index a3e29900..fb9a5d27 100644
--- a/tests/cloud_tests/testcases.yaml
+++ b/tests/cloud_tests/testcases.yaml
@@ -24,9 +24,13 @@ base_test_data:
status.json: |
#!/bin/sh
cat /run/cloud-init/status.json
- cloud-init-version: |
+ package-versions: |
#!/bin/sh
- dpkg-query -W -f='${Version}' cloud-init
+ dpkg-query --show
+ build.info: |
+ #!/bin/sh
+ binfo=/etc/cloud/build.info
+ [ -f "$binfo" ] && cat "$binfo" || echo "N/A"
system.journal.gz: |
#!/bin/sh
[ -d /run/systemd ] || { echo "not systemd."; exit 0; }
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index bd548f5a..6bb39f77 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -4,8 +4,7 @@
import importlib
import inspect
-import unittest
-from unittest.util import strclass
+import unittest2
from cloudinit.util import read_conf
@@ -13,7 +12,7 @@ from tests.cloud_tests import config
from tests.cloud_tests.testcases.base import CloudTestCase as base_test
-def discover_tests(test_name):
+def discover_test(test_name):
"""Discover tests in test file for 'testname'.
@return_value: list of test classes
@@ -25,35 +24,48 @@ def discover_tests(test_name):
except NameError:
raise ValueError('no test verifier found at: {}'.format(testmod_name))
- return [mod for name, mod in inspect.getmembers(testmod)
- if inspect.isclass(mod) and base_test in inspect.getmro(mod) and
- getattr(mod, '__test__', True)]
+ found = [mod for name, mod in inspect.getmembers(testmod)
+ if (inspect.isclass(mod)
+ and base_test in inspect.getmro(mod)
+ and getattr(mod, '__test__', True))]
+ if len(found) != 1:
+ raise RuntimeError(
+ "Unexpected situation, multiple tests for %s: %s" % (
+ test_name, found))
+ return found
-def get_suite(test_name, data, conf):
- """Get test suite with all tests for 'testname'.
- @return_value: a test suite
- """
- suite = unittest.TestSuite()
- for test_class in discover_tests(test_name):
+def get_test_class(test_name, test_data, test_conf):
+ test_class = discover_test(test_name)[0]
+
+ class DynamicTestSubclass(test_class):
- class tmp(test_class):
+ _realclass = test_class
+ data = test_data
+ conf = test_conf
+ release_conf = read_conf(config.RELEASES_CONF)['releases']
- _realclass = test_class
+ def __str__(self):
+ return "%s (%s)" % (self._testMethodName,
+ unittest2.util.strclass(self._realclass))
- def __str__(self):
- return "%s (%s)" % (self._testMethodName,
- strclass(self._realclass))
+ @classmethod
+ def setUpClass(cls):
+ cls.maybeSkipTest()
- @classmethod
- def setUpClass(cls):
- cls.data = data
- cls.conf = conf
- cls.release_conf = read_conf(config.RELEASES_CONF)['releases']
+ return DynamicTestSubclass
- suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp))
+def get_suite(test_name, data, conf):
+ """Get test suite with all tests for 'testname'.
+
+ @return_value: a test suite
+ """
+ suite = unittest2.TestSuite()
+ suite.addTest(
+ unittest2.defaultTestLoader.loadTestsFromTestCase(
+ get_test_class(test_name, data, conf)))
return suite
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 324c7c91..e18d601c 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -5,15 +5,15 @@
import crypt
import json
import re
-import unittest
+import unittest2
from cloudinit import util as c_util
-SkipTest = unittest.SkipTest
+SkipTest = unittest2.SkipTest
-class CloudTestCase(unittest.TestCase):
+class CloudTestCase(unittest2.TestCase):
"""Base test class for verifiers."""
# data gets populated in get_suite.setUpClass
@@ -31,6 +31,32 @@ class CloudTestCase(unittest.TestCase):
def is_distro(self, distro_name):
return self.os_cfg['os'] == distro_name
+ @classmethod
+ def maybeSkipTest(cls):
+ """Present to allow subclasses to override and raise a skipTest."""
+ pass
+
+ def assertPackageInstalled(self, name, version=None):
+ """Check dpkg-query --show output for matching package name.
+
+ @param name: package base name
+ @param version: string representing a package version or part of a
+ version.
+ """
+ pkg_out = self.get_data_file('package-versions')
+ pkg_match = re.search(
+ '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE)
+ if pkg_match:
+ installed_version = pkg_match.group('version')
+ if not version:
+ return # Success
+ if installed_version.startswith(version):
+ return # Success
+ raise AssertionError(
+ 'Expected package version %s-%s not found. Found %s' %
+                (name, version, installed_version))
+ raise AssertionError('Package not installed: %s' % name)
+
def os_version_cmp(self, cmp_version):
"""Compare the version of the test to comparison_version.
@@ -146,29 +172,31 @@ class CloudTestCase(unittest.TestCase):
'Skipping instance-data.json test.'
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
- self.assertEqual(
- ['ds/user-data'], instance_data['base64-encoded-keys'])
+ self.assertItemsEqual(
+ [],
+ instance_data['base64_encoded_keys'])
ds = instance_data.get('ds', {})
- macs = ds.get('network', {}).get('interfaces', {}).get('macs', {})
+ v1_data = instance_data.get('v1', {})
+ metadata = ds.get('meta_data', {})
+ macs = metadata.get(
+ 'network', {}).get('interfaces', {}).get('macs', {})
if not macs:
raise AssertionError('No network data from EC2 meta-data')
# Check meta-data items we depend on
expected_net_keys = [
'public-ipv4s', 'ipv4-associations', 'local-hostname',
'public-hostname']
- for mac, mac_data in macs.items():
+ for mac_data in macs.values():
for key in expected_net_keys:
self.assertIn(key, mac_data)
self.assertIsNotNone(
- ds.get('placement', {}).get('availability-zone'),
+ metadata.get('placement', {}).get('availability-zone'),
'Could not determine EC2 Availability zone placement')
- ds = instance_data.get('ds', {})
- v1_data = instance_data.get('v1', {})
self.assertIsNotNone(
- v1_data['availability-zone'], 'expected ec2 availability-zone')
- self.assertEqual('aws', v1_data['cloud-name'])
- self.assertIn('i-', v1_data['instance-id'])
- self.assertIn('ip-', v1_data['local-hostname'])
+ v1_data['availability_zone'], 'expected ec2 availability_zone')
+ self.assertEqual('aws', v1_data['cloud_name'])
+ self.assertIn('i-', v1_data['instance_id'])
+ self.assertIn('ip-', v1_data['local_hostname'])
self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
def test_instance_data_json_lxd(self):
@@ -191,16 +219,14 @@ class CloudTestCase(unittest.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertEqual(
- ['ds/user-data', 'ds/vendor-data'],
- sorted(instance_data['base64-encoded-keys']))
- self.assertEqual('nocloud', v1_data['cloud-name'])
+ self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys']))
+ self.assertEqual('nocloud', v1_data['cloud_name'])
self.assertIsNone(
- v1_data['availability-zone'],
- 'found unexpected lxd availability-zone %s' %
- v1_data['availability-zone'])
- self.assertIn('cloud-test', v1_data['instance-id'])
- self.assertIn('cloud-test', v1_data['local-hostname'])
+ v1_data['availability_zone'],
+ 'found unexpected lxd availability_zone %s' %
+ v1_data['availability_zone'])
+ self.assertIn('cloud-test', v1_data['instance_id'])
+ self.assertIn('cloud-test', v1_data['local_hostname'])
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
@@ -226,18 +252,17 @@ class CloudTestCase(unittest.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertEqual(
- ['ds/user-data'], instance_data['base64-encoded-keys'])
- self.assertEqual('nocloud', v1_data['cloud-name'])
+ self.assertItemsEqual([], instance_data['base64_encoded_keys'])
+ self.assertEqual('nocloud', v1_data['cloud_name'])
self.assertIsNone(
- v1_data['availability-zone'],
- 'found unexpected kvm availability-zone %s' %
- v1_data['availability-zone'])
+ v1_data['availability_zone'],
+ 'found unexpected kvm availability_zone %s' %
+ v1_data['availability_zone'])
self.assertIsNotNone(
- re.match('[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
- v1_data['instance-id']),
- 'kvm instance-id is not a UUID: %s' % v1_data['instance-id'])
- self.assertIn('ubuntu', v1_data['local-hostname'])
+ re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
+ v1_data['instance_id']),
+ 'kvm instance_id is not a UUID: %s' % v1_data['instance_id'])
+ self.assertIn('ubuntu', v1_data['local_hostname'])
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py
index 93b7a82d..4067348d 100644
--- a/tests/cloud_tests/testcases/examples/including_user_groups.py
+++ b/tests/cloud_tests/testcases/examples/including_user_groups.py
@@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase):
def test_user_root_in_secret(self):
"""Test root user is in 'secret' group."""
- user, _, groups = self.get_data_file('root_groups').partition(":")
+ _user, _, groups = self.get_data_file('root_groups').partition(":")
self.assertIn("secret", groups.split(),
msg="User root is not in group 'secret'")
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
index 005ca014..74d0529a 100644
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ b/tests/cloud_tests/testcases/modules/byobu.py
@@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase):
def test_byobu_installed(self):
"""Test byobu installed."""
- out = self.get_data_file('byobu_installed')
- self.assertIn('/usr/bin/byobu', out)
+ self.assertPackageInstalled('byobu')
def test_byobu_profile_enabled(self):
"""Test byobu profile.d file exists."""
diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml
index a9aa1f3f..d002a611 100644
--- a/tests/cloud_tests/testcases/modules/byobu.yaml
+++ b/tests/cloud_tests/testcases/modules/byobu.yaml
@@ -7,9 +7,6 @@ cloud_config: |
#cloud-config
byobu_by_default: enable
collect_scripts:
- byobu_installed: |
- #!/bin/bash
- which byobu
byobu_profile_enabled: |
#!/bin/bash
ls /etc/profile.d/Z97-byobu.sh
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
index e75f0413..6b56f639 100644
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ b/tests/cloud_tests/testcases/modules/ca_certs.py
@@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base
class TestCaCerts(base.CloudTestCase):
"""Test ca certs module."""
- def test_cert_count(self):
- """Test the count is proper."""
- out = self.get_data_file('cert_count')
- self.assertEqual(5, int(out))
+ def test_certs_updated(self):
+ """Test certs have been updated in /etc/ssl/certs."""
+ out = self.get_data_file('cert_links')
+        # Bionic update-ca-certificates creates fewer links (Debian #895075)
+ unlinked_files = []
+ links = {}
+ for cert_line in out.splitlines():
+ if '->' in cert_line:
+ fname, _sep, link = cert_line.split()
+ links[fname] = link
+ else:
+ unlinked_files.append(cert_line)
+ self.assertEqual(['ca-certificates.crt'], unlinked_files)
+ self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0'])
+ self.assertEqual(
+ '/usr/share/ca-certificates/cloud-init-ca-certs.crt',
+ links['cloud-init-ca-certs.pem'])
def test_cert_installed(self):
"""Test line from our cert exists."""
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml
index d939f435..2cd91551 100644
--- a/tests/cloud_tests/testcases/modules/ca_certs.yaml
+++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml
@@ -43,9 +43,13 @@ cloud_config: |
DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
-----END CERTIFICATE-----
collect_scripts:
- cert_count: |
+ cert_links: |
#!/bin/bash
- ls -l /etc/ssl/certs | wc -l
+ # links printed <filename> -> <link target>
+ # non-links printed <filename>
+ for file in `ls /etc/ssl/certs`; do
+ [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file;
+ done
cert: |
#!/bin/bash
md5sum /etc/ssl/certs/ca-certificates.crt
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
index c0262ba3..ea545e0a 100644
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ b/tests/cloud_tests/testcases/modules/lxd_bridge.py
@@ -7,15 +7,25 @@ from tests.cloud_tests.testcases import base
class TestLxdBridge(base.CloudTestCase):
"""Test LXD module."""
+ @classmethod
+ def maybeSkipTest(cls):
+ """Skip on cosmic for two reasons:
+ a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
+           b.) 'apt install lxd' installs via snap, which can be slow
+           because it downloads the core and lxd snaps."""
+ os_name = cls.data.get('os_name', 'UNKNOWN')
+ if os_name == "cosmic":
+ raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
+
def test_lxd(self):
"""Test lxd installed."""
out = self.get_data_file('lxd')
- self.assertIn('/usr/bin/lxd', out)
+ self.assertIn('/lxd', out)
def test_lxc(self):
"""Test lxc installed."""
out = self.get_data_file('lxc')
- self.assertIn('/usr/bin/lxc', out)
+ self.assertIn('/lxc', out)
def test_bridge(self):
"""Test bridge config."""
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
index 1495674e..797bafed 100644
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ b/tests/cloud_tests/testcases/modules/lxd_dir.py
@@ -7,14 +7,24 @@ from tests.cloud_tests.testcases import base
class TestLxdDir(base.CloudTestCase):
"""Test LXD module."""
+ @classmethod
+ def maybeSkipTest(cls):
+ """Skip on cosmic for two reasons:
+ a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
+           b.) 'apt install lxd' installs via snap, which can be slow
+           because it downloads the core and lxd snaps."""
+ os_name = cls.data.get('os_name', 'UNKNOWN')
+ if os_name == "cosmic":
+ raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
+
def test_lxd(self):
"""Test lxd installed."""
out = self.get_data_file('lxd')
- self.assertIn('/usr/bin/lxd', out)
+ self.assertIn('/lxd', out)
def test_lxc(self):
"""Test lxc installed."""
out = self.get_data_file('lxc')
- self.assertIn('/usr/bin/lxc', out)
+ self.assertIn('/lxc', out)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
index b50e52fe..c63cc15e 100644
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ b/tests/cloud_tests/testcases/modules/ntp.py
@@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase):
def test_ntp_installed(self):
"""Test ntp installed"""
- out = self.get_data_file('ntp_installed')
- self.assertEqual(0, int(out))
+ self.assertPackageInstalled('ntp')
def test_ntp_dist_entries(self):
"""Test dist config file is empty"""
out = self.get_data_file('ntp_conf_dist_empty')
self.assertEqual(0, int(out))
- def test_ntp_entires(self):
+ def test_ntp_entries(self):
"""Test config entries"""
out = self.get_data_file('ntp_conf_pool_list')
self.assertIn('pool.ntp.org iburst', out)
diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml
index 2530d72e..7ea0707d 100644
--- a/tests/cloud_tests/testcases/modules/ntp.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp.yaml
@@ -4,6 +4,7 @@
cloud_config: |
#cloud-config
ntp:
+ ntp_client: ntp
pools: []
servers: []
collect_scripts:
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
new file mode 100644
index 00000000..0f4c3d08
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloud-init Integration Test Verify Script."""
+import unittest2
+
+from tests.cloud_tests.testcases import base
+
+
+class TestNtpChrony(base.CloudTestCase):
+ """Test ntp module with chrony client"""
+
+ def setUp(self):
+ """Skip this suite of tests on lxd and artful or older."""
+ if self.platform == 'lxd':
+ if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
+ raise unittest2.SkipTest(
+ 'No support for chrony on containers <= artful.'
+ ' LP: #1589780')
+ return super(TestNtpChrony, self).setUp()
+
+ def test_chrony_entries(self):
+ """Test chrony config entries"""
+ out = self.get_data_file('chrony_conf')
+ self.assertIn('.pool.ntp.org', out)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
new file mode 100644
index 00000000..120735e2
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
@@ -0,0 +1,17 @@
+#
+# ntp enabled, chrony selected, check conf file
+# as chrony won't start in a container
+#
+cloud_config: |
+ #cloud-config
+ ntp:
+ enabled: true
+ ntp_client: chrony
+collect_scripts:
+ chrony_conf: |
+ #!/bin/sh
+ set -- /etc/chrony.conf /etc/chrony/chrony.conf
+ for p in "$@"; do
+ [ -e "$p" ] && { cat "$p"; exit; }
+ done
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
index d490b228..60fa0fd1 100644
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
@@ -9,6 +9,7 @@ required_features:
cloud_config: |
#cloud-config
ntp:
+ ntp_client: ntp
pools:
- 0.cloud-init.mypool
- 1.cloud-init.mypool
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
index 6b13b70e..ee636679 100644
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
@@ -6,6 +6,7 @@ required_features:
cloud_config: |
#cloud-config
ntp:
+ ntp_client: ntp
servers:
- 172.16.15.14
- 172.16.17.18
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
new file mode 100644
index 00000000..eca750bc
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
@@ -0,0 +1,15 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloud-init Integration Test Verify Script."""
+from tests.cloud_tests.testcases import base
+
+
+class TestNtpTimesyncd(base.CloudTestCase):
+ """Test ntp module with systemd-timesyncd client"""
+
+ def test_timesyncd_entries(self):
+ """Test timesyncd config entries"""
+ out = self.get_data_file('timesyncd_conf')
+ self.assertIn('.pool.ntp.org', out)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
new file mode 100644
index 00000000..ee47a741
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
@@ -0,0 +1,15 @@
+#
+# ntp enabled, systemd-timesyncd selected, check conf file
+# as systemd-timesyncd won't start in a container
+#
+cloud_config: |
+ #cloud-config
+ ntp:
+ enabled: true
+ ntp_client: systemd-timesyncd
+collect_scripts:
+ timesyncd_conf: |
+ #!/bin/sh
+ cat /etc/systemd/timesyncd.conf.d/cloud-init.conf
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
index a92dec22..fecad768 100644
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
@@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base
class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
"""Test package install update upgrade module."""
- def test_installed_htop(self):
- """Test htop got installed."""
- out = self.get_data_file('dpkg_htop')
- self.assertEqual(1, int(out))
+ def test_installed_sl(self):
+ """Test sl got installed."""
+ self.assertPackageInstalled('sl')
def test_installed_tree(self):
"""Test tree got installed."""
- out = self.get_data_file('dpkg_tree')
- self.assertEqual(1, int(out))
+ self.assertPackageInstalled('tree')
def test_apt_history(self):
"""Test apt history for update command."""
@@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
self.assertIn(
'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
'::=--force-confold --option=Dpkg::options::=--force-unsafe-io '
- '--assume-yes --quiet install htop tree', out)
+ '--assume-yes --quiet install sl tree', out)
def test_cloud_init_output(self):
"""Test cloud-init-output for install & upgrade stuff."""
out = self.get_data_file('cloud-init-output.log')
self.assertIn('Setting up tree (', out)
- self.assertIn('Setting up htop (', out)
+ self.assertIn('Setting up sl (', out)
self.assertIn('Reading package lists...', out)
self.assertIn('Building dependency tree...', out)
self.assertIn('Reading state information...', out)
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
index 71d24b83..dd79e438 100644
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
+++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
@@ -15,7 +15,7 @@ required_features:
cloud_config: |
#cloud-config
packages:
- - htop
+ - sl
- tree
package_update: true
package_upgrade: true
@@ -23,11 +23,8 @@ collect_scripts:
apt_history_cmdline: |
#!/bin/bash
grep ^Commandline: /var/log/apt/history.log
- dpkg_htop: |
+ dpkg_show: |
#!/bin/bash
- dpkg -l | grep htop | wc -l
- dpkg_tree: |
- #!/bin/bash
- dpkg -l | grep tree | wc -l
+ dpkg-query --show
# vi: ts=4 expandtab
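
The change above replaces per-package `dpkg -l | grep | wc -l` counts with one `dpkg-query --show` dump, letting the verify script use assertPackageInstalled. A hypothetical sketch of the check such a helper could perform over the collected output (the real helper is defined in tests/cloud_tests/testcases/base.py and may differ):

    def package_installed(dpkg_show_output, name):
        """True if `name` is listed in `dpkg-query --show` output."""
        for line in dpkg_show_output.splitlines():
            fields = line.split('\t')  # default format: "package\tversion"
            if fields and fields[0].split(':')[0] == name:  # drop :arch
                return True
        return False

    assert package_installed('sl\t5.02-1\ntree\t1.7.0-5\n', 'sl')
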
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py
deleted file mode 100644
index 70917a4c..00000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class Test(base.CloudTestCase):
- """Test salt minion module."""
-
- def test_minon_master(self):
- """Test master value in config."""
- out = self.get_data_file('minion')
- self.assertIn('master: salt.mydomain.com', out)
-
- def test_minion_pem(self):
- """Test private key."""
- out = self.get_data_file('minion.pem')
- self.assertIn('------BEGIN PRIVATE KEY------', out)
- self.assertIn('<key data>', out)
- self.assertIn('------END PRIVATE KEY-------', out)
-
- def test_minion_pub(self):
- """Test public key."""
- out = self.get_data_file('minion.pub')
- self.assertIn('------BEGIN PUBLIC KEY-------', out)
- self.assertIn('<key data>', out)
- self.assertIn('------END PUBLIC KEY-------', out)
-
- def test_grains(self):
- """Test master value in config."""
- out = self.get_data_file('grains')
- self.assertIn('role: web', out)
-
- def test_minion_installed(self):
- """Test if the salt-minion package is installed"""
- out = self.get_data_file('minion_installed')
- self.assertEqual(1, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml
deleted file mode 100644
index f20b9765..00000000
--- a/tests/cloud_tests/testcases/modules/salt_minion.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Create config for a salt minion
-#
-# 2016-11-17: Currently takes >60 seconds results in test failure
-#
-enabled: True
-cloud_config: |
- #cloud-config
- salt_minion:
- conf:
- master: salt.mydomain.com
- public_key: |
- ------BEGIN PUBLIC KEY-------
- <key data>
- ------END PUBLIC KEY-------
- private_key: |
- ------BEGIN PRIVATE KEY------
- <key data>
- ------END PRIVATE KEY-------
- grains:
- role: web
-collect_scripts:
- minion: |
- #!/bin/bash
- cat /etc/salt/minion
- minion_id: |
- #!/bin/bash
- cat /etc/salt/minion_id
- minion.pem: |
- #!/bin/bash
- cat /etc/salt/pki/minion/minion.pem
- minion.pub: |
- #!/bin/bash
- cat /etc/salt/pki/minion/minion.pub
- grains: |
- #!/bin/bash
- cat /etc/salt/grains
- minion_installed: |
- #!/bin/bash
- dpkg -l | grep salt-minion | grep ii | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml
index 44043f31..322199c3 100644
--- a/tests/cloud_tests/testcases/modules/snap.yaml
+++ b/tests/cloud_tests/testcases/modules/snap.yaml
@@ -1,6 +1,9 @@
#
# Install snappy
#
+# Aug 23, 2018: Disabled due to requiring a proxy for testing;
+# tests do not handle the proxy well at this time.
+enabled: False
required_features:
- snap
cloud_config: |
diff --git a/tests/cloud_tests/testcases/modules/snappy.yaml b/tests/cloud_tests/testcases/modules/snappy.yaml
index 43f93295..8ac322ae 100644
--- a/tests/cloud_tests/testcases/modules/snappy.yaml
+++ b/tests/cloud_tests/testcases/modules/snappy.yaml
@@ -1,6 +1,9 @@
#
# Install snappy
#
+# Aug 17, 2018: Disabled due to requiring a proxy for testing;
+# tests do not handle the proxy well at this time.
+enabled: False
required_features:
- snap
cloud_config: |
diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py
index 93b7a82d..4067348d 100644
--- a/tests/cloud_tests/testcases/modules/user_groups.py
+++ b/tests/cloud_tests/testcases/modules/user_groups.py
@@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase):
def test_user_root_in_secret(self):
"""Test root user is in 'secret' group."""
- user, _, groups = self.get_data_file('root_groups').partition(":")
+ _user, _, groups = self.get_data_file('root_groups').partition(":")
self.assertIn("secret", groups.split(),
msg="User root is not in group 'secret'")
diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py
index 7bd520f6..526a2ebd 100644
--- a/tests/cloud_tests/testcases/modules/write_files.py
+++ b/tests/cloud_tests/testcases/modules/write_files.py
@@ -14,8 +14,11 @@ class TestWriteFiles(base.CloudTestCase):
def test_binary(self):
"""Test binary file reads as executable."""
- out = self.get_data_file('file_binary')
- self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
+ out = self.get_data_file('file_binary').strip()
+ md5 = "3801184b97bb8c6e63fa0e1eae2920d7"
+ sha256 = ("2c791c4037ea5bd7e928d6a87380f8ba7a803cd83d"
+ "5e4f269e28f5090f0f2c9a")
+ self.assertIn(out, (md5 + " -", sha256 + " -"))
def test_gzip(self):
"""Test gzip file shows up as a shell script."""
diff --git a/tests/cloud_tests/testcases/modules/write_files.yaml b/tests/cloud_tests/testcases/modules/write_files.yaml
index ce936b7b..cc7ea4bd 100644
--- a/tests/cloud_tests/testcases/modules/write_files.yaml
+++ b/tests/cloud_tests/testcases/modules/write_files.yaml
@@ -3,6 +3,13 @@
#
# NOTE: on trusty 'file' has an output formatting error for binary files and
# has 2 spaces in 'LSB executable', which causes a failure here
+#
+# NOTE: the binary data can be any binary data, not only executables,
+# and can be generated via the base64 command as such:
+# $ base64 < hello > hello.txt
+# the reverse operation is:
+# $ base64 -d < hello.txt > hello
+#
required_features:
- no_file_fmt_e
cloud_config: |
@@ -19,9 +26,7 @@ cloud_config: |
SMBDOPTIONS="-D"
path: /root/file_text
- content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
+ /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK
path: /root/file_binary
permissions: '0555'
- encoding: gzip
@@ -38,7 +43,9 @@ collect_scripts:
file /root/file_text
file_binary: |
#!/bin/bash
- file /root/file_binary
+ for hasher in md5sum sha256sum; do
+ $hasher </root/file_binary && break
+ done
file_gzip: |
#!/bin/bash
file /root/file_gzip
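
The digests asserted in write_files.py can be reproduced locally by decoding the !!binary payload above and hashing it with the standard library; the md5sum/sha256sum collect script hashes the same bytes on the instance:

    import base64
    import hashlib

    payload = base64.b64decode('/Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK')
    print(hashlib.md5(payload).hexdigest())     # md5 checked by test_binary
    print(hashlib.sha256(payload).hexdigest())  # sha256 checked by test_binary
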
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 3dd4996d..06f7d865 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -358,7 +358,7 @@ class TargetBase(object):
# when sh is invoked with '-c', then the first argument is "$0"
# which is commonly understood as the "program name".
# 'read_data' is the program name, and 'remote_path' is '$1'
- stdout, stderr, rc = self._execute(
+ stdout, _stderr, rc = self._execute(
["sh", "-c", 'exec cat "$1"', 'read_data', remote_path])
if rc != 0:
raise RuntimeError("Failed to read file '%s'" % remote_path)
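
The read_data change keeps the sh -c convention the comment describes: the word after the -c script becomes "$0" (the program name) and later words become "$1", "$2", and so on. A standalone demonstration (any readable file path works):

    import subprocess

    out = subprocess.check_output(
        ['sh', '-c', 'exec cat "$1"', 'read_data', '/etc/hostname'])
    print(out.decode())
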
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index 5a68a484..9911ecf2 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -3,7 +3,7 @@
"""Verify test results."""
import os
-import unittest
+import unittest2
from tests.cloud_tests import (config, LOG, util, testcases)
@@ -18,7 +18,7 @@ def verify_data(data_dir, platform, os_name, tests):
@return_value: {<test_name>: {passed: True/False, failures: []}}
"""
base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
+ runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
res = {}
for test_name in tests:
LOG.debug('verifying test data for %s', test_name)
@@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests):
return res
+def format_test_failures(test_result):
+ """Return a human-readable printable format of test failures."""
+ if not test_result['failures']:
+ return ''
+ failure_hdr = ' test failures:'
+ failure_fmt = ' * {module}.{class}.{function}\n {error}'
+ output = []
+ for failure in test_result['failures']:
+ if not output:
+ output = [failure_hdr]
+ output.append(failure_fmt.format(**failure))
+ return '\n'.join(output)
+
+
+def format_results(res):
+ """Return human-readable results as a string"""
+ platform_hdr = 'Platform: {platform}'
+ distro_hdr = ' Distro: {distro}'
+ distro_summary_fmt = (
+ ' test modules passed:{passed} tests failed:{failed}')
+ output = ['']
+ counts = {}
+ for platform, platform_data in res.items():
+ output.append(platform_hdr.format(platform=platform))
+ counts[platform] = {}
+ for distro, distro_data in platform_data.items():
+ distro_failure_output = []
+ output.append(distro_hdr.format(distro=distro))
+ counts[platform][distro] = {'passed': 0, 'failed': 0}
+ for _, test_result in distro_data.items():
+ if test_result['passed']:
+ counts[platform][distro]['passed'] += 1
+ else:
+ counts[platform][distro]['failed'] += len(
+ test_result['failures'])
+ failure_output = format_test_failures(test_result)
+ if failure_output:
+ distro_failure_output.append(failure_output)
+ output.append(
+ distro_summary_fmt.format(**counts[platform][distro]))
+ if distro_failure_output:
+ output.extend(distro_failure_output)
+ return '\n'.join(output)
+
+
def verify(args):
"""Verify test data.
@@ -90,7 +135,7 @@ def verify(args):
failed += len(fail_list)
# dump results
- LOG.debug('verify results: %s', res)
+ LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
if args.result:
util.merge_results({'verify': res}, args.result)
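
An illustrative input for the new format_results helper; the nested {platform: {distro: {test_name: result}}} shape matches the verify_data return value documented above, and the rendering shown is approximate:

    res = {
        'lxd': {
            'ubuntu': {
                'ntp_chrony': {'passed': True, 'failures': []},
                'write_files': {
                    'passed': False,
                    'failures': [{
                        'module': 'write_files',
                        'class': 'TestWriteFiles',
                        'function': 'test_binary',
                        'error': 'AssertionError',
                    }],
                },
            },
        },
    }
    # format_results(res) then yields roughly:
    #   Platform: lxd
    #     Distro: ubuntu
    #       test modules passed:1 tests failed:1
    #       test failures:
    #        * write_files.TestWriteFiles.test_binary
    #          AssertionError
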
diff --git a/tests/data/netinfo/netdev-formatted-output b/tests/data/netinfo/netdev-formatted-output
new file mode 100644
index 00000000..283ab4a4
--- /dev/null
+++ b/tests/data/netinfo/netdev-formatted-output
@@ -0,0 +1,10 @@
++++++++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++++++++++++++
++---------+------+------------------------------+---------------+--------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++---------+------+------------------------------+---------------+--------+-------------------+
+| enp0s25 | True | 192.168.2.18 | 255.255.255.0 | . | 50:7b:9d:2c:af:91 |
+| enp0s25 | True | fe80::7777:2222:1111:eeee/64 | . | global | 50:7b:9d:2c:af:91 |
+| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link | 50:7b:9d:2c:af:91 |
+| lo | True | 127.0.0.1 | 255.0.0.0 | . | . |
+| lo | True | ::1/128 | . | host | . |
++---------+------+------------------------------+---------------+--------+-------------------+
diff --git a/tests/data/netinfo/netdev-formatted-output-down b/tests/data/netinfo/netdev-formatted-output-down
new file mode 100644
index 00000000..038dfb4d
--- /dev/null
+++ b/tests/data/netinfo/netdev-formatted-output-down
@@ -0,0 +1,8 @@
++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++
++--------+-------+-----------+-----------+-------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++--------+-------+-----------+-----------+-------+-------------------+
+| eth0 | False | . | . | . | 00:16:3e:de:51:a6 |
+| lo | True | 127.0.0.1 | 255.0.0.0 | host | . |
+| lo | True | ::1/128 | . | host | . |
++--------+-------+-----------+-----------+-------+-------------------+
diff --git a/tests/data/netinfo/new-ifconfig-output b/tests/data/netinfo/new-ifconfig-output
new file mode 100644
index 00000000..83d4ad16
--- /dev/null
+++ b/tests/data/netinfo/new-ifconfig-output
@@ -0,0 +1,18 @@
+enp0s25: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
+ inet 192.168.2.18 netmask 255.255.255.0 broadcast 192.168.2.255
+ inet6 fe80::7777:2222:1111:eeee prefixlen 64 scopeid 0x30<global>
+ inet6 fe80::8107:2b92:867e:f8a6 prefixlen 64 scopeid 0x20<link>
+ ether 50:7b:9d:2c:af:91 txqueuelen 1000 (Ethernet)
+ RX packets 3017 bytes 10601563 (10.1 MiB)
+ RX errors 0 dropped 39 overruns 0 frame 0
+ TX packets 2627 bytes 196976 (192.3 KiB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
+ inet 127.0.0.1 netmask 255.0.0.0
+ inet6 ::1 prefixlen 128 scopeid 0x10<host>
+ loop txqueuelen 1 (Local Loopback)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
diff --git a/tests/data/netinfo/new-ifconfig-output-down b/tests/data/netinfo/new-ifconfig-output-down
new file mode 100644
index 00000000..5d12e352
--- /dev/null
+++ b/tests/data/netinfo/new-ifconfig-output-down
@@ -0,0 +1,15 @@
+eth0: flags=4098<BROADCAST,MULTICAST> mtu 1500
+ ether 00:16:3e:de:51:a6 txqueuelen 1000 (Ethernet)
+ RX packets 126229 bytes 158139342 (158.1 MB)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 59317 bytes 4839008 (4.8 MB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
+ inet 127.0.0.1 netmask 255.0.0.0
+ inet6 ::1 prefixlen 128 scopeid 0x10<host>
+ loop txqueuelen 1000 (Local Loopback)
+ RX packets 260 bytes 20092 (20.0 KB)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 260 bytes 20092 (20.0 KB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
diff --git a/tests/data/netinfo/old-ifconfig-output b/tests/data/netinfo/old-ifconfig-output
new file mode 100644
index 00000000..e01f763e
--- /dev/null
+++ b/tests/data/netinfo/old-ifconfig-output
@@ -0,0 +1,18 @@
+enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91
+ inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0
+ inet6 addr: fe80::7777:2222:1111:eeee/64 Scope:Global
+ inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link
+ UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
+ RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37
+ TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:1000
+ RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB)
+ Interrupt:20 Memory:e1200000-e1220000
+
+lo Link encap:Local Loopback
+ inet addr:127.0.0.1 Mask:255.0.0.0
+ inet6 addr: ::1/128 Scope:Host
+ UP LOOPBACK RUNNING MTU:65536 Metric:1
+ RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:1
diff --git a/tests/data/netinfo/route-formatted-output b/tests/data/netinfo/route-formatted-output
new file mode 100644
index 00000000..9d2c5dd3
--- /dev/null
+++ b/tests/data/netinfo/route-formatted-output
@@ -0,0 +1,22 @@
++++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++
++-------+-------------+-------------+---------------+-----------+-------+
+| Route | Destination | Gateway | Genmask | Interface | Flags |
++-------+-------------+-------------+---------------+-----------+-------+
+| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | enp0s25 | UG |
+| 1 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 | UG |
+| 2 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 | U |
++-------+-------------+-------------+---------------+-----------+-------+
++++++++++++++++++++++++++++++++++++Route IPv6 info+++++++++++++++++++++++++++++++++++
++-------+---------------------------+---------------------------+-----------+-------+
+| Route | Destination | Gateway | Interface | Flags |
++-------+---------------------------+---------------------------+-----------+-------+
+| 0 | 2a00:abcd:82ae:cd33::657 | :: | enp0s25 | Ue |
+| 1 | 2a00:abcd:82ae:cd33::/64 | :: | enp0s25 | U |
+| 2 | 2a00:abcd:82ae:cd33::/56 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG |
+| 3 | fd81:123f:654::657 | :: | enp0s25 | U |
+| 4 | fd81:123f:654::/64 | :: | enp0s25 | U |
+| 5 | fd81:123f:654::/48 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG |
+| 6 | fe80::abcd:ef12:bc34:da21 | :: | enp0s25 | U |
+| 7 | fe80::/64 | :: | enp0s25 | U |
+| 8 | ::/0 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG |
++-------+---------------------------+---------------------------+-----------+-------+
diff --git a/tests/data/netinfo/sample-ipaddrshow-output b/tests/data/netinfo/sample-ipaddrshow-output
new file mode 100644
index 00000000..b2fa2672
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-output
@@ -0,0 +1,13 @@
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
+2: enp0s25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
+ link/ether 50:7b:9d:2c:af:91 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.2.18/24 brd 192.168.2.255 scope global dynamic enp0s25
+ valid_lft 84174sec preferred_lft 84174sec
+ inet6 fe80::7777:2222:1111:eeee/64 scope global
+ valid_lft forever preferred_lft forever
+ inet6 fe80::8107:2b92:867e:f8a6/64 scope link
+ valid_lft forever preferred_lft forever
+
diff --git a/tests/data/netinfo/sample-ipaddrshow-output-down b/tests/data/netinfo/sample-ipaddrshow-output-down
new file mode 100644
index 00000000..cb516d64
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-output-down
@@ -0,0 +1,8 @@
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+44: eth0@if45: <BROADCAST,MULTICAST> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
+ link/ether 00:16:3e:de:51:a6 brd ff:ff:ff:ff:ff:ff link-netnsid 0
diff --git a/tests/data/netinfo/sample-iproute-output-v4 b/tests/data/netinfo/sample-iproute-output-v4
new file mode 100644
index 00000000..904cb034
--- /dev/null
+++ b/tests/data/netinfo/sample-iproute-output-v4
@@ -0,0 +1,3 @@
+default via 192.168.2.1 dev enp0s25 proto static metric 100
+default via 192.168.2.1 dev wlp3s0 proto static metric 150
+192.168.2.0/24 dev enp0s25 proto kernel scope link src 192.168.2.18 metric 100
diff --git a/tests/data/netinfo/sample-iproute-output-v6 b/tests/data/netinfo/sample-iproute-output-v6
new file mode 100644
index 00000000..12bb1c12
--- /dev/null
+++ b/tests/data/netinfo/sample-iproute-output-v6
@@ -0,0 +1,11 @@
+2a00:abcd:82ae:cd33::657 dev enp0s25 proto kernel metric 256 expires 2334sec pref medium
+2a00:abcd:82ae:cd33::/64 dev enp0s25 proto ra metric 100 pref medium
+2a00:abcd:82ae:cd33::/56 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium
+fd81:123f:654::657 dev enp0s25 proto kernel metric 256 pref medium
+fd81:123f:654::/64 dev enp0s25 proto ra metric 100 pref medium
+fd81:123f:654::/48 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium
+fe80::abcd:ef12:bc34:da21 dev enp0s25 proto static metric 100 pref medium
+fe80::/64 dev enp0s25 proto kernel metric 256 pref medium
+default via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto static metric 100 pref medium
+local ::1 dev lo table local proto none metric 0 pref medium
+local 2600:1f16:b80:ad00:90a:c915:bca6:5ff2 dev lo table local proto none metric 0 pref medium
diff --git a/tests/data/netinfo/sample-route-output-v4 b/tests/data/netinfo/sample-route-output-v4
new file mode 100644
index 00000000..ecc31d96
--- /dev/null
+++ b/tests/data/netinfo/sample-route-output-v4
@@ -0,0 +1,5 @@
+Kernel IP routing table
+Destination Gateway Genmask Flags Metric Ref Use Iface
+0.0.0.0 192.168.2.1 0.0.0.0 UG 100 0 0 enp0s25
+0.0.0.0 192.168.2.1 0.0.0.0 UG 150 0 0 wlp3s0
+192.168.2.0 0.0.0.0 255.255.255.0 U 100 0 0 enp0s25
diff --git a/tests/data/netinfo/sample-route-output-v6 b/tests/data/netinfo/sample-route-output-v6
new file mode 100644
index 00000000..4712b73c
--- /dev/null
+++ b/tests/data/netinfo/sample-route-output-v6
@@ -0,0 +1,13 @@
+Kernel IPv6 routing table
+Destination Next Hop Flag Met Re Use If
+2a00:abcd:82ae:cd33::657/128 :: Ue 256 1 0 enp0s25
+2a00:abcd:82ae:cd33::/64 :: U 100 1 0 enp0s25
+2a00:abcd:82ae:cd33::/56 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25
+fd81:123f:654::657/128 :: U 256 1 0 enp0s25
+fd81:123f:654::/64 :: U 100 1 0 enp0s25
+fd81:123f:654::/48 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25
+fe80::abcd:ef12:bc34:da21/128 :: U 100 1 2 enp0s25
+fe80::/64 :: U 256 1 16880 enp0s25
+::/0 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25
+::/0 :: !n -1 1424956 lo
+::1/128 :: Un 0 4 26289 lo
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 25878d7a..739bbebf 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase):
self.assertEqual(
('url', 'http://example.com'), main.parse_cmdline_url(cmdline))
- @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
def test_invalid_content(self, m_read):
key = "cloud-config-url"
url = 'http://example.com/foo'
@@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase):
self.assertIn(url, msg)
self.assertFalse(os.path.exists(fpath))
- @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
def test_valid_content(self, m_read):
url = "http://example.com/foo"
payload = b"#cloud-config\nmydata: foo\nbar: wark\n"
@@ -210,18 +210,18 @@ class TestCmdlineUrl(CiTestCase):
self.assertEqual(logging.INFO, lvl)
self.assertIn(url, msg)
- @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
def test_no_key_found(self, m_read):
cmdline = "ro mykey=http://example.com/foo root=foo"
fpath = self.tmp_path("ccpath")
- lvl, msg = main.attempt_cmdline_url(
+ lvl, _msg = main.attempt_cmdline_url(
fpath, network=True, cmdline=cmdline)
m_read.assert_not_called()
self.assertFalse(os.path.exists(fpath))
self.assertEqual(logging.DEBUG, lvl)
- @mock.patch('cloudinit.cmd.main.util.read_file_or_url')
+ @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
def test_exception_warns(self, m_read):
url = "http://example.com/foo"
cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 9751ed95..abe820e1 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -2,27 +2,34 @@
"""Tests of the built-in user data handlers."""
+import copy
import os
import shutil
import tempfile
+from textwrap import dedent
-try:
- from unittest import mock
-except ImportError:
- import mock
-from cloudinit.tests import helpers as test_helpers
+from cloudinit.tests.helpers import (
+ FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja)
from cloudinit import handlers
from cloudinit import helpers
from cloudinit import util
-from cloudinit.handlers import upstart_job
+from cloudinit.handlers.cloud_config import CloudConfigPartHandler
+from cloudinit.handlers.jinja_template import (
+ JinjaTemplatePartHandler, convert_jinja_instance_data,
+ render_jinja_payload)
+from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
-class TestBuiltins(test_helpers.FilesystemMockingTestCase):
+class TestUpstartJobPartHandler(FilesystemMockingTestCase):
+
+ mpath = 'cloudinit.handlers.upstart_job.'
+
def test_upstart_frequency_no_out(self):
c_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, c_root)
@@ -32,14 +39,13 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
'cloud_dir': c_root,
'upstart_dir': up_root,
})
- freq = PER_ALWAYS
- h = upstart_job.UpstartJobPartHandler(paths)
+ h = UpstartJobPartHandler(paths)
# No files should be written out when
# the frequency is ! per-instance
h.handle_part('', handlers.CONTENT_START,
None, None, None)
h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', freq)
+ 'test.conf', 'blah', frequency=PER_ALWAYS)
h.handle_part('', handlers.CONTENT_END,
None, None, None)
self.assertEqual(0, len(os.listdir(up_root)))
@@ -48,7 +54,6 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
# files should be written out when frequency is ! per-instance
new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, new_root)
- freq = PER_INSTANCE
self.patchOS(new_root)
self.patchUtils(new_root)
@@ -56,22 +61,297 @@ class TestBuiltins(test_helpers.FilesystemMockingTestCase):
'upstart_dir': "/etc/upstart",
})
- upstart_job.SUITABLE_UPSTART = True
util.ensure_dir("/run")
util.ensure_dir("/etc/upstart")
- with mock.patch.object(util, 'subp') as mockobj:
- h = upstart_job.UpstartJobPartHandler(paths)
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', freq)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
+ with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True):
+ with mock.patch.object(util, 'subp') as m_subp:
+ h = UpstartJobPartHandler(paths)
+ h.handle_part('', handlers.CONTENT_START,
+ None, None, None)
+ h.handle_part('blah', 'text/upstart-job',
+ 'test.conf', 'blah', frequency=PER_INSTANCE)
+ h.handle_part('', handlers.CONTENT_END,
+ None, None, None)
- self.assertEqual(len(os.listdir('/etc/upstart')), 1)
+ self.assertEqual(len(os.listdir('/etc/upstart')), 1)
- mockobj.assert_called_once_with(
+ m_subp.assert_called_once_with(
['initctl', 'reload-configuration'], capture=False)
+
+class TestJinjaTemplatePartHandler(CiTestCase):
+
+ with_logs = True
+
+ mpath = 'cloudinit.handlers.jinja_template.'
+
+ def setUp(self):
+ super(TestJinjaTemplatePartHandler, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.run_dir = os.path.join(self.tmp, 'run_dir')
+ util.ensure_dir(self.run_dir)
+ self.paths = helpers.Paths({
+ 'cloud_dir': self.tmp, 'run_dir': self.run_dir})
+
+ def test_jinja_template_part_handler_defaults(self):
+ """On init, paths are saved and subhandler types are empty."""
+ h = JinjaTemplatePartHandler(self.paths)
+ self.assertEqual(['## template: jinja'], h.prefixes)
+ self.assertEqual(3, h.handler_version)
+ self.assertEqual(self.paths, h.paths)
+ self.assertEqual({}, h.sub_handlers)
+
+ def test_jinja_template_part_handler_looks_up_sub_handler_types(self):
+ """When sub_handlers are passed, init lists types of subhandlers."""
+ script_handler = ShellScriptPartHandler(self.paths)
+ cloudconfig_handler = CloudConfigPartHandler(self.paths)
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler, cloudconfig_handler])
+ self.assertItemsEqual(
+ ['text/cloud-config', 'text/cloud-config-jsonp',
+ 'text/x-shellscript'],
+ h.sub_handlers)
+
+ def test_jinja_template_handle_noop_on_content_signals(self):
+ """Perform no part handling when content type is CONTENT_SIGNALS."""
+ script_handler = ShellScriptPartHandler(self.paths)
+
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, 'handle_part') as m_handle_part:
+ h.handle_part(
+ data='data', ctype=handlers.CONTENT_START, filename='part-1',
+ payload='## template: jinja\n#!/bin/bash\necho himom',
+ frequency='freq', headers='headers')
+ m_handle_part.assert_not_called()
+
+ @skipUnlessJinja()
+ def test_jinja_template_handle_subhandler_v2_with_clean_payload(self):
+ """Call version 2 subhandler.handle_part with stripped payload."""
+ script_handler = ShellScriptPartHandler(self.paths)
+ self.assertEqual(2, script_handler.handler_version)
+
+ # Create required instance-data.json file
+ instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_data = {'topkey': 'echo himom'}
+ util.write_file(instance_json, util.json_dumps(instance_data))
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, 'handle_part') as m_part:
+ # ctype with leading '!' not in handlers.CONTENT_SIGNALS
+ h.handle_part(
+ data='data', ctype="!" + handlers.CONTENT_START,
+ filename='part01',
+ payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}',
+ frequency='freq', headers='headers')
+ m_part.assert_called_once_with(
+ 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq')
+
+ @skipUnlessJinja()
+ def test_jinja_template_handle_subhandler_v3_with_clean_payload(self):
+ """Call version 3 subhandler.handle_part with stripped payload."""
+ cloudcfg_handler = CloudConfigPartHandler(self.paths)
+ self.assertEqual(3, cloudcfg_handler.handler_version)
+
+ # Create required instance-data.json file
+ instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}}
+ util.write_file(instance_json, util.json_dumps(instance_data))
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[cloudcfg_handler])
+ with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part:
+ # ctype with leading '!' not in handlers.CONTENT_SIGNALS
+ h.handle_part(
+ data='data', ctype="!" + handlers.CONTENT_END,
+ filename='part01',
+ payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}',
+ frequency='freq', headers='headers')
+ m_part.assert_called_once_with(
+ 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]',
+ 'freq', 'headers')
+
+ def test_jinja_template_handle_errors_on_missing_instance_data_json(self):
+ """If instance-data is absent, raise an error from handle_part."""
+ script_handler = ShellScriptPartHandler(self.paths)
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler])
+ with self.assertRaises(RuntimeError) as context_manager:
+ h.handle_part(
+ data='data', ctype="!" + handlers.CONTENT_START,
+ filename='part01',
+ payload='## template: jinja \n#!/bin/bash\necho himom',
+ frequency='freq', headers='headers')
+ script_file = os.path.join(script_handler.script_dir, 'part01')
+ self.assertEqual(
+ 'Cannot render jinja template vars. Instance data not yet present'
+ ' at {}/instance-data.json'.format(
+ self.run_dir), str(context_manager.exception))
+ self.assertFalse(
+ os.path.exists(script_file),
+ 'Unexpected file created %s' % script_file)
+
+ @skipUnlessJinja()
+ def test_jinja_template_handle_renders_jinja_content(self):
+ """When present, render jinja variables from instance-data.json."""
+ script_handler = ShellScriptPartHandler(self.paths)
+ instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_data = {'topkey': {'subkey': 'echo himom'}}
+ util.write_file(instance_json, util.json_dumps(instance_data))
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler])
+ h.handle_part(
+ data='data', ctype="!" + handlers.CONTENT_START,
+ filename='part01',
+ payload=(
+ '## template: jinja \n'
+ '#!/bin/bash\n'
+ '{{ topkey.subkey|default("nosubkey") }}'),
+ frequency='freq', headers='headers')
+ script_file = os.path.join(script_handler.script_dir, 'part01')
+ self.assertNotIn(
+ 'Instance data not yet present at {}/instance-data.json'.format(
+ self.run_dir),
+ self.logs.getvalue())
+ self.assertEqual(
+ '#!/bin/bash\necho himom', util.load_file(script_file))
+
+ @skipUnlessJinja()
+ def test_jinja_template_handle_renders_jinja_content_missing_keys(self):
+ """When specified jinja variable is undefined, log a warning."""
+ script_handler = ShellScriptPartHandler(self.paths)
+ instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_data = {'topkey': {'subkey': 'echo himom'}}
+ util.write_file(instance_json, util.json_dumps(instance_data))
+ h = JinjaTemplatePartHandler(
+ self.paths, sub_handlers=[script_handler])
+ h.handle_part(
+ data='data', ctype="!" + handlers.CONTENT_START,
+ filename='part01',
+ payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}',
+ frequency='freq', headers='headers')
+ script_file = os.path.join(script_handler.script_dir, 'part01')
+ self.assertTrue(
+ os.path.exists(script_file),
+ 'Missing expected file %s' % script_file)
+ self.assertIn(
+ "WARNING: Could not render jinja template variables in file"
+ " 'part01': 'goodtry'\n",
+ self.logs.getvalue())
+
+
+class TestConvertJinjaInstanceData(CiTestCase):
+
+ def test_convert_instance_data_hyphens_to_underscores(self):
+ """Replace hyphenated keys with underscores in instance-data."""
+ data = {'hyphenated-key': 'hyphenated-val',
+ 'underscore_delim_key': 'underscore_delimited_val'}
+ expected_data = {'hyphenated_key': 'hyphenated-val',
+ 'underscore_delim_key': 'underscore_delimited_val'}
+ self.assertEqual(
+ expected_data,
+ convert_jinja_instance_data(data=data))
+
+ def test_convert_instance_data_promotes_versioned_keys_to_top_level(self):
+ """Any versioned keys are promoted as top-level keys
+
+ This promotes any cloud-init standardized keys to the top level so
+ users can reference them easily. Instead of v1.availability_zone,
+ the name availability_zone can be used in templates.
+ """
+ data = {'ds': {'dskey1': 1, 'dskey2': 2},
+ 'v1': {'v1key1': 'v1.1'},
+ 'v2': {'v2key1': 'v2.1'}}
+ expected_data = copy.deepcopy(data)
+ expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'})
+
+ converted_data = convert_jinja_instance_data(data=data)
+ self.assertItemsEqual(
+ ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys())
+ self.assertEqual(
+ expected_data,
+ converted_data)
+
+ def test_convert_instance_data_most_recent_version_of_promoted_keys(self):
+ """The most-recent versioned key value is promoted to top-level."""
+ data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'},
+ 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'},
+ 'v3': {'key1': 'newest v3 key1'}}
+ expected_data = copy.deepcopy(data)
+ expected_data.update(
+ {'key1': 'newest v3 key1', 'key2': 'old v1 key2',
+ 'key3': 'newer v2 key3'})
+
+ converted_data = convert_jinja_instance_data(data=data)
+ self.assertEqual(
+ expected_data,
+ converted_data)
+
+ def test_convert_instance_data_decodes_decode_paths(self):
+ """Any decode_paths provided are decoded by convert_instance_data."""
+ data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'}
+ expected_data = copy.deepcopy(data)
+ expected_data['key1']['subkey1'] = 'hi mom'
+
+ converted_data = convert_jinja_instance_data(
+ data=data, decode_paths=('key1/subkey1',))
+ self.assertEqual(
+ expected_data,
+ converted_data)
+
+
+class TestRenderJinjaPayload(CiTestCase):
+
+ with_logs = True
+
+ @skipUnlessJinja()
+ def test_render_jinja_payload_logs_jinja_vars_on_debug(self):
+ """When debug is True, log jinja varables available."""
+ payload = (
+ '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}')
+ instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
+ expected_log = dedent("""\
+ DEBUG: Converted jinja variables
+ {
+ "hostname": "foo",
+ "instance_id": "iid",
+ "v1": {
+ "hostname": "foo"
+ }
+ }
+ """)
+ self.assertEqual(
+ render_jinja_payload(
+ payload=payload, payload_fn='myfile',
+ instance_data=instance_data, debug=True),
+ '#!/bin/sh\necho hi from foo')
+ self.assertEqual(expected_log, self.logs.getvalue())
+
+ @skipUnlessJinja()
+ def test_render_jinja_payload_replaces_missing_variables_and_warns(self):
+ """Warn on missing jinja variables and replace the absent variable."""
+ payload = (
+ '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}')
+ instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
+ self.assertEqual(
+ render_jinja_payload(
+ payload=payload, payload_fn='myfile',
+ instance_data=instance_data),
+ '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE')
+ expected_log = (
+ 'WARNING: Could not render jinja template variables in file'
+ " 'myfile': 'NOTHERE'")
+ self.assertIn(expected_log, self.logs.getvalue())
+
# vi: ts=4 expandtab
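
A standalone illustration of the render_jinja_payload behavior exercised above; the import path and call signature are taken from this diff, and jinja2 must be installed for rendering to happen:

    from cloudinit.handlers.jinja_template import render_jinja_payload

    payload = '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}'
    instance_data = {'v1': {'hostname': 'foo'}}

    # Hyphenated keys become underscores and versioned keys are promoted
    # to the top level before rendering (see TestConvertJinjaInstanceData).
    print(render_jinja_payload(
        payload=payload, payload_fn='myfile', instance_data=instance_data))
    # -> #!/bin/sh
    #    echo hi from foo
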
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 0c0f427a..199d69b0 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -208,8 +208,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
- @mock.patch('cloudinit.config.schema.handle_schema_args')
- def test_wb_devel_schema_subcommand_parser(self, m_schema):
+ def test_wb_devel_schema_subcommand_parser(self):
"""The subcommand cloud-init schema calls the correct subparser."""
exit_code = self._call_main(['cloud-init', 'devel', 'schema'])
self.assertEqual(1, exit_code)
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 275b16d2..3efe7adf 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -524,7 +524,17 @@ c: 4
self.assertEqual(cfg.get('password'), 'gocubs')
self.assertEqual(cfg.get('locale'), 'chicago')
- @httpretty.activate
+
+class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
+
+ def setUp(self):
+ TestConsumeUserData.setUp(self)
+ helpers.HttprettyTestCase.setUp(self)
+
+ def tearDown(self):
+ TestConsumeUserData.tearDown(self)
+ helpers.HttprettyTestCase.tearDown(self)
+
@mock.patch('cloudinit.url_helper.time.sleep')
def test_include(self, mock_sleep):
"""Test #include."""
@@ -543,7 +553,6 @@ c: 4
cc = util.load_yaml(cc_contents)
self.assertTrue(cc.get('included'))
- @httpretty.activate
@mock.patch('cloudinit.url_helper.time.sleep')
def test_include_bad_url(self, mock_sleep):
"""Test #include with a bad URL."""
@@ -597,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
class TestConvertString(helpers.TestCase):
+
def test_handles_binary_non_utf8_decodable(self):
- blob = b'\x32\x99'
+ """Printable unicode (not utf8-decodable) is safely converted."""
+ blob = b'#!/bin/bash\necho \xc3\x84\n'
msg = ud.convert_string(blob)
self.assertEqual(blob, msg.get_payload(decode=True))
@@ -612,6 +623,13 @@ class TestConvertString(helpers.TestCase):
msg = ud.convert_string(text)
self.assertEqual(text, msg.get_payload(decode=False))
+ def test_handle_mime_parts(self):
+ """Mime parts are properly returned as a mime message."""
+ message = MIMEBase("text", "plain")
+ message.set_payload("Just text")
+ msg = ud.convert_string(str(message))
+ self.assertEqual("Just text", msg.get_payload(decode=False))
+
class TestFetchBaseConfig(helpers.TestCase):
def test_only_builtin_gets_builtin(self):
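
TestConsumeUserDataHttp above mixes two TestCase bases and chains both setUp/tearDown pairs explicitly rather than relying on super(), since the bases are not written cooperatively. A minimal generic sketch of the pattern:

    import unittest

    class BaseA(unittest.TestCase):
        def setUp(self):
            self.a = 'ready'

    class BaseB(unittest.TestCase):
        def setUp(self):
            self.b = 'ready'

    class Combined(BaseA, BaseB):
        def setUp(self):
            # Call each base explicitly, mirroring TestConsumeUserDataHttp.
            BaseA.setUp(self)
            BaseB.setUp(self)
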
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index 4fa9616b..1e77842f 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self.ds.get_hostname())
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- @httpretty.activate
def test_with_mock_server(self, m_is_aliyun):
m_is_aliyun.return_value = True
self.regist_default_server()
@@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
self._test_host_name()
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- @httpretty.activate
def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
"""If is_aliyun returns false, then get_data should return False."""
m_is_aliyun.return_value = False
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index 3253f3ad..ff35904e 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -262,64 +262,56 @@ class TestUserDataRhevm(CiTestCase):
'''
Test to exercise method: DataSourceAltCloud.user_data_rhevm()
'''
- cmd_pass = ['true']
- cmd_fail = ['false']
- cmd_not_found = ['bogus bad command']
-
def setUp(self):
'''Set up.'''
self.paths = helpers.Paths({'cloud_dir': '/tmp'})
- self.mount_dir = tempfile.mkdtemp()
+ self.mount_dir = self.tmp_dir()
_write_user_data_files(self.mount_dir, 'test user data')
-
- def tearDown(self):
- # Reset
-
- _remove_user_data_files(self.mount_dir)
-
- # Attempt to remove the temp dir ignoring errors
- try:
- shutil.rmtree(self.mount_dir)
- except OSError:
- pass
-
- dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
- dsac.CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
- dsac.CMD_UDEVADM_SETTLE = ['udevadm', 'settle',
- '--quiet', '--timeout=5']
+ self.add_patch(
+ 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
+ 'm_modprobe_floppy', return_value=None)
+ self.add_patch(
+ 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
+ 'm_udevadm_settle', return_value=('', ''))
+ self.add_patch(
+ 'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
+ 'm_mount_cb')
def test_mount_cb_fails(self):
'''Test user_data_rhevm() where mount_cb fails.'''
- dsac.CMD_PROBE_FLOPPY = self.cmd_pass
+ self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_modprobe_fails(self):
'''Test user_data_rhevm() where modprobe fails.'''
- dsac.CMD_PROBE_FLOPPY = self.cmd_fail
+ self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ "Failed modprobe")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_modprobe_cmd(self):
'''Test user_data_rhevm() with no modprobe command.'''
- dsac.CMD_PROBE_FLOPPY = self.cmd_not_found
+ self.m_modprobe_floppy.side_effect = util.ProcessExecutionError(
+ "No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_udevadm_fails(self):
'''Test user_data_rhevm() where udevadm fails.'''
- dsac.CMD_UDEVADM_SETTLE = self.cmd_fail
+ self.m_udevadm_settle.side_effect = util.ProcessExecutionError(
+ "Failed settle.")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_udevadm_cmd(self):
'''Test user_data_rhevm() with no udevadm command.'''
- dsac.CMD_UDEVADM_SETTLE = self.cmd_not_found
+ self.m_udevadm_settle.side_effect = OSError("No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
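
The rewritten TestUserDataRhevm setUp leans on add_patch, a CiTestCase helper that wraps mock.patch, exposes the started mock on self under the given attribute name, and unpatches on cleanup. A rough stand-in (the real helper lives in cloudinit/tests/helpers.py and may differ, for example in autospec handling):

    import unittest
    from unittest import mock

    class CiLikeTestCase(unittest.TestCase):
        def add_patch(self, target, attr, **kwargs):
            """Patch `target`, expose the mock as self.<attr>, undo later."""
            patcher = mock.patch(target, **kwargs)
            setattr(self, attr, patcher.start())
            self.addCleanup(patcher.stop)
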
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 3e8b7913..4e428b71 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,15 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import distros
from cloudinit import helpers
-from cloudinit.util import b64e, decode_binary, load_file, write_file
-from cloudinit.sources import DataSourceAzure as dsaz
-from cloudinit.util import find_freebsd_part
-from cloudinit.util import get_path_dev_freebsd
+from cloudinit import url_helper
+from cloudinit.sources import (
+ UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
+from cloudinit.util import (b64e, decode_binary, load_file, write_file,
+ find_freebsd_part, get_path_dev_freebsd,
+ MountFailedError)
from cloudinit.version import version_string as vs
-from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock,
- ExitStack, PY26, SkipTest)
+from cloudinit.tests.helpers import (
+ HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
+ ExitStack, PY26, SkipTest)
import crypt
+import httpretty
+import json
import os
import stat
import xml.etree.ElementTree as ET
@@ -77,6 +83,106 @@ def construct_valid_ovf_env(data=None, pubkeys=None,
return content
+NETWORK_METADATA = {
+ "network": {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {
+ "ipAddress": []
+ },
+ "ipv4": {
+ "subnet": [
+ {
+ "prefix": "24",
+ "address": "10.0.0.0"
+ }
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81"
+ }
+ ]
+ }
+ }
+ ]
+ }
+}
+
+
+class TestGetMetadataFromIMDS(HttprettyTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestGetMetadataFromIMDS, self).setUp()
+ self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01"
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
+ @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4')
+ @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up')
+ def test_get_metadata_does_not_dhcp_if_network_is_up(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Do not perform DHCP setup when nic is already up."""
+ m_net_is_up.return_value = True
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(NETWORK_METADATA).encode('utf-8'))
+ self.assertEqual(
+ NETWORK_METADATA,
+ dsaz.get_metadata_from_imds('eth9', retries=3))
+
+ m_net_is_up.assert_called_with('eth9')
+ m_dhcp.assert_not_called()
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
+ @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4')
+ @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up')
+ def test_get_metadata_performs_dhcp_when_network_is_down(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Perform DHCP setup when nic is not up."""
+ m_net_is_up.return_value = False
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(NETWORK_METADATA).encode('utf-8'))
+
+ self.assertEqual(
+ NETWORK_METADATA,
+ dsaz.get_metadata_from_imds('eth9', retries=2))
+
+ m_net_is_up.assert_called_with('eth9')
+ m_dhcp.assert_called_with('eth9')
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue())
+
+ m_readurl.assert_called_with(
+ self.network_md_url, exception_cb=mock.ANY,
+ headers={'Metadata': 'true'}, retries=2, timeout=1)
+
+ @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch('cloudinit.sources.DataSourceAzure.net.is_up')
+ def test_get_metadata_from_imds_empty_when_no_imds_present(
+ self, m_net_is_up, m_sleep):
+ """Return empty dict when IMDS network metadata is absent."""
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
+ body={}, status=404)
+
+ m_net_is_up.return_value = True # skips dhcp
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2))
+
+ m_net_is_up.assert_called_with('eth9')
+ self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue())
+
+
class TestAzureDataSource(CiTestCase):
with_logs = True
@@ -95,6 +201,19 @@ class TestAzureDataSource(CiTestCase):
self.patches = ExitStack()
self.addCleanup(self.patches.close)
+ self.patches.enter_context(mock.patch.object(
+ dsaz, '_get_random_seed', return_value='wild'))
+ self.m_get_metadata_from_imds = self.patches.enter_context(
+ mock.patch.object(
+ dsaz, 'get_metadata_from_imds',
+ mock.MagicMock(return_value=NETWORK_METADATA)))
+ self.m_fallback_nic = self.patches.enter_context(
+ mock.patch('cloudinit.sources.net.find_fallback_nic',
+ return_value='eth9'))
+ self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
+ mock.patch.object(
+ dsaz, 'maybe_remove_ubuntu_network_config_scripts',
+ mock.MagicMock()))
super(TestAzureDataSource, self).setUp()
def apply_patches(self, patches):
@@ -135,7 +254,7 @@ scbus-1 on xpt0 bus 0
])
return dsaz
- def _get_ds(self, data, agent_command=None):
+ def _get_ds(self, data, agent_command=None, distro=None):
def dsdevs():
return data.get('dsdevs', [])
@@ -184,8 +303,11 @@ scbus-1 on xpt0 bus 0
side_effect=_wait_for_files)),
])
+ if distro is not None:
+ distro_cls = distros.fetch(distro)
+ distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths)
dsrc = dsaz.DataSourceAzure(
- data.get('sys_cfg', {}), distro=None, paths=self.paths)
+ data.get('sys_cfg', {}), distro=distro, paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
@@ -214,7 +336,7 @@ scbus-1 on xpt0 bus 0
self.assertIn(tag, x)
def tags_equal(x, y):
- for x_tag, x_val in x.items():
+ for x_val in x.values():
y_val = y.get(x_val.tag)
self.assertEqual(x_val.text, y_val.text)
@@ -258,29 +380,20 @@ fdescfs /dev/fd fdescfs rw 0 0
res = get_path_dev_freebsd('/etc', mnt_list)
self.assertIsNotNone(res)
- @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data')
- def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data):
- """Report non-azure when DMI's chassis asset tag doesn't match.
-
- Return False when the asset tag doesn't match Azure's static
- AZURE_CHASSIS_ASSET_TAG.
- """
+ @mock.patch('cloudinit.sources.DataSourceAzure._is_platform_viable')
+ def test_call_is_platform_viable_seed(self, m_is_platform_viable):
+ """Check seed_dir using _is_platform_viable and return False."""
# Return a non-matching asset tag value
- nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
- m_read_dmi_data.return_value = nonazure_tag
+ m_is_platform_viable.return_value = False
dsrc = dsaz.DataSourceAzure(
{}, distro=None, paths=self.paths)
self.assertFalse(dsrc.get_data())
- self.assertEqual(
- "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- nonazure_tag),
- self.logs.getvalue())
+ m_is_platform_viable.assert_called_with(dsrc.seed_dir)
def test_basic_seed_dir(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
-
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -289,6 +402,82 @@ fdescfs /dev/fd fdescfs rw 0 0
self.assertTrue(os.path.isfile(
os.path.join(self.waagent_d, 'ovf-env.xml')))
+ def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+ """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data, distro='debian')
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+ """get_data will remove ubuntu net scripts on Ubuntu distro."""
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data, distro='ubuntu')
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+ """Return all structured metadata and cache no class attributes."""
+ yaml_cfg = "{agent_command: my_command}\n"
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
+ 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+ dsrc = self._get_ds(data)
+ expected_cfg = {
+ 'PreprovisionedVm': False,
+ 'datasource': {'Azure': {'agent_command': 'my_command'}},
+ 'system_info': {'default_user': {'name': u'myuser'}}}
+ expected_metadata = {
+ 'azure_data': {
+ 'configurationsettype': 'LinuxProvisioningConfiguration'},
+ 'imds': {'network': {'interface': [{
+ 'ipv4': {'ipAddress': [
+ {'privateIpAddress': '10.0.0.4',
+ 'publicIpAddress': '104.46.124.81'}],
+ 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]},
+ 'ipv6': {'ipAddress': []},
+ 'macAddress': '000D3A047598'}]}},
+ 'instance-id': 'test-instance-id',
+ 'local-hostname': u'myhost',
+ 'random_seed': 'wild'}
+
+ crawled_metadata = dsrc.crawl_metadata()
+
+ self.assertItemsEqual(
+ crawled_metadata.keys(),
+ ['cfg', 'files', 'metadata', 'userdata_raw'])
+ self.assertEqual(crawled_metadata['cfg'], expected_cfg)
+ self.assertEqual(
+ list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
+ self.assertIn(
+ b'<HostName>myhost</HostName>',
+ crawled_metadata['files']['ovf-env.xml'])
+ self.assertEqual(crawled_metadata['metadata'], expected_metadata)
+ self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
+ self.assertEqual(dsrc.userdata_raw, None)
+ self.assertEqual(dsrc.metadata, {})
+ self.assertEqual(dsrc._metadata_imds, UNSET)
+ self.assertFalse(os.path.isfile(
+ os.path.join(self.waagent_d, 'ovf-env.xml')))
+
+ def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+ """crawl_metadata raises an exception on invalid ovf-env.xml."""
+ data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
+ dsrc = self._get_ds(data)
+ error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
+ ' syntax error: line 1, column 0')
+ with self.assertRaises(InvalidMetaDataException) as cm:
+ dsrc.crawl_metadata()
+ self.assertEqual(str(cm.exception), error_msg)
+
def test_waagent_d_has_0700_perms(self):
# we expect /var/lib/waagent to be created 0700
dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -312,6 +501,20 @@ fdescfs /dev/fd fdescfs rw 0 0
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+ def test_network_config_set_from_imds(self):
+ """Datasource.network_config returns IMDS network data."""
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+ expected_network_config = {
+ 'ethernets': {
+ 'eth0': {'set-name': 'eth0',
+ 'match': {'macaddress': '00:0d:3a:04:75:98'},
+ 'dhcp4': True}},
+ 'version': 2}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
def test_user_cfg_set_agent_command(self):
# set dscfg in via base64 encoded yaml
cfg = {'agent_command': "my_command"}
@@ -335,6 +538,18 @@ fdescfs /dev/fd fdescfs rw 0 0
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], '_COMMAND')
+ def test_sys_cfg_set_never_destroy_ntfs(self):
+ sys_cfg = {'datasource': {'Azure': {
+ 'never_destroy_ntfs': 'user-supplied-value'}}}
+ data = {'ovfcontent': construct_valid_ovf_env(data={}),
+ 'sys_cfg': sys_cfg}
+
+ dsrc = self._get_ds(data)
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+ 'user-supplied-value')
+
def test_username_used(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
@@ -565,12 +780,34 @@ fdescfs /dev/fd fdescfs rw 0 0
self.assertEqual(
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_imds_network_config(self, mock_fallback):
+ """Network config is generated from IMDS network data when present."""
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ expected_cfg = {
+ 'ethernets': {
+ 'eth0': {'dhcp4': True,
+ 'match': {'macaddress': '00:0d:3a:04:75:98'},
+ 'set-name': 'eth0'}},
+ 'version': 2}
+
+ self.assertEqual(expected_cfg, dsrc.network_config)
+ mock_fallback.assert_not_called()
+
@mock.patch('cloudinit.net.get_interface_mac')
@mock.patch('cloudinit.net.get_devicelist')
@mock.patch('cloudinit.net.device_driver')
@mock.patch('cloudinit.net.generate_fallback_config')
- def test_network_config(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
+ def test_fallback_network_config(self, mock_fallback, mock_dd,
+ mock_devlist, mock_get_mac):
+ """On absent IMDS network data, generate network fallback config."""
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
@@ -591,6 +828,8 @@ fdescfs /dev/fd fdescfs rw 0 0
mock_get_mac.return_value = '00:11:22:33:44:55'
dsrc = self._get_ds(data)
+ # Represent empty response from network imds
+ self.m_get_metadata_from_imds.return_value = {}
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -603,8 +842,9 @@ fdescfs /dev/fd fdescfs rw 0 0
@mock.patch('cloudinit.net.get_devicelist')
@mock.patch('cloudinit.net.device_driver')
@mock.patch('cloudinit.net.generate_fallback_config')
- def test_network_config_blacklist(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
+ def test_fallback_network_config_blacklist(self, mock_fallback, mock_dd,
+ mock_devlist, mock_get_mac):
+ """On absent network metadata, blacklist mlx from fallback config."""
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
@@ -635,6 +875,8 @@ fdescfs /dev/fd fdescfs rw 0 0
mock_get_mac.return_value = '00:11:22:33:44:55'
dsrc = self._get_ds(data)
+ # Represent empty response from network imds
+ self.m_get_metadata_from_imds.return_value = {}
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -675,7 +917,12 @@ class TestAzureBounce(CiTestCase):
mock.patch.object(dsaz, 'get_metadata_from_fabric',
mock.MagicMock(return_value={})))
self.patches.enter_context(
+ mock.patch.object(dsaz, 'get_metadata_from_imds',
+ mock.MagicMock(return_value={})))
+ self.patches.enter_context(
mock.patch.object(dsaz.util, 'which', lambda x: True))
+ self.patches.enter_context(mock.patch.object(
+ dsaz, '_get_random_seed', return_value='wild'))
def _dmi_mocks(key):
if key == 'system-uuid':
@@ -703,9 +950,12 @@ class TestAzureBounce(CiTestCase):
mock.patch.object(dsaz, 'set_hostname'))
self.subp = self.patches.enter_context(
mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
+ self.find_fallback_nic = self.patches.enter_context(
+ mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
def tearDown(self):
self.patches.close()
+ super(TestAzureBounce, self).tearDown()
def _get_ds(self, ovfcontent=None, agent_command=None):
if ovfcontent is not None:
@@ -911,7 +1161,7 @@ class TestLoadAzureDsDir(CiTestCase):
str(context_manager.exception))
-class TestReadAzureOvf(TestCase):
+class TestReadAzureOvf(CiTestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
@@ -957,7 +1207,9 @@ class TestCanDevBeReformatted(CiTestCase):
# return sorted by partition number
return sorted(ret, key=lambda d: d[0])
- def mount_cb(device, callback):
+ def mount_cb(device, callback, mtype, update_env_for_mount):
+ self.assertEqual('ntfs', mtype)
+ self.assertEqual('C', update_env_for_mount.get('LANG'))
p = self.tmp_dir()
for f in bypath.get(device).get('files', []):
write_file(os.path.join(p, f), content=f)
@@ -988,14 +1240,16 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda2': {'num': 2},
'/dev/sda3': {'num': 3},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("3 or more", msg.lower())
def test_no_partitions_is_false(self):
"""A disk with no partitions can not be formatted."""
self.patchup({'/dev/sda': {}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("not partitioned", msg.lower())
@@ -1007,7 +1261,8 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda1': {'num': 1},
'/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("not ntfs", msg.lower())
@@ -1020,7 +1275,8 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda2': {'num': 2, 'fs': 'ntfs',
'files': ['secret.txt']},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("files on it", msg.lower())
@@ -1032,7 +1288,8 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda1': {'num': 1},
'/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
@@ -1043,7 +1300,8 @@ class TestCanDevBeReformatted(CiTestCase):
'partitions': {
'/dev/sda1': {'num': 1, 'fs': 'zfs'},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("not ntfs", msg.lower())
@@ -1055,9 +1313,14 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda1': {'num': 1, 'fs': 'ntfs',
'files': ['file1.txt', 'file2.exe']},
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
- self.assertFalse(value)
- self.assertIn("files on it", msg.lower())
+ with mock.patch.object(dsaz.LOG, 'warning') as warning:
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
+ wmsg = warning.call_args[0][0]
+ self.assertIn("looks like you're using NTFS on the ephemeral disk",
+ wmsg)
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
def test_one_partition_ntfs_empty_is_true(self):
"""1 mountable ntfs partition and no files can be formatted."""
@@ -1066,7 +1329,8 @@ class TestCanDevBeReformatted(CiTestCase):
'partitions': {
'/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
@@ -1078,7 +1342,8 @@ class TestCanDevBeReformatted(CiTestCase):
'/dev/sda1': {'num': 1, 'fs': 'ntfs',
'files': ['dataloss_warning_readme.txt']}
}}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda")
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=False)
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
@@ -1093,7 +1358,8 @@ class TestCanDevBeReformatted(CiTestCase):
'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
'realpath': '/dev/sdb1'}
}}})
- value, msg = dsaz.can_dev_be_reformatted(epath)
+ value, msg = dsaz.can_dev_be_reformatted(epath,
+ preserve_ntfs=False)
self.assertTrue(value)
self.assertIn("safe for", msg.lower())
@@ -1112,10 +1378,68 @@ class TestCanDevBeReformatted(CiTestCase):
epath + '-part3': {'num': 3, 'fs': 'ext',
'realpath': '/dev/sdb3'}
}}})
- value, msg = dsaz.can_dev_be_reformatted(epath)
+ value, msg = dsaz.can_dev_be_reformatted(epath,
+ preserve_ntfs=False)
self.assertFalse(value)
self.assertIn("3 or more", msg.lower())
+ def test_ntfs_mount_errors_true(self):
+ """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
+ self.patchup({
+ '/dev/sda': {
+ 'partitions': {
+ '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
+ }}})
+
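+        # Simulate 'mount' exiting 32 because the kernel has no ntfs driver.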
+ err = ("Unexpected error while running command.\n",
+ "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ",
+ "'/dev/sda1', '/fake-tmp/dir']\n"
+ "Exit code: 32\n"
+ "Reason: -\n"
+ "Stdout: -\n"
+ "Stderr: mount: unknown filesystem type 'ntfs'")
+ self.m_mount_cb.side_effect = MountFailedError(
+ 'Failed mounting %s to %s due to: %s' %
+ ('/dev/sda', '/fake-tmp/dir', err))
+
+ value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
+ preserve_ntfs=False)
+ self.assertTrue(value)
+ self.assertIn('cannot mount NTFS, assuming', msg)
+
+ def test_never_destroy_ntfs_config_false(self):
+ """Normally formattable situation with never_destroy_ntfs set."""
+ self.patchup({
+ '/dev/sda': {
+ 'partitions': {
+ '/dev/sda1': {'num': 1, 'fs': 'ntfs',
+ 'files': ['dataloss_warning_readme.txt']}
+ }}})
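+        # An otherwise-reformattable layout must be left alone when
+        # preserve_ntfs is True.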
+ value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+ preserve_ntfs=True)
+ self.assertFalse(value)
+ self.assertIn("config says to never destroy NTFS "
+ "(datasource.Azure.never_destroy_ntfs)", msg)
+
+
+class TestClearCachedData(CiTestCase):
+
+ def test_clear_cached_attrs_clears_imds(self):
+ """All class attributes are reset to defaults, including imds data."""
+ tmp = self.tmp_dir()
+ paths = helpers.Paths(
+ {'cloud_dir': tmp, 'run_dir': tmp})
+ dsrc = dsaz.DataSourceAzure({}, distro=None, paths=paths)
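+        # Snapshot the pristine defaults to compare against after the reset.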
+ clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
+ dsrc.metadata = 'md'
+ dsrc.userdata = 'ud'
+ dsrc._metadata_imds = 'imds'
+ dsrc._dirty_cache = True
+ dsrc.clear_cached_attrs()
+ self.assertEqual(
+ [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds],
+ clean_values)
+
class TestAzureNetExists(CiTestCase):
@@ -1125,19 +1449,9 @@ class TestAzureNetExists(CiTestCase):
self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
-@mock.patch('cloudinit.sources.DataSourceAzure.util.subp')
-@mock.patch.object(dsaz, 'get_hostname')
-@mock.patch.object(dsaz, 'set_hostname')
-class TestAzureDataSourcePreprovisioning(CiTestCase):
-
- def setUp(self):
- super(TestAzureDataSourcePreprovisioning, self).setUp()
- tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
- def test_read_azure_ovf_with_true_flag(self, *args):
+ def test_read_azure_ovf_with_true_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
cfg flag if the proper setting is present."""
content = construct_valid_ovf_env(
@@ -1146,7 +1460,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
cfg = ret[2]
self.assertTrue(cfg['PreprovisionedVm'])
- def test_read_azure_ovf_with_false_flag(self, *args):
+ def test_read_azure_ovf_with_false_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
cfg flag to false if the proper setting is false."""
content = construct_valid_ovf_env(
@@ -1155,7 +1469,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
cfg = ret[2]
self.assertFalse(cfg['PreprovisionedVm'])
- def test_read_azure_ovf_without_flag(self, *args):
+ def test_read_azure_ovf_without_flag(self):
"""The read_azure_ovf method should not set the
PreprovisionedVM cfg flag."""
content = construct_valid_ovf_env()
@@ -1163,12 +1477,121 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
cfg = ret[2]
self.assertFalse(cfg['PreprovisionedVm'])
- @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('requests.Session.request')
+
+@mock.patch('os.path.isfile')
+class TestPreprovisioningShouldReprovision(CiTestCase):
+
+ def setUp(self):
+ super(TestPreprovisioningShouldReprovision, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
+ self.paths = helpers.Paths({'cloud_dir': tmp})
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    def test__should_reprovision_with_true_cfg(self, write_f, isfile):
+        """The _should_reprovision method should return True when the
+        PreprovisionedVm config flag is present."""
+ isfile.return_value = False
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertTrue(dsa._should_reprovision(
+ (None, None, {'PreprovisionedVm': True}, None)))
+
+ def test__should_reprovision_with_file_existing(self, isfile):
+ """The _should_reprovision method should return True if the sentinal
+ exists."""
+ isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertTrue(dsa._should_reprovision(
+ (None, None, {'preprovisionedvm': False}, None)))
+
+ def test__should_reprovision_returns_false(self, isfile):
+ """The _should_reprovision method should return False
+        if config and sentinel are not present."""
+ isfile.return_value = False
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
+ """_reprovision will poll IMDS."""
+ isfile.return_value = False
+ hostname = "myhost"
+ username = "myuser"
+ odata = {'HostName': hostname, 'UserName': username}
+ _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._reprovision()
+ _poll_imds.assert_called_with()
+
+
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+@mock.patch('requests.Session.request')
+@mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+class TestPreprovisioningPollIMDS(CiTestCase):
+
+ def setUp(self):
+ super(TestPreprovisioningPollIMDS, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ def test_poll_imds_calls_report_ready(self, write_f, report_ready_func,
+ fake_resp, m_dhcp, m_net):
+ """The poll_imds will call report_ready after creating marker file."""
+ report_marker = self.tmp_path('report_marker', self.tmp)
+ lease = {
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}
+ m_dhcp.return_value = [lease]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
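+        # Redirect the reported-ready marker to a temp file for this test.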
+ mock_path = (
+ 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE')
+ with mock.patch(mock_path, report_marker):
+ dsa._poll_imds()
+ self.assertEqual(report_ready_func.call_count, 1)
+ report_ready_func.assert_called_with(lease=lease)
+
+ def test_poll_imds_report_ready_false(self, report_ready_func,
+ fake_resp, m_dhcp, m_net):
+ """The poll_imds should not call reporting ready
+ when flag is false"""
+ report_marker = self.tmp_path('report_marker', self.tmp)
+ write_file(report_marker, content='dont run report_ready :)')
+ m_dhcp.return_value = [{
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ mock_path = (
+ 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE')
+ with mock.patch(mock_path, report_marker):
+ dsa._poll_imds()
+ self.assertEqual(report_ready_func.call_count, 0)
+
+
+@mock.patch('cloudinit.sources.DataSourceAzure.util.subp')
+@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+@mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+@mock.patch('requests.Session.request')
+class TestAzureDataSourcePreprovisioning(CiTestCase):
+
+ def setUp(self):
+ super(TestAzureDataSourcePreprovisioning, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
+ self.paths = helpers.Paths({'cloud_dir': tmp})
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net,
- m_is_bsd, *args):
+ m_is_bsd, write_f, subp):
"""The _poll_imds method should return the ovf_env.xml."""
m_is_bsd.return_value = False
m_dhcp.return_value = [{
@@ -1194,12 +1617,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
prefix_or_mask='255.255.255.0', router='192.168.2.1')
self.assertEqual(m_net.call_count, 1)
- @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD')
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('requests.Session.request')
def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net,
- m_is_bsd, *args):
+ m_is_bsd, write_f, subp):
"""The _reprovision method should call poll IMDS."""
m_is_bsd.return_value = False
m_dhcp.return_value = [{
@@ -1216,7 +1635,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
fake_resp.return_value = mock.MagicMock(status_code=200, text=content,
content=content)
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- md, ud, cfg, d = dsa._reprovision()
+ md, _ud, cfg, _d = dsa._reprovision()
self.assertEqual(md['local-hostname'], hostname)
self.assertEqual(cfg['system_info']['default_user']['name'], username)
self.assertEqual(fake_resp.call_args_list,
@@ -1231,32 +1650,95 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
prefix_or_mask='255.255.255.0', router='192.168.2.1')
self.assertEqual(m_net.call_count, 1)
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch('os.path.isfile')
- def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args):
- """The _should_reprovision method should return true with config
- flag present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'PreprovisionedVm': True}, None)))
- @mock.patch('os.path.isfile')
- def test__should_reprovision_with_file_existing(self, isfile, *args):
- """The _should_reprovision method should return True if the sentinal
- exists."""
- isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'preprovisionedvm': False}, None)))
+class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
- @mock.patch('os.path.isfile')
- def test__should_reprovision_returns_false(self, isfile, *args):
- """The _should_reprovision method should return False
- if config and sentinal are not present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+ with_logs = True
+
+ def setUp(self):
+ super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_remove_network_scripts_removes_both_files_and_directories(self):
+ """Any files or directories in paths are removed when present."""
+ file1 = self.tmp_path('file1', dir=self.tmp)
+ subdir = self.tmp_path('sub1', dir=self.tmp)
+ subfile = self.tmp_path('leaf1', dir=subdir)
+ write_file(file1, 'file1content')
+ write_file(subfile, 'leafcontent')
+ dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
+
+ for path in (file1, subdir, subfile):
+ self.assertFalse(os.path.exists(path),
+ 'Found unremoved: %s' % path)
+
+ expected_logs = [
+ 'INFO: Removing Ubuntu extended network scripts because cloud-init'
+ ' updates Azure network configuration on the following event:'
+ ' System boot.',
+ 'Recursively deleting %s' % subdir,
+ 'Attempting to remove %s' % file1]
+ for log in expected_logs:
+ self.assertIn(log, self.logs.getvalue())
+
+ def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
+ """Any files or directories absent are skipped without error."""
+ dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[
+ self.tmp_path('nodirhere/', dir=self.tmp),
+ self.tmp_path('notfilehere', dir=self.tmp)])
+        # No delete logs should be emitted for absent paths.
+        self.assertNotIn('Recursively deleting', self.logs.getvalue())
+        self.assertNotIn('Attempting to remove', self.logs.getvalue())
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists')
+ def test_remove_network_scripts_default_removes_stock_scripts(self,
+ m_exists):
+ """Azure's stock ubuntu image scripts and artifacts are removed."""
+ # Report path absent on all to avoid delete operation
+ m_exists.return_value = False
+ dsaz.maybe_remove_ubuntu_network_config_scripts()
+ calls = m_exists.call_args_list
+ for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
+ self.assertIn(mock.call(path), calls)
+
+
+class TestWBIsPlatformViable(CiTestCase):
+ """White box tests for _is_platform_viable."""
+ with_logs = True
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data')
+    def test_true_on_azure_chassis(self, m_read_dmi_data):
+ """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
+ self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.os.path.exists')
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data')
+ def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
+ """Return True if ovf-env.xml exists in known seed dirs."""
+ # Non-matching Azure chassis-asset-tag
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
+
+ m_exist.return_value = True
+ self.assertTrue(dsaz._is_platform_viable('/some/seed/dir'))
+        # mock's called_once_with is a no-op; assert the call count instead.
+        self.assertEqual(1, m_exist.call_count)
+
+ def test_false_on_no_matching_azure_criteria(self):
+ """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
+
+ Return False when the asset tag doesn't match Azure's static
+ AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
+ and no devices have a label starting with prefix 'rd_rdfe_'.
+ """
+ self.assertFalse(wrap_and_call(
+ 'cloudinit.sources.DataSourceAzure',
+ {'os.path.exists': False,
+ # Non-matching Azure chassis-asset-tag
+ 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
+ 'util.which': None},
+ dsaz._is_platform_viable, 'doesnotmatter'))
+ self.assertIn(
+ "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
+ dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'),
+ self.logs.getvalue())
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index b42b073f..26b2b93d 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -85,7 +85,9 @@ class TestFindEndpoint(CiTestCase):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
- def test_latest_lease_used(self):
+ @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD')
+ def test_latest_lease_used(self, m_is_freebsd):
+ m_is_freebsd.return_value = False # To avoid hitting load_file
encoded_addresses = ['5:4:3:2', '4:3:2:1']
file_content = '\n'.join([self._build_lease_content(encoded_address)
for encoded_address in encoded_addresses])
@@ -195,7 +197,7 @@ class TestAzureEndpointHttpClient(CiTestCase):
self.addCleanup(patches.close)
self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.util, 'read_file_or_url'))
+ mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index f6a59b6b..380ad1b5 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -42,6 +42,9 @@ class CepkoMock(Cepko):
class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def setUp(self):
super(DataSourceCloudSigmaTest, self).setUp()
+ self.add_patch(
+ "cloudinit.sources.DataSourceCloudSigma.util.is_container",
+ "m_is_container", return_value=False)
self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
"", "", paths=self.paths)
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index ec333888..6b01a4ea 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -20,6 +20,7 @@ from cloudinit.sources import (
DataSourceNoCloud as NoCloud,
DataSourceOpenNebula as OpenNebula,
DataSourceOpenStack as OpenStack,
+ DataSourceOracle as Oracle,
DataSourceOVF as OVF,
DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
@@ -37,9 +38,12 @@ DEFAULT_LOCAL = [
IBMCloud.DataSourceIBMCloud,
NoCloud.DataSourceNoCloud,
OpenNebula.DataSourceOpenNebula,
+ Oracle.DataSourceOracle,
OVF.DataSourceOVF,
SmartOS.DataSourceSmartOS,
Ec2.DataSourceEc2Local,
+ OpenStack.DataSourceOpenStackLocal,
+ Scaleway.DataSourceScaleway,
]
DEFAULT_NETWORK = [
@@ -54,7 +58,6 @@ DEFAULT_NETWORK = [
NoCloud.DataSourceNoCloudNet,
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
- Scaleway.DataSourceScaleway,
]
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 68400f22..231619c9 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -136,6 +136,7 @@ NETWORK_DATA_3 = {
]
}
+BOND_MAC = "fa:16:3e:b3:72:36"
NETWORK_DATA_BOND = {
"services": [
{"type": "dns", "address": "1.1.1.191"},
@@ -163,7 +164,7 @@ NETWORK_DATA_BOND = {
{"bond_links": ["eth0", "eth1"],
"bond_miimon": 100, "bond_mode": "4",
"bond_xmit_hash_policy": "layer3+4",
- "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "ethernet_mac_address": BOND_MAC,
"id": "bond0", "type": "bond"},
{"ethernet_mac_address": "fa:16:3e:b3:72:30",
"id": "vlan2", "type": "vlan", "vlan_id": 602,
@@ -224,6 +225,9 @@ class TestConfigDriveDataSource(CiTestCase):
def setUp(self):
super(TestConfigDriveDataSource, self).setUp()
+ self.add_patch(
+ "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with",
+ "m_find_devs_with", return_value=[])
self.tmp = self.tmp_dir()
def test_ec2_metadata(self):
@@ -642,7 +646,7 @@ class TestConvertNetworkData(CiTestCase):
routes)
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), self.tmp)
+ network_state.parse_net_config_data(ncfg), target=self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
@@ -664,7 +668,7 @@ class TestConvertNetworkData(CiTestCase):
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), self.tmp)
+ network_state.parse_net_config_data(ncfg), target=self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
@@ -688,6 +692,9 @@ class TestConvertNetworkData(CiTestCase):
self.assertIn("auto oeth0", eni_rendering)
self.assertIn("auto oeth1", eni_rendering)
self.assertIn("auto bond0", eni_rendering)
+ # The bond should have the given mac address
+ pos = eni_rendering.find("auto bond0")
+ self.assertIn(BOND_MAC, eni_rendering[pos:])
def test_vlan(self):
# light testing of vlan config conversion and eni rendering
@@ -695,7 +702,7 @@ class TestConvertNetworkData(CiTestCase):
known_macs=KNOWN_MACS)
eni_renderer = eni.Renderer()
eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), self.tmp)
+ network_state.parse_net_config_data(ncfg), target=self.tmp)
with open(os.path.join(self.tmp, "etc",
"network", "interfaces"), 'r') as f:
eni_rendering = f.read()
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index dff8b1ec..497e7610 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -191,7 +191,6 @@ def register_mock_metaserver(base_url, data):
register(base_url, 'not found', status=404)
def myreg(*argc, **kwargs):
- # print("register_url(%s, %s)" % (argc, kwargs))
return httpretty.register_uri(httpretty.GET, *argc, **kwargs)
register_helper(myreg, base_url, data)
@@ -236,7 +235,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
return_value=platform_data)
if md:
- httpretty.HTTPretty.allow_net_connect = False
all_versions = (
[ds.min_metadata_version] + ds.extended_metadata_versions)
for version in all_versions:
@@ -255,7 +253,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(instance_id_url, None)
return ds
- @httpretty.activate
def test_network_config_property_returns_version_1_network_data(self):
"""network_config property returns network version 1 for metadata.
@@ -288,7 +285,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- @httpretty.activate
def test_network_config_property_set_dhcp4_on_private_ipv4(self):
"""network_config property configures dhcp4 on private ipv4 nics.
@@ -330,7 +326,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ds._network_config = {'cached': 'data'}
self.assertEqual({'cached': 'data'}, ds.network_config)
- @httpretty.activate
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -364,7 +359,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
'type': 'physical'}]}
self.assertEqual(expected, ds.network_config)
- @httpretty.activate
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
"""get_instance-id gets DataSourceEc2Local.identity if not present.
@@ -397,7 +391,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ds.metadata = DEFAULT_METADATA
self.assertEqual('my-identity-id', ds.get_instance_id())
- @httpretty.activate
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
@@ -409,7 +402,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ret)
self.assertEqual(0, m_dhcp.call_count)
- @httpretty.activate
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
@@ -419,7 +411,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- @httpretty.activate
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
@@ -430,7 +421,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertFalse(ret)
- @httpretty.activate
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
@@ -462,7 +452,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
' not {0}'.format(platform_name))
self.assertIn(message, self.logs.getvalue())
- @httpretty.activate
@mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -481,7 +470,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
"FreeBSD doesn't support running dhclient with -sf",
self.logs.getvalue())
- @httpretty.activate
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.find_fallback_nic')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index eb3cec42..41176c6a 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -78,7 +78,6 @@ def _set_mock_metadata(gce_meta=None):
return (404, headers, '')
# reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
- httpretty.reset()
httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py
index 621cfe49..e639ae47 100644
--- a/tests/unittests/test_datasource/test_ibmcloud.py
+++ b/tests/unittests/test_datasource/test_ibmcloud.py
@@ -259,4 +259,54 @@ class TestReadMD(test_helpers.CiTestCase):
ret['metadata'])
+class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
+ """Test the _is_ibm_provisioning method."""
+ inst_log = "/root/swinstall.log"
+ prov_cfg = "/root/provisioningConfiguration.cfg"
+ boot_ref = "/proc/1/environ"
+ with_logs = True
+
+ def _call_with_root(self, rootd):
+ self.reRoot(rootd)
+ return ibm._is_ibm_provisioning()
+
+ def test_no_config(self):
+ """No provisioning config means not provisioning."""
+ self.assertFalse(self._call_with_root(self.tmp_dir()))
+
+ def test_config_only(self):
+ """A provisioning config without a log means provisioning."""
+ rootd = self.tmp_dir()
+ test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"})
+ self.assertTrue(self._call_with_root(rootd))
+
+ def test_config_with_old_log(self):
+ """A config with a log from previous boot is not provisioning."""
+ rootd = self.tmp_dir()
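+        # Values are (content, mtime offset in seconds): the install log
+        # predates the boot reference, so it is from a previous boot.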
+ data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0)}
+ test_helpers.populate_dir_with_ts(rootd, data)
+ self.assertFalse(self._call_with_root(rootd=rootd))
+ self.assertIn("from previous boot", self.logs.getvalue())
+
+ def test_config_with_new_log(self):
+ """A config with a log from this boot is provisioning."""
+ rootd = self.tmp_dir()
+ data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0)}
+ test_helpers.populate_dir_with_ts(rootd, data)
+ self.assertTrue(self._call_with_root(rootd=rootd))
+ self.assertIn("from current boot", self.logs.getvalue())
+
+ def test_config_and_log_no_reference(self):
+ """If the config and log existed, but no reference, assume not."""
+ rootd = self.tmp_dir()
+ test_helpers.populate_dir(
+ rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"})
+ self.assertFalse(self._call_with_root(rootd=rootd))
+ self.assertIn("no reference file", self.logs.getvalue())
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 6e4031cf..c84d067e 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -53,7 +53,7 @@ class TestMAASDataSource(CiTestCase):
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
- ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
+ ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
for key in ('instance-id', 'local-hostname'):
@@ -149,7 +149,7 @@ class TestMAASDataSource(CiTestCase):
'meta-data/local-hostname': 'test-hostname',
'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
}
- ud, md, vd = self.mock_read_maas_seed_url(
+ _ud, md, vd = self.mock_read_maas_seed_url(
valid, "http://example.com/foo")
self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 70d50de4..21931eb7 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -25,6 +25,8 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
+ self.mocks.enter_context(
+ mock.patch.object(util, 'read_dmi_data', return_value=None))
def test_nocloud_seed_dir(self):
md = {'instance-id': 'IID', 'dsmode': 'local'}
@@ -51,9 +53,6 @@ class TestNoCloudDataSource(CiTestCase):
class PsuedoException(Exception):
pass
- def my_find_devs_with(*args, **kwargs):
- raise PsuedoException
-
self.mocks.enter_context(
mock.patch.object(util, 'find_devs_with',
side_effect=PsuedoException))
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index ab42f344..61591017 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -43,6 +43,7 @@ DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
class TestOpenNebulaDataSource(CiTestCase):
parsed_user = None
+ allowed_subp = ['bash']
def setUp(self):
super(TestOpenNebulaDataSource, self).setUp()
@@ -354,6 +355,412 @@ class TestOpenNebulaNetwork(unittest.TestCase):
system_nics = ('eth0', 'ens3')
+ def test_context_devname(self):
+ """Verify context_devname correctly returns mac and name."""
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH1_MAC': '02:00:0a:12:0f:0f', }
+ expected = {
+ '02:00:0a:12:01:01': 'ETH0',
+ '02:00:0a:12:0f:0f': 'ETH1', }
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(expected, net.context_devname)
+
+ def test_get_nameservers(self):
+ """
+ Verify get_nameservers('device') correctly returns DNS server addresses
+ and search domains.
+ """
+ context = {
+ 'DNS': '1.2.3.8',
+ 'ETH0_DNS': '1.2.3.6 1.2.3.7',
+ 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ expected = {
+ 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
+ 'search': ['example.com', 'example.org']}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_nameservers('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_mtu(self):
+ """Verify get_mtu('device') correctly returns MTU size."""
+ context = {'ETH0_MTU': '1280'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mtu('eth0')
+ self.assertEqual('1280', val)
+
+ def test_get_ip(self):
+ """Verify get_ip('device') correctly returns IPv4 address."""
+ context = {'ETH0_IP': PUBLIC_IP}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip('eth0', MACADDR)
+ self.assertEqual(PUBLIC_IP, val)
+
+ def test_get_ip_emptystring(self):
+ """
+ Verify get_ip('device') correctly returns IPv4 address.
+        It returns the IP address derived from the MAC address if ETH0_IP
+        is an empty string.
+ """
+ context = {'ETH0_IP': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip('eth0', MACADDR)
+ self.assertEqual(IP_BY_MACADDR, val)
+
+ def test_get_ip6(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+        In this case, the IPv6 address is given by ETH0_IP6.
+ """
+ context = {
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_ULA': '', }
+ expected = [IP6_GLOBAL]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_ula(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+        In this case, the IPv6 address is given by ETH0_IP6_ULA.
+ """
+ context = {
+ 'ETH0_IP6': '',
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ expected = [IP6_ULA]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_dual(self):
+ """
+ Verify get_ip6('device') correctly returns IPv6 address.
+        In this case, the IPv6 addresses are given by ETH0_IP6 and
+        ETH0_IP6_ULA.
+ """
+ context = {
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ expected = [IP6_GLOBAL, IP6_ULA]
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6('eth0')
+ self.assertEqual(expected, val)
+
+ def test_get_ip6_prefix(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+ """
+ context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6_prefix('eth0')
+ self.assertEqual(IP6_PREFIX, val)
+
+ def test_get_ip6_prefix_emptystring(self):
+ """
+ Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
+        It returns the default value '64' if ETH0_IP6_PREFIX_LENGTH is an
+        empty string.
+ """
+ context = {'ETH0_IP6_PREFIX_LENGTH': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_ip6_prefix('eth0')
+ self.assertEqual('64', val)
+
+ def test_get_gateway(self):
+ """
+ Verify get_gateway('device') correctly returns IPv4 default gateway
+ address.
+ """
+ context = {'ETH0_GATEWAY': '1.2.3.5'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_gateway('eth0')
+ self.assertEqual('1.2.3.5', val)
+
+ def test_get_gateway6(self):
+ """
+ Verify get_gateway6('device') correctly returns IPv6 default gateway
+ address.
+ """
+ context = {'ETH0_GATEWAY6': IP6_GW}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_gateway6('eth0')
+ self.assertEqual(IP6_GW, val)
+
+ def test_get_mask(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+ """
+ context = {'ETH0_MASK': '255.255.0.0'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mask('eth0')
+ self.assertEqual('255.255.0.0', val)
+
+ def test_get_mask_emptystring(self):
+ """
+ Verify get_mask('device') correctly returns IPv4 subnet mask.
+        It returns the default value '255.255.255.0' if ETH0_MASK is an
+        empty string.
+ """
+ context = {'ETH0_MASK': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_mask('eth0')
+ self.assertEqual('255.255.255.0', val)
+
+ def test_get_network(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+ """
+ context = {'ETH0_NETWORK': '1.2.3.0'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_network('eth0', MACADDR)
+ self.assertEqual('1.2.3.0', val)
+
+ def test_get_network_emptystring(self):
+ """
+ Verify get_network('device') correctly returns IPv4 network address.
+        It returns the network address derived from the MAC address if
+        ETH0_NETWORK is an empty string.
+ """
+ context = {'ETH0_NETWORK': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_network('eth0', MACADDR)
+ self.assertEqual('10.18.1.0', val)
+
+ def test_get_field(self):
+ """
+ Verify get_field('device', 'name') returns *context* value.
+ """
+ context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual('DUMMY_VALUE', val)
+
+ def test_get_field_withdefaultvalue(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *context*
+ value.
+ """
+ context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
+ self.assertEqual('DUMMY_VALUE', val)
+
+ def test_get_field_withdefaultvalue_emptycontext(self):
+ """
+ Verify get_field('device', 'name', 'default value') returns *default*
+ value if context value is empty string.
+ """
+ context = {'ETH9_DUMMY': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
+ self.assertEqual('DEFAULT_VALUE', val)
+
+ def test_get_field_emptycontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ empty string.
+ """
+ context = {'ETH9_DUMMY': ''}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual(None, val)
+
+ def test_get_field_nonecontext(self):
+ """
+ Verify get_field('device', 'name') returns None if context value is
+ None.
+ """
+ context = {'ETH9_DUMMY': None}
+ net = ds.OpenNebulaNetwork(context)
+ val = net.get_field('eth9', 'dummy')
+ self.assertEqual(None, val)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv4 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY': '1.2.3.5', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'gateway4': '1.2.3.5',
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_gateway6(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 gateway"""
+ self.maxDiff = None
+ # empty ETH0_GATEWAY6
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY6': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_GATEWAY6
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_GATEWAY6': IP6_GW, }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'gateway6': IP6_GW,
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_ipv6address(self, m_get_phys_by_mac):
+ """Test rendering with/without IPv6 address"""
+ self.maxDiff = None
+ # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_IP6': '',
+ 'ETH0_IP6_ULA': '',
+ 'ETH0_IP6_PREFIX_LENGTH': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_IP6': IP6_GLOBAL,
+ 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
+ 'ETH0_IP6_ULA': IP6_ULA, }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [
+ IP_BY_MACADDR + '/' + IP4_PREFIX,
+ IP6_GLOBAL + '/' + IP6_PREFIX,
+ IP6_ULA + '/' + IP6_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_dns(self, m_get_phys_by_mac):
+ """Test rendering with/without DNS server, search domain"""
+ self.maxDiff = None
+ # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'DNS': '',
+ 'ETH0_DNS': '',
+ 'ETH0_SEARCH_DOMAIN': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'DNS': '1.2.3.8',
+ 'ETH0_DNS': '1.2.3.6 1.2.3.7',
+ 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'nameservers': {
+ 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
+ 'search': ['example.com', 'example.org']},
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_gen_conf_mtu(self, m_get_phys_by_mac):
+ """Test rendering with/without MTU"""
+ self.maxDiff = None
+ # empty ETH0_MTU
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_MTU': '', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
+ # set ETH0_MTU
+ context = {
+ 'ETH0_MAC': '02:00:0a:12:01:01',
+ 'ETH0_MTU': '1280', }
+ for nic in self.system_nics:
+ expected = {
+ 'version': 2,
+ 'ethernets': {
+ nic: {
+ 'mtu': '1280',
+ 'match': {'macaddress': MACADDR},
+ 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ m_get_phys_by_mac.return_value = {MACADDR: nic}
+ net = ds.OpenNebulaNetwork(context)
+ self.assertEqual(net.gen_conf(), expected)
+
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_eth0(self, m_get_phys_by_mac):
for nic in self.system_nics:
@@ -395,7 +802,6 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MACADDR},
'addresses': [IP_BY_MACADDR + '/16'],
'gateway4': '1.2.3.5',
- 'gateway6': None,
'nameservers': {
'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
@@ -494,7 +900,6 @@ class TestOpenNebulaNetwork(unittest.TestCase):
'match': {'macaddress': MAC_1},
'addresses': ['10.3.1.3/16'],
'gateway4': '10.3.0.1',
- 'gateway6': None,
'nameservers': {
'addresses': ['10.3.1.2', '1.2.3.8'],
'search': [
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 42c31554..a731f1ed 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -12,11 +12,11 @@ import re
from cloudinit.tests import helpers as test_helpers
from six.moves.urllib.parse import urlparse
-from six import StringIO
+from six import StringIO, text_type
from cloudinit import helpers
from cloudinit import settings
-from cloudinit.sources import convert_vendordata
+from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
@@ -69,6 +69,8 @@ EC2_VERSIONS = [
'latest',
]
+MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
+
# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
def _register_uris(version, ec2_files, ec2_meta, os_files):
@@ -129,13 +131,14 @@ def _read_metadata_service():
class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+
+ with_logs = True
VERSION = 'latest'
def setUp(self):
super(TestOpenStackDataSource, self).setUp()
self.tmp = self.tmp_dir()
- @hp.activate
def test_successful(self):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
f = _read_metadata_service()
@@ -157,7 +160,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
metadata.get('instance-id'))
- @hp.activate
def test_no_ec2(self):
_register_uris(self.VERSION, {}, {}, OS_FILES)
f = _read_metadata_service()
@@ -168,7 +170,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual({}, f.get('ec2-metadata'))
self.assertEqual(2, f.get('version'))
- @hp.activate
def test_bad_metadata(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -177,7 +178,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.NonReadable, _read_metadata_service)
- @hp.activate
def test_bad_uuid(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
@@ -186,9 +186,8 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
if k.endswith('meta_data.json'):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
- @hp.activate
def test_userdata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -201,7 +200,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('userdata'))
- @hp.activate
def test_vendordata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -213,32 +211,32 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('vendordata'))
- @hp.activate
def test_vendordata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('vendor_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
- @hp.activate
def test_metadata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.BrokenMetadata, _read_metadata_service)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
- @hp.activate
- def test_datasource(self):
+ @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ def test_datasource(self, m_dhcp):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ mock_path = MOCK_PATH + 'detect_openstack'
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
self.assertTrue(found)
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
@@ -250,8 +248,40 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(2, len(ds_os.files))
self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
self.assertIsNone(ds_os.vendordata_raw)
+ m_dhcp.assert_not_called()
@hp.activate
+ @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ def test_local_datasource(self, m_dhcp, m_net):
+ """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os_local = ds.DataSourceOpenStackLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp
+ m_dhcp.return_value = [{
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'broadcast-address': '192.168.2.255'}]
+
+ self.assertIsNone(ds_os_local.version)
+ mock_path = MOCK_PATH + 'detect_openstack'
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os_local.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os_local.version)
+ md = dict(ds_os_local.metadata)
+ md.pop('instance-id', None)
+ md.pop('local-hostname', None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
+ self.assertEqual(2, len(ds_os_local.files))
+ self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertIsNone(ds_os_local.vendordata_raw)
+ m_dhcp.assert_called_with('eth9')
+
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -262,11 +292,17 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
None,
helpers.Paths({'run_dir': self.tmp}))
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ mock_path = MOCK_PATH + 'detect_openstack'
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
+ self.assertIn(
+ 'InvalidMetaDataException: Broken metadata address'
+ ' http://169.254.169.25',
+ self.logs.getvalue())
- @hp.activate
def test_no_datasource(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -281,11 +317,53 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ mock_path = MOCK_PATH + 'detect_openstack'
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
- @hp.activate
+ def test_network_config_disabled_by_datasource_config(self):
+ """The network_config can be disabled from datasource config."""
+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ ds_os.ds_cfg = {'apply_network_config': False}
+ sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
+ 'networks': [], 'services': []}
+ ds_os.network_json = sample_json # Ignore this content from metadata
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertIsNone(ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_network_config_from_network_json(self):
+ """The datasource gets network_config from network_data.json."""
+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
+ example_cfg = {'version': 1, 'config': []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
+ 'networks': [], 'services': []}
+ ds_os.network_json = sample_json
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ m_convert_json.return_value = example_cfg
+ self.assertEqual(example_cfg, ds_os.network_config)
+ self.assertIn(
+ 'network config provided via network_json', self.logs.getvalue())
+ m_convert_json.assert_called_with(sample_json, known_macs=None)
+
+ def test_network_config_cached(self):
+ """The datasource caches the network_config property."""
+ mock_path = MOCK_PATH + 'openstack.convert_net_json'
+ example_cfg = {'version': 1, 'config': []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ ds_os._network_config = example_cfg
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertEqual(example_cfg, ds_os.network_config)
+ m_convert_json.assert_not_called()
+
def test_disabled_datasource(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
@@ -304,10 +382,42 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
'timeout': 0,
}
self.assertIsNone(ds_os.version)
- found = ds_os.get_data()
+ mock_path = MOCK_PATH + 'detect_openstack'
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
+ @hp.activate
+ def test_wb__crawl_metadata_does_not_persist(self):
+ """_crawl_metadata returns current metadata and does not cache."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ crawled_data = ds_os._crawl_metadata()
+ self.assertEqual(UNSET, ds_os.ec2_metadata)
+ self.assertIsNone(ds_os.userdata_raw)
+ self.assertEqual(0, len(ds_os.files))
+ self.assertIsNone(ds_os.vendordata_raw)
+ self.assertEqual(
+ ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
+ 'userdata', 'vendordata', 'version'],
+ sorted(crawled_data.keys()))
+ self.assertEqual('local', crawled_data['dsmode'])
+ self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
+ self.assertEqual(2, len(crawled_data['files']))
+ md = copy.deepcopy(crawled_data['metadata'])
+ md.pop('instance-id')
+ md.pop('local-hostname')
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(
+ json.loads(OS_FILES['openstack/latest/network_data.json']),
+ crawled_data['networkdata'])
+ self.assertEqual(USER_DATA, crawled_data['userdata'])
+ self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
+ self.assertEqual(2, crawled_data['version'])
+
class TestVendorDataLoading(test_helpers.TestCase):
def cvj(self, data):
@@ -339,4 +449,200 @@ class TestVendorDataLoading(test_helpers.TestCase):
data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
self.assertEqual(self.cvj(data), data['cloud-init'])
+
+@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
+class TestDetectOpenStack(test_helpers.CiTestCase):
+
+ def test_detect_openstack_non_intel_x86(self, m_is_x86):
+ """Return True on non-intel platforms because dmi isn't conclusive."""
+ m_is_x86.return_value = False
+ self.assertTrue(
+ ds.detect_openstack(), 'Expected detect_openstack == True')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
+ m_is_x86):
+ """Return False on EC2 platforms."""
+ m_is_x86.return_value = True
+ # No product_name in proc/1/environ
+ m_proc_env.return_value = {'HOME': '/'}
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'HVM domU' # Nothing 'openstackish' on EC2
+ if dmi_key == 'chassis-asset-tag':
+ return '' # Empty string on EC2
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertFalse(
+ ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
+ m_proc_env.assert_called_with(1)
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_intel_product_name_compute(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack compute and nova instances."""
+ m_is_x86.return_value = True
+ openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
+
+ for product_name in openstack_product_names:
+ m_dmi.return_value = product_name
+ self.assertTrue(
+ ds.detect_openstack(), 'Failed to detect_openstack')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud
+ if dmi_key == 'chassis-asset-tag':
+ return 'OpenTelekomCloud'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on OpenTelekomCloud')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'Standard PC (i440FX + PIIX, 1996)' # No match
+ if dmi_key == 'chassis-asset-tag':
+ return 'OracleCloud.com'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(accept_oracle=True),
+ 'Expected detect_openstack == True on OracleCloud.com')
+ self.assertFalse(
+ ds.detect_openstack(accept_oracle=False),
+            'Expected detect_openstack == False with accept_oracle=False.')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
+ m_is_x86):
+ """Return True when nova product_name specified in /proc/1/environ."""
+ m_is_x86.return_value = True
+ # Nova product_name in proc/1/environ
+ m_proc_env.return_value = {
+ 'HOME': '/', 'product_name': 'OpenStack Nova'}
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'HVM domU' # Nothing 'openstackish'
+ if dmi_key == 'chassis-asset-tag':
+                return ''  # Nothing 'openstackish'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+            'Expected detect_openstack == True via /proc/1/environ')
+ m_proc_env.assert_called_with(1)
+
+
+class TestMetadataReader(test_helpers.HttprettyTestCase):
+ """Test the MetadataReader."""
+ burl = 'http://169.254.169.254/'
+ md_base = {
+ 'availability_zone': 'myaz1',
+ 'hostname': 'sm-foo-test.novalocal',
+ "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
+ 'launch_index': 0,
+ 'name': 'sm-foo-test',
+ 'public_keys': {'mykey': PUBKEY},
+ 'project_id': '6a103f813b774b9fb15a4fcd36e1c056',
+ 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
+
+ def register(self, path, body=None, status=200):
+ content = (body if not isinstance(body, text_type)
+ else body.encode('utf-8'))
+ hp.register_uri(
+ hp.GET, self.burl + "openstack" + path, status=status,
+ body=content)
+
+ def register_versions(self, versions):
+ self.register("", '\n'.join(versions))
+ self.register("/", '\n'.join(versions))
+
+ def register_version(self, version, data):
+ content = '\n'.join(sorted(data.keys()))
+ self.register(version, content)
+ self.register(version + "/", content)
+ for path, content in data.items():
+ self.register("/%s/%s" % (version, path), content)
+ self.register("/%s/%s" % (version, path), content)
+ if 'user_data' not in data:
+ self.register("/%s/user_data" % version, "nodata", status=404)
+
+ def test__find_working_version(self):
+ """Test a working version ignores unsupported."""
+ unsup = "2016-11-09"
+ self.register_versions(
+ [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup,
+ openstack.OS_LATEST])
+ self.assertEqual(
+ openstack.OS_LIBERTY,
+ openstack.MetadataReader(self.burl)._find_working_version())
+
+ def test__find_working_version_uses_latest(self):
+ """'latest' should be used if no supported versions."""
+ unsup1, unsup2 = ("2016-11-09", '2017-06-06')
+ self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
+ self.assertEqual(
+ openstack.OS_LATEST,
+ openstack.MetadataReader(self.burl)._find_working_version())
+
+ def test_read_v2_os_ocata(self):
+ """Validate return value of read_v2 for os_ocata data."""
+ md = copy.deepcopy(self.md_base)
+ md['devices'] = []
+ network_data = {'links': [], 'networks': [], 'services': []}
+ vendor_data = {}
+ vendor_data2 = {"static": {}}
+
+ data = {
+ 'meta_data.json': json.dumps(md),
+ 'network_data.json': json.dumps(network_data),
+ 'vendor_data.json': json.dumps(vendor_data),
+ 'vendor_data2.json': json.dumps(vendor_data2),
+ }
+
+ self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
+ self.register_version(openstack.OS_OCATA, data)
+
+ mock_read_ec2 = test_helpers.mock.MagicMock(
+ return_value={'instance-id': 'unused-ec2'})
+ expected_md = copy.deepcopy(md)
+ expected_md.update(
+ {'instance-id': md['uuid'], 'local-hostname': md['hostname']})
+ expected = {
+            'userdata': '',  # Annoyingly, no user-data gives an empty string.
+ 'version': 2,
+ 'metadata': expected_md,
+ 'vendordata': vendor_data,
+ 'networkdata': network_data,
+ 'ec2-metadata': mock_read_ec2.return_value,
+ 'files': {},
+ }
+ reader = openstack.MetadataReader(self.burl)
+ reader._read_ec2_metadata = mock_read_ec2
+ self.assertEqual(expected, reader.read_v2())
+ self.assertEqual(1, mock_read_ec2.call_count)
+
+
# vi: ts=4 expandtab
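
The detection tests above pin down a layered heuristic: trust non-x86 outright, then the DMI product name, then the chassis asset tag, and finally PID 1's environment. The following is a minimal sketch of that flow with the helpers passed in as parameters; the real cloudinit.util functions that the tests mock may differ in signature.

# Minimal sketch of the layered detection heuristic exercised by
# TestDetectOpenStack. The helper callables stand in for the mocked
# cloudinit.util functions; their exact signatures are assumptions.
def detect_openstack_sketch(is_x86, read_dmi_data, get_proc_env,
                            accept_oracle=False):
    if not is_x86():
        return True  # DMI is inconclusive off x86, so assume detection.
    product = read_dmi_data('system-product-name') or ''
    if product in ('OpenStack Nova', 'OpenStack Compute'):
        return True
    asset_tag = read_dmi_data('chassis-asset-tag') or ''
    if asset_tag == 'OpenTelekomCloud':
        return True
    if accept_oracle and asset_tag == 'OracleCloud.com':
        return True
    # Last resort: nova exposes product_name in PID 1's environment.
    return get_proc_env(1).get('product_name') == 'OpenStack Nova'
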
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index fc4eb36e..9d52eb99 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -124,7 +124,9 @@ class TestDatasourceOVF(CiTestCase):
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
retcode = wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': None},
+ {'util.read_dmi_data': None,
+ 'transport_iso9660': (False, None, None),
+ 'transport_vmware_guestd': (False, None, None)},
ds.get_data)
self.assertFalse(retcode, 'Expected False return from ds.get_data')
self.assertIn(
@@ -138,7 +140,9 @@ class TestDatasourceOVF(CiTestCase):
paths=paths)
retcode = wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware'},
+ {'util.read_dmi_data': 'vmware',
+ 'transport_iso9660': (False, None, None),
+ 'transport_vmware_guestd': (False, None, None)},
ds.get_data)
self.assertFalse(retcode, 'Expected False return from ds.get_data')
self.assertIn(
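
These hunks extend the dictionary handed to wrap_and_call, which patches several attributes of one module for the duration of a single call. A rough standard-library equivalent of that pattern is sketched below; it is illustrative only, not cloud-init's actual helper, and it installs each dictionary value as a plain return_value.

# Illustrative sketch of the wrap_and_call pattern used above: patch a
# set of attributes on one module, invoke the callable, then restore.
import contextlib
from unittest import mock


def wrap_and_call_sketch(module, patches, func, *args, **kwargs):
    with contextlib.ExitStack() as stack:
        for name, retval in patches.items():
            stack.enter_context(mock.patch(
                '%s.%s' % (module, name), return_value=retval))
        return func(*args, **kwargs)

With this shape, {'transport_iso9660': (False, None, None)} simply makes the transport probe report that no OVF transport is present.
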
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 8dec06b1..c2bc7a00 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -176,12 +176,18 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.vendordata_url = \
DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
- @httpretty.activate
+ self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway',
+ '_m_on_scaleway', return_value=True)
+ self.add_patch(
+ 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic',
+ '_m_find_fallback_nic', return_value='scalewaynic0')
+
+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
@mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
get_source_address_adapter)
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('time.sleep', return_value=None)
- def test_metadata_ok(self, sleep, m_get_cmdline):
+ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() returns metadata, user data and vendor data.
"""
@@ -212,12 +218,12 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.assertIsNone(self.datasource.region)
self.assertEqual(sleep.call_count, 0)
- @httpretty.activate
+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
@mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
get_source_address_adapter)
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('time.sleep', return_value=None)
- def test_metadata_404(self, sleep, m_get_cmdline):
+ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() returns metadata, but no user data nor vendor data.
"""
@@ -236,12 +242,12 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.assertIsNone(self.datasource.get_vendordata_raw())
self.assertEqual(sleep.call_count, 0)
- @httpretty.activate
+ @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
@mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
get_source_address_adapter)
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('time.sleep', return_value=None)
- def test_metadata_rate_limit(self, sleep, m_get_cmdline):
+ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
"""
get_data() is rate limited two times by the metadata API when fetching
user data.
@@ -265,3 +271,67 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.assertEqual(self.datasource.get_userdata_raw(),
DataResponses.FAKE_USER_DATA)
self.assertEqual(sleep.call_count, 2)
+
+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+ @mock.patch('cloudinit.util.get_cmdline')
+ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4 config if no ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = 'scaleway'
+ fallback_nic.return_value = 'ens2'
+ self.datasource.metadata['ipv6'] = None
+
+ netcfg = self.datasource.network_config
+ resp = {'version': 1,
+ 'config': [{
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]}]
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+ @mock.patch('cloudinit.util.get_cmdline')
+ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4/v6 configs if ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = 'scaleway'
+ fallback_nic.return_value = 'ens2'
+ self.datasource.metadata['ipv6'] = {
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127',
+ }
+
+ netcfg = self.datasource.network_config
+ resp = {'version': 1,
+ 'config': [{
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'},
+ {'type': 'static',
+ 'address': '2000:abc:4444:9876::42:999',
+ 'gateway': '2000:abc:4444:9876::42:000',
+ 'netmask': '127', }
+ ]
+
+ }]
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+ @mock.patch('cloudinit.util.get_cmdline')
+ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
+ """
+ network_config() should return the same data if a network config
+ already exists
+ """
+ m_get_cmdline.return_value = 'scaleway'
+ self.datasource._network_config = '0xdeadbeef'
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, '0xdeadbeef')
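
The expected 'resp' dictionaries follow cloud-init's version-1 network-config schema: always a dhcp4 subnet on the fallback NIC, plus a static subnet when the metadata carries an 'ipv6' mapping. A sketch of that assembly, assuming the address/gateway/netmask key layout used in the fixtures above (the real datasource logic may differ in detail):

# Sketch of how the version-1 network configs asserted above can be
# built from Scaleway-style metadata.
def build_network_config(nic_name, ipv6=None):
    subnets = [{'type': 'dhcp4'}]
    if ipv6:
        subnets.append({'type': 'static',
                        'address': ipv6['address'],
                        'gateway': ipv6['gateway'],
                        'netmask': ipv6['netmask']})
    return {'version': 1,
            'config': [{'type': 'physical', 'name': nic_name,
                        'subnets': subnets}]}

# build_network_config('ens2') matches test_network_config_ok; passing
# the ipv6 mapping from test_network_config_ipv6_ok adds the static
# subnet alongside dhcp4.
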
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 88bae5f9..46d67b94 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -1,4 +1,5 @@
# Copyright (C) 2013 Canonical Ltd.
+# Copyright (c) 2018, Joyent, Inc.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
@@ -15,26 +16,40 @@ from __future__ import print_function
from binascii import crc32
import json
+import multiprocessing
import os
import os.path
import re
-import shutil
+import signal
import stat
-import tempfile
+import unittest2
import uuid
from cloudinit import serial
from cloudinit.sources import DataSourceSmartOS
from cloudinit.sources.DataSourceSmartOS import (
- convert_smartos_network_data as convert_net)
+ convert_smartos_network_data as convert_net,
+ SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
+ identify_file)
import six
from cloudinit import helpers as c_helpers
-from cloudinit.util import b64e
+from cloudinit.util import (
+ b64e, subp, ProcessExecutionError, which, write_file)
-from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase
+from cloudinit.tests.helpers import (
+ CiTestCase, mock, FilesystemMockingTestCase, skipIf)
+
+try:
+ import serial as _pyserial
+ assert _pyserial # avoid pyflakes error F401: import unused
+ HAS_PYSERIAL = True
+except ImportError:
+ HAS_PYSERIAL = False
+
+DSMOS = 'cloudinit.sources.DataSourceSmartOS'
SDC_NICS = json.loads("""
[
{
@@ -318,12 +333,19 @@ MOCK_RETURNS = {
DMI_DATA_RETURN = 'smartdc'
+# Useful for calculating the length of a frame body. A SUCCESS frame is
+# followed by payload characters, or is one character shorter (no trailing
+# space) when SUCCESS carries no payload. See Section 4.3 of
+# https://eng.joyent.com/mdata/protocol.html.
+SUCCESS_LEN = len('0123abcd SUCCESS ')
+NOTFOUND_LEN = len('0123abcd NOTFOUND')
+
class PsuedoJoyentClient(object):
def __init__(self, data=None):
if data is None:
data = MOCK_RETURNS.copy()
self.data = data
+ self._is_open = False
return
def get(self, key, default=None, strip=False):
@@ -344,39 +366,43 @@ class PsuedoJoyentClient(object):
def exists(self):
return True
+ def open_transport(self):
+ assert(not self._is_open)
+ self._is_open = True
+
+ def close_transport(self):
+ assert(self._is_open)
+ self._is_open = False
+
class TestSmartOSDataSource(FilesystemMockingTestCase):
+ jmc_cfact = None
+ get_smartos_environ = None
+
def setUp(self):
super(TestSmartOSDataSource, self).setUp()
- dsmos = 'cloudinit.sources.DataSourceSmartOS'
- patcher = mock.patch(dsmos + ".jmc_client_factory")
- self.jmc_cfact = patcher.start()
- self.addCleanup(patcher.stop)
- patcher = mock.patch(dsmos + ".get_smartos_environ")
- self.get_smartos_environ = patcher.start()
- self.addCleanup(patcher.stop)
-
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.paths = c_helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
-
- self.legacy_user_d = os.path.join(self.tmp, 'legacy_user_tmp')
+ self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
+ self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
+ self.legacy_user_d = self.tmp_path('legacy_user_tmp')
os.mkdir(self.legacy_user_d)
-
- self.orig_lud = DataSourceSmartOS.LEGACY_USER_D
- DataSourceSmartOS.LEGACY_USER_D = self.legacy_user_d
-
- def tearDown(self):
- DataSourceSmartOS.LEGACY_USER_D = self.orig_lud
- super(TestSmartOSDataSource, self).tearDown()
+ self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d",
+ autospec=False, new=self.legacy_user_d)
+ self.add_patch(DSMOS + ".identify_file", "m_identify_file",
+ return_value="text/plain")
def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
sys_cfg=None, ds_cfg=None):
self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
self.get_smartos_environ.return_value = mode
+ tmpd = self.tmp_dir()
+ dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
+ 'run_dir': self.tmp_path('run_dir')}
+ for d in dirs.values():
+ os.mkdir(d)
+ paths = c_helpers.Paths(dirs)
+
if sys_cfg is None:
sys_cfg = {}
@@ -385,7 +411,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
sys_cfg['datasource']['SmartOS'] = ds_cfg
return DataSourceSmartOS.DataSourceSmartOS(
- sys_cfg, distro=None, paths=self.paths)
+ sys_cfg, distro=None, paths=paths)
def test_no_base64(self):
ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
@@ -421,6 +447,34 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
self.assertEqual(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
+ def test_hostname_if_no_sdc_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(my_returns['hostname'],
+ dsrc.metadata['local-hostname'])
+
+ def test_sdc_hostname_if_no_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+ del my_returns['hostname']
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(my_returns['sdc:hostname'],
+ dsrc.metadata['local-hostname'])
+
+ def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
+ my_returns = MOCK_RETURNS.copy()
+ del my_returns['hostname']
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(my_returns['sdc:uuid'],
+ dsrc.metadata['local-hostname'])
+
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
@@ -445,6 +499,7 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
+ print("legacy_script_f=%s" % legacy_script_f)
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
@@ -592,8 +647,68 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
mydscfg['disk_aliases']['FOO'])
+class TestIdentifyFile(CiTestCase):
+ """Test the 'identify_file' utility."""
+ @skipIf(not which("file"), "command 'file' not available.")
+ def test_file_happy_path(self):
+ """Test file is available and functional on plain text."""
+ fname = self.tmp_path("myfile")
+ write_file(fname, "plain text content here\n")
+ with self.allow_subp(["file"]):
+ self.assertEqual("text/plain", identify_file(fname))
+
+ @mock.patch(DSMOS + ".util.subp")
+ def test_returns_none_on_error(self, m_subp):
+ """On 'file' execution error, None should be returned."""
+ m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
+ fname = self.tmp_path("myfile")
+ write_file(fname, "plain text content here\n")
+ self.assertEqual(None, identify_file(fname))
+ self.assertEqual(
+ [mock.call(["file", "--brief", "--mime-type", fname])],
+ m_subp.call_args_list)
+
+
+class ShortReader(object):
+ """Implements a 'read' interface for bytes provided.
+ much like io.BytesIO but the 'endbyte' acts as if EOF.
+ When it is reached a short will be returned."""
+ def __init__(self, initial_bytes, endbyte=b'\0'):
+ self.data = initial_bytes
+ self.index = 0
+ self.len = len(self.data)
+ self.endbyte = endbyte
+
+ @property
+ def emptied(self):
+ return self.index >= self.len
+
+ def read(self, size=-1):
+ """Read size bytes but not past a null."""
+ if size == 0 or self.index >= self.len:
+ return b''
+
+ rsize = size
+ if size < 0 or size + self.index > self.len:
+ rsize = self.len - self.index
+
+        # The find() end bound is an absolute offset, not a length.
+        next_null = self.data.find(
+            self.endbyte, self.index, self.index + rsize)
+ if next_null >= 0:
+ rsize = next_null - self.index + 1
+ i = self.index
+ self.index += rsize
+ ret = self.data[i:i + rsize]
+ if len(ret) and ret[-1:] == self.endbyte:
+ ret = ret[:-1]
+ return ret
+
+
class TestJoyentMetadataClient(FilesystemMockingTestCase):
+ invalid = b'invalid command\n'
+ failure = b'FAILURE\n'
+ v2_ok = b'V2_OK\n'
+
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
@@ -603,7 +718,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
self.response_parts = {
'command': 'SUCCESS',
'crc': 'b5a9ff00',
- 'length': 17 + len(b64e(self.metadata_value)),
+ 'length': SUCCESS_LEN + len(b64e(self.metadata_value)),
'payload': b64e(self.metadata_value),
'request_id': '{0:08x}'.format(self.request_id),
}
@@ -636,6 +751,11 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
return DataSourceSmartOS.JoyentMetadataClient(
fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
+ def _get_serial_client(self):
+ self.serial.timeout = 1
+ return DataSourceSmartOS.JoyentMetadataSerialClient(None,
+ fp=self.serial)
+
def assertEndsWith(self, haystack, prefix):
self.assertTrue(haystack.endswith(prefix),
"{0} does not end with '{1}'".format(
@@ -646,12 +766,14 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
"{0} does not start with '{1}'".format(
repr(haystack), prefix))
+ def assertNoMoreSideEffects(self, obj):
+ self.assertRaises(StopIteration, obj)
+
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
client.get('some_key')
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
- print(type(written_line))
self.assertEndsWith(written_line.decode('ascii'),
b'\n'.decode('ascii'))
self.assertEqual(1, written_line.count(b'\n'))
@@ -732,13 +854,75 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_get_metadata_returns_None_if_value_not_found(self):
self.response_parts['payload'] = ''
self.response_parts['command'] = 'NOTFOUND'
- self.response_parts['length'] = 17
+ self.response_parts['length'] = NOTFOUND_LEN
client = self._get_client()
client._checksum = lambda _: self.response_parts['crc']
self.assertIsNone(client.get('some_key'))
+ def test_negotiate(self):
+ client = self._get_client()
+ reader = ShortReader(self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client._negotiate()
+ self.assertTrue(reader.emptied)
+
+ def test_negotiate_short_response(self):
+ client = self._get_client()
+ # chopped '\n' from v2_ok.
+ reader = ShortReader(self.v2_ok[:-1] + b'\0')
+ client.fp.read.side_effect = reader.read
+ self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException,
+ client._negotiate)
+ self.assertTrue(reader.emptied)
+
+ def test_negotiate_bad_response(self):
+ client = self._get_client()
+ reader = ShortReader(b'garbage\n' + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+ client._negotiate)
+ self.assertEqual(self.v2_ok, client.fp.read())
+
+ def test_serial_open_transport(self):
+ client = self._get_serial_client()
+ reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_flush_failure(self):
+ client = self._get_serial_client()
+ reader = ShortReader(b'garbage' + b'\0' + self.failure +
+ self.invalid + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_flush_many_timeouts(self):
+ client = self._get_serial_client()
+ reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok)
+ client.fp.read.side_effect = reader.read
+ client.open_transport()
+ self.assertTrue(reader.emptied)
+
+ def test_list_metadata_returns_list(self):
+ parts = ['foo', 'bar']
+ value = b64e('\n'.join(parts))
+ self.response_parts['payload'] = value
+ self.response_parts['crc'] = '40873553'
+ self.response_parts['length'] = SUCCESS_LEN + len(value)
+ client = self._get_client()
+ self.assertEqual(client.list(), parts)
+
+ def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
+ del self.response_parts['payload']
+ self.response_parts['length'] = SUCCESS_LEN - 1
+ self.response_parts['crc'] = '14e563ba'
+ client = self._get_client()
+ self.assertEqual(client.list(), [])
+
-class TestNetworkConversion(TestCase):
+class TestNetworkConversion(CiTestCase):
def test_convert_simple(self):
expected = {
'version': 1,
@@ -872,4 +1056,94 @@ class TestNetworkConversion(TestCase):
found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
+ def test_routes_on_all_nics(self):
+ routes = [
+ {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
+ {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
+ 'name': 'net0', 'type': 'physical',
+ 'subnets': [{'address': '8.12.42.26/24',
+ 'gateway': '8.12.42.1', 'type': 'static',
+ 'routes': [{'network': '3.0.0.0/8',
+ 'gateway': '8.12.42.3'},
+ {'network': '4.0.0.0/8',
+ 'gateway': '10.210.1.4'}]}]},
+ {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
+ 'name': 'net1', 'type': 'physical',
+ 'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
+ 'routes': [{'network': '3.0.0.0/8',
+ 'gateway': '8.12.42.3'},
+ {'network': '4.0.0.0/8',
+ 'gateway': '10.210.1.4'}]}]}]}
+ found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
+ self.maxDiff = None
+ self.assertEqual(expected, found)
+
+
+@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS")
+@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE)
+@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+class TestSerialConcurrency(CiTestCase):
+ """
+ This class tests locking on an actual serial port, and as such can only
+ be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+ a metadata socket will not be valid because a metadata socket ensures
+ there is only one session over a connection. In contrast, in the
+ absence of proper locking multiple processes opening the same serial
+ port can corrupt each others' exchanges with the metadata server.
+
+ This takes on the order of 2 to 3 minutes to run.
+ """
+ allowed_subp = ['mdata-get']
+
+ def setUp(self):
+ self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
+ self.mdata_proc.start()
+ super(TestSerialConcurrency, self).setUp()
+
+ def tearDown(self):
+ # os.kill() rather than mdata_proc.terminate() to avoid console spam.
+ os.kill(self.mdata_proc.pid, signal.SIGKILL)
+ self.mdata_proc.join()
+ super(TestSerialConcurrency, self).tearDown()
+
+ def start_mdata_loop(self):
+ """
+ The mdata-get command is repeatedly run in a separate process so
+ that it may try to race with metadata operations performed in the
+ main test process. Use of mdata-get is better than two processes
+ using the protocol implementation in DataSourceSmartOS because we
+ are testing to be sure that cloud-init and mdata-get respect each
+        other's locks.
+ """
+ rcs = list(range(0, 256))
+ while True:
+ subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+
+ def test_all_keys(self):
+ self.assertIsNotNone(self.mdata_proc.pid)
+ ds = DataSourceSmartOS
+ keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
+ keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
+
+ client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
+ self.assertIsNotNone(client)
+
+        # The behavior that we are testing for was observed with mdata-get
+        # running 10 times at roughly the same time as cloud-init fetched
+        # each key once. cloud-init would regularly see failures before
+        # making it through all keys once.
+ for _ in range(0, 3):
+ for key in keys:
+ # We don't care about the return value, just that it doesn't
+                # throw any exceptions.
+ client.get(key)
+
+ self.assertIsNone(self.mdata_proc.exitcode)
+
# vi: ts=4 expandtab
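
SUCCESS_LEN and NOTFOUND_LEN above fall out of the mdata frame layout: an eight-hex-digit request id, a space, the command, and (for SUCCESS with a payload) one more space. A quick check of that arithmetic:

# Quick check of the frame-length arithmetic behind SUCCESS_LEN and
# NOTFOUND_LEN: '<8-hex request id> <command>[ <payload>]'.
SUCCESS_LEN = len('0123abcd SUCCESS ')   # id(8) + ' ' + 'SUCCESS' + ' '
NOTFOUND_LEN = len('0123abcd NOTFOUND')  # id(8) + ' ' + 'NOTFOUND'
assert SUCCESS_LEN == 17 and NOTFOUND_LEN == 17

payload = 'aGVsbG8='  # base64 of b'hello'
frame = '0123abcd SUCCESS ' + payload
assert len(frame) == SUCCESS_LEN + len(payload)
# A SUCCESS frame with no payload drops the trailing space, hence the
# SUCCESS_LEN - 1 used in the empty-list test above.
assert len('0123abcd SUCCESS') == SUCCESS_LEN - 1
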
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 5670904a..c3f258d5 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import re
+
from cloudinit import distros
-from cloudinit.tests.helpers import (TestCase, mock)
+from cloudinit import ssh_util
+from cloudinit.tests.helpers import (CiTestCase, mock)
class MyBaseDistro(distros.Distro):
@@ -44,8 +47,12 @@ class MyBaseDistro(distros.Distro):
@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
@mock.patch("cloudinit.distros.util.subp")
-class TestCreateUser(TestCase):
+class TestCreateUser(CiTestCase):
+
+ with_logs = True
+
def setUp(self):
+ super(TestCreateUser, self).setUp()
self.dist = MyBaseDistro()
def _useradd2call(self, args):
@@ -145,4 +152,92 @@ class TestCreateUser(TestCase):
mock.call(['passwd', '-l', user])]
self.assertEqual(m_subp.call_args_list, expected)
+ def test_explicit_sudo_false(self, m_subp, m_is_snappy):
+ user = 'foouser'
+ self.dist.create_user(user, sudo=False)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [self._useradd2call([user, '-m']),
+ mock.call(['passwd', '-l', user])])
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_setup_ssh_authorized_keys_with_string(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """ssh_authorized_keys allows string and calls setup_user_keys."""
+ user = 'foouser'
+ self.dist.create_user(user, ssh_authorized_keys='mykey')
+ self.assertEqual(
+ m_subp.call_args_list,
+ [self._useradd2call([user, '-m']),
+ mock.call(['passwd', '-l', user])])
+ m_setup_user_keys.assert_called_once_with(set(['mykey']), user)
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_setup_ssh_authorized_keys_with_list(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """ssh_authorized_keys allows lists and calls setup_user_keys."""
+ user = 'foouser'
+ self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2'])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [self._useradd2call([user, '-m']),
+ mock.call(['passwd', '-l', user])])
+ m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user)
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_setup_ssh_authorized_keys_with_integer(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """ssh_authorized_keys warns on non-iterable/string type."""
+ user = 'foouser'
+ self.dist.create_user(user, ssh_authorized_keys=-1)
+ m_setup_user_keys.assert_called_once_with(set([]), user)
+ match = re.match(
+ r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for'
+ ' \'ssh_authorized_keys\'.*',
+ self.logs.getvalue(),
+ re.DOTALL)
+ self.assertIsNotNone(
+ match, 'Missing ssh_authorized_keys invalid type warning')
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_create_user_with_ssh_redirect_user_no_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """Log a warning when trying to redirect a user no cloud ssh keys."""
+ user = 'foouser'
+ self.dist.create_user(user, ssh_redirect_user='someuser')
+ self.assertIn(
+ 'WARNING: Unable to disable ssh logins for foouser given '
+ 'ssh_redirect_user: someuser. No cloud public-keys present.\n',
+ self.logs.getvalue())
+ m_setup_user_keys.assert_not_called()
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_create_user_with_ssh_redirect_user_with_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """Disable ssh when ssh_redirect_user and cloud ssh keys are set."""
+ user = 'foouser'
+ self.dist.create_user(
+ user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1'])
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace('$USER', 'someuser')
+ disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
+ m_setup_user_keys.assert_called_once_with(
+ set(['key1']), 'foouser', options=disable_prefix)
+
+ @mock.patch('cloudinit.ssh_util.setup_user_keys')
+ def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy):
+ """Do not disable ssh_authorized_keys when ssh_redirect_user is set."""
+ user = 'foouser'
+ self.dist.create_user(
+ user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser',
+ cloud_public_ssh_keys=['key1'])
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace('$USER', 'someuser')
+ disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
+ self.assertEqual(
+ m_setup_user_keys.call_args_list,
+ [mock.call(set(['auth1']), user), # not disabled
+ mock.call(set(['key1']), 'foouser', options=disable_prefix)])
+
# vi: ts=4 expandtab
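
The ssh_redirect_user assertions hinge on templating ssh_util.DISABLE_USER_OPTS, substituting the redirect target for $USER and the disabled account for $DISABLE_USER before the options reach authorized_keys. A sketch of that substitution follows; the template text here is abridged for illustration, not the exact ssh_util constant.

# Sketch of the $USER/$DISABLE_USER templating asserted above. The
# template abridges cloudinit.ssh_util.DISABLE_USER_OPTS, whose real
# text embeds a longer forced command.
DISABLE_OPTS_TEMPLATE = (
    'command="echo Please login as the user \\"$USER\\" rather than the'
    ' user \\"$DISABLE_USER\\".;sleep 10"')


def disable_options(redirect_user, disabled_user):
    opts = DISABLE_OPTS_TEMPLATE.replace('$USER', redirect_user)
    return opts.replace('$DISABLE_USER', disabled_user)


# disable_options('someuser', 'foouser') yields the options= prefix the
# tests expect setup_user_keys to receive for the cloud keys.
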
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 1c2e45fe..6e339355 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -2,24 +2,19 @@
import os
from six import StringIO
-import stat
from textwrap import dedent
try:
from unittest import mock
except ImportError:
import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
from cloudinit import distros
from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit import helpers
-from cloudinit.net import eni
from cloudinit import settings
-from cloudinit.tests.helpers import FilesystemMockingTestCase
+from cloudinit.tests.helpers import (
+ FilesystemMockingTestCase, dir2dict, populate_dir)
from cloudinit import util
@@ -39,6 +34,19 @@ auto eth1
iface eth1 inet dhcp
'''
+BASE_NET_CFG_FROM_V2 = '''
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 192.168.1.5/24
+ gateway 192.168.1.254
+
+auto eth1
+iface eth1 inet dhcp
+'''
+
BASE_NET_CFG_IPV6 = '''
auto lo
iface lo inet loopback
@@ -82,7 +90,7 @@ V1_NET_CFG = {'config': [{'name': 'eth0',
'type': 'physical'}],
'version': 1}
-V1_NET_CFG_OUTPUT = """
+V1_NET_CFG_OUTPUT = """\
# This file is generated from information provided by
# the datasource. Changes to it will not persist across an instance.
# To disable cloud-init's network configuration capabilities, write a file
@@ -116,7 +124,7 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
'version': 1}
-V1_TO_V2_NET_CFG_OUTPUT = """
+V1_TO_V2_NET_CFG_OUTPUT = """\
# This file is generated from information provided by
# the datasource. Changes to it will not persist across an instance.
# To disable cloud-init's network configuration capabilities, write a file
@@ -145,7 +153,7 @@ V2_NET_CFG = {
}
-V2_TO_V2_NET_CFG_OUTPUT = """
+V2_TO_V2_NET_CFG_OUTPUT = """\
# This file is generated from information provided by
# the datasource. Changes to it will not persist across an instance.
# To disable cloud-init's network configuration capabilities, write a file
@@ -176,18 +184,13 @@ class WriteBuffer(object):
return self.buffer.getvalue()
-class TestNetCfgDistro(FilesystemMockingTestCase):
+class TestNetCfgDistroBase(FilesystemMockingTestCase):
- frbsd_ifout = """\
-hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
- options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO>
- ether 00:15:5d:4c:73:00
- inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2
- inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255
- nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
- media: Ethernet autoselect (10Gbase-T <full-duplex>)
- status: active
-"""
+ def setUp(self):
+ super(TestNetCfgDistroBase, self).setUp()
+ self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+ self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+ self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
def _get_distro(self, dname, renderers=None):
cls = distros.fetch(dname)
@@ -198,144 +201,6 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
paths = helpers.Paths({})
return cls(dname, cfg.get('system_info'), paths)
- def test_simple_write_ub(self):
- ub_distro = self._get_distro('ubuntu')
- with ExitStack() as mocks:
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
-
- ub_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 1)
- eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg'
- self.assertIn(eni_name, write_bufs)
- write_buf = write_bufs[eni_name]
- self.assertEqual(str(write_buf).strip(), BASE_NET_CFG.strip())
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_eni_ub(self):
- ub_distro = self._get_distro('ubuntu')
- with ExitStack() as mocks:
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- # eni availability checks
- mocks.enter_context(
- mock.patch.object(util, 'which', return_value=True))
- mocks.enter_context(
- mock.patch.object(eni, 'available', return_value=True))
- mocks.enter_context(
- mock.patch.object(util, 'ensure_dir'))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
- mocks.enter_context(
- mock.patch("cloudinit.net.eni.glob.glob",
- return_value=[]))
-
- ub_distro.apply_network_config(V1_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 2)
- eni_name = '/etc/network/interfaces.d/50-cloud-init.cfg'
- self.assertIn(eni_name, write_bufs)
- write_buf = write_bufs[eni_name]
- self.assertEqual(str(write_buf).strip(), V1_NET_CFG_OUTPUT.strip())
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_v1_to_netplan_ub(self):
- renderers = ['netplan']
- devlist = ['eth0', 'lo']
- ub_distro = self._get_distro('ubuntu', renderers=renderers)
- with ExitStack() as mocks:
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- mocks.enter_context(
- mock.patch.object(util, 'which', return_value=True))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'ensure_dir'))
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=(0, 0)))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
- mocks.enter_context(
- mock.patch("cloudinit.net.netplan.get_devicelist",
- return_value=devlist))
-
- ub_distro.apply_network_config(V1_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 1)
- netplan_name = '/etc/netplan/50-cloud-init.yaml'
- self.assertIn(netplan_name, write_bufs)
- write_buf = write_bufs[netplan_name]
- self.assertEqual(str(write_buf).strip(),
- V1_TO_V2_NET_CFG_OUTPUT.strip())
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_v2_passthrough_ub(self):
- renderers = ['netplan']
- devlist = ['eth0', 'lo']
- ub_distro = self._get_distro('ubuntu', renderers=renderers)
- with ExitStack() as mocks:
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- mocks.enter_context(
- mock.patch.object(util, 'which', return_value=True))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'ensure_dir'))
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=(0, 0)))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
- # FreeBSD does not have '/sys/class/net' file,
- # so we need mock here.
- mocks.enter_context(
- mock.patch.object(os, 'listdir', return_value=devlist))
- ub_distro.apply_network_config(V2_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 1)
- netplan_name = '/etc/netplan/50-cloud-init.yaml'
- self.assertIn(netplan_name, write_bufs)
- write_buf = write_bufs[netplan_name]
- self.assertEqual(str(write_buf).strip(),
- V2_TO_V2_NET_CFG_OUTPUT.strip())
- self.assertEqual(write_buf.mode, 0o644)
-
def assertCfgEquals(self, blob1, blob2):
b1 = dict(SysConf(blob1.strip().splitlines()))
b2 = dict(SysConf(blob2.strip().splitlines()))
@@ -347,6 +212,20 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
for (k, v) in b1.items():
self.assertEqual(v, b2[k])
+
+class TestNetCfgDistroFreebsd(TestNetCfgDistroBase):
+
+ frbsd_ifout = """\
+hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO>
+ ether 00:15:5d:4c:73:00
+ inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2
+ inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255
+ nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
+ media: Ethernet autoselect (10Gbase-T <full-duplex>)
+ status: active
+"""
+
@mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list')
@mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out')
def test_get_ip_nic_freebsd(self, ifname_out, iflist):
@@ -370,349 +249,59 @@ hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
res = frbsd_distro.generate_fallback_config()
self.assertIsNotNone(res)
- def test_simple_write_rh(self):
- rh_distro = self._get_distro('rhel')
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
-
- rh_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 4)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo']
- expected_buf = '''
-DEVICE="lo"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-DEVICE="eth0"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.5"
-ONBOOT=yes
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-DEVICE="eth1"
-BOOTPROTO="dhcp"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_rh(self):
- renderers = ['sysconfig']
- rh_distro = self._get_distro('rhel', renderers=renderers)
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- # sysconfig availability checks
- mocks.enter_context(
- mock.patch.object(util, 'which', return_value=True))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=True))
-
- rh_distro.apply_network_config(V1_NET_CFG, False)
-
- self.assertEqual(len(write_bufs), 5)
-
- # eth0
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=eth0
-GATEWAY=192.168.1.254
-IPADDR=192.168.1.5
-NETMASK=255.255.255.0
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- # eth1
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=dhcp
-DEVICE=eth1
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_write_ipv6_rhel(self):
- rh_distro = self._get_distro('rhel')
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=False))
- rh_distro.apply_network(BASE_NET_CFG_IPV6, False)
-
- self.assertEqual(len(write_bufs), 4)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo']
- expected_buf = '''
-DEVICE="lo"
-ONBOOT=yes
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-DEVICE="eth0"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.5"
-ONBOOT=yes
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-IPV6INIT=yes
-IPV6ADDR="2607:f0d0:1002:0011::2"
-IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-DEVICE="eth1"
-BOOTPROTO="static"
-NETMASK="255.255.255.0"
-IPADDR="192.168.1.6"
-ONBOOT=no
-GATEWAY="192.168.1.254"
-BROADCAST="192.168.1.0"
-IPV6INIT=yes
-IPV6ADDR="2607:f0d0:1002:0011::3"
-IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-NETWORKING_IPV6=yes
-IPV6_AUTOCONF=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- def test_apply_network_config_ipv6_rh(self):
- renderers = ['sysconfig']
- rh_distro = self._get_distro('rhel', renderers=renderers)
-
- write_bufs = {}
-
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'which', return_value=True))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', return_value=''))
- mocks.enter_context(
- mock.patch.object(os.path, 'isfile', return_value=True))
-
- rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
-
- self.assertEqual(len(write_bufs), 5)
-
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
- expected_buf = '''
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=eth0
-IPV6ADDR=2607:f0d0:1002:0011::2/64
-IPV6INIT=yes
-IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
- self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1',
- write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
- expected_buf = '''
-# Created by cloud-init on instance boot automatically, do not edit.
-#
-BOOTPROTO=dhcp
-DEVICE=eth1
-NM_CONTROLLED=no
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
- self.assertIn('/etc/sysconfig/network', write_bufs)
- write_buf = write_bufs['/etc/sysconfig/network']
- expected_buf = '''
-# Created by cloud-init v. 0.7
-NETWORKING=yes
-NETWORKING_IPV6=yes
-IPV6_AUTOCONF=no
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
-
def test_simple_write_freebsd(self):
fbsd_distro = self._get_distro('freebsd')
- write_bufs = {}
+ rc_conf = '/etc/rc.conf'
read_bufs = {
- '/etc/rc.conf': '',
- '/etc/resolv.conf': '',
+ rc_conf: 'initial-rc-conf-not-validated',
+ '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
}
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- def replace_read(fname, read_cb=None, quiet=False):
- if fname not in read_bufs:
- if fname in write_bufs:
- return str(write_bufs[fname])
- raise IOError("%s not found" % fname)
- else:
- if fname in write_bufs:
- return str(write_bufs[fname])
- return read_bufs[fname]
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=('vtnet0', '')))
- mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=False))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', replace_read))
-
- fbsd_distro.apply_network(BASE_NET_CFG, False)
-
- self.assertIn('/etc/rc.conf', write_bufs)
- write_buf = write_bufs['/etc/rc.conf']
- expected_buf = '''
-ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
-ifconfig_vtnet1="DHCP"
-defaultrouter="192.168.1.254"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
+ tmpd = self.tmp_dir()
+ populate_dir(tmpd, read_bufs)
+ with self.reRooted(tmpd):
+ with mock.patch("cloudinit.distros.freebsd.util.subp",
+ return_value=('vtnet0', '')):
+ fbsd_distro.apply_network(BASE_NET_CFG, False)
+ results = dir2dict(tmpd)
+
+ self.assertIn(rc_conf, results)
+ self.assertCfgEquals(
+ dedent('''\
+ ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
+ ifconfig_vtnet1="DHCP"
+ defaultrouter="192.168.1.254"
+ '''), results[rc_conf])
+ self.assertEqual(0o644, get_mode(rc_conf, tmpd))
+
+ def test_simple_write_freebsd_from_v2eni(self):
+ fbsd_distro = self._get_distro('freebsd')
+
+ rc_conf = '/etc/rc.conf'
+ read_bufs = {
+ rc_conf: 'initial-rc-conf-not-validated',
+ '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
+ }
- def test_apply_network_config_fallback(self):
+ tmpd = self.tmp_dir()
+ populate_dir(tmpd, read_bufs)
+ with self.reRooted(tmpd):
+ with mock.patch("cloudinit.distros.freebsd.util.subp",
+ return_value=('vtnet0', '')):
+ fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False)
+ results = dir2dict(tmpd)
+
+ self.assertIn(rc_conf, results)
+ self.assertCfgEquals(
+ dedent('''\
+ ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0"
+ ifconfig_vtnet1="DHCP"
+ defaultrouter="192.168.1.254"
+ '''), results[rc_conf])
+ self.assertEqual(0o644, get_mode(rc_conf, tmpd))
+
+ def test_apply_network_config_fallback_freebsd(self):
fbsd_distro = self._get_distro('freebsd')
# a weak attempt to verify that we don't have an implementation
@@ -729,89 +318,293 @@ defaultrouter="192.168.1.254"
"subnets": [{"type": "dhcp"}]}],
'version': 1}
- write_bufs = {}
+ rc_conf = '/etc/rc.conf'
read_bufs = {
- '/etc/rc.conf': '',
- '/etc/resolv.conf': '',
+ rc_conf: 'initial-rc-conf-not-validated',
+ '/etc/resolv.conf': 'initial-resolv-conf-not-validated',
}
- def replace_write(filename, content, mode=0o644, omode="wb"):
- buf = WriteBuffer()
- buf.mode = mode
- buf.omode = omode
- buf.write(content)
- write_bufs[filename] = buf
-
- def replace_read(fname, read_cb=None, quiet=False):
- if fname not in read_bufs:
- if fname in write_bufs:
- return str(write_bufs[fname])
- raise IOError("%s not found" % fname)
- else:
- if fname in write_bufs:
- return str(write_bufs[fname])
- return read_bufs[fname]
-
- with ExitStack() as mocks:
- mocks.enter_context(
- mock.patch.object(util, 'subp', return_value=('vtnet0', '')))
- mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=False))
- mocks.enter_context(
- mock.patch.object(util, 'write_file', replace_write))
- mocks.enter_context(
- mock.patch.object(util, 'load_file', replace_read))
-
- fbsd_distro.apply_network_config(mynetcfg, bring_up=False)
-
- self.assertIn('/etc/rc.conf', write_bufs)
- write_buf = write_bufs['/etc/rc.conf']
- expected_buf = '''
-ifconfig_vtnet0="DHCP"
-'''
- self.assertCfgEquals(expected_buf, str(write_buf))
- self.assertEqual(write_buf.mode, 0o644)
+ tmpd = self.tmp_dir()
+ populate_dir(tmpd, read_bufs)
+ with self.reRooted(tmpd):
+ with mock.patch("cloudinit.distros.freebsd.util.subp",
+ return_value=('vtnet0', '')):
+ fbsd_distro.apply_network_config(mynetcfg, bring_up=False)
+ results = dir2dict(tmpd)
+
+ self.assertIn(rc_conf, results)
+ self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf])
+ self.assertEqual(0o644, get_mode(rc_conf, tmpd))
+
+
+class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
+
+ def setUp(self):
+ super(TestNetCfgDistroUbuntuEni, self).setUp()
+ self.distro = self._get_distro('ubuntu', renderers=['eni'])
+
+ def eni_path(self):
+ return '/etc/network/interfaces.d/50-cloud-init.cfg'
+
+ def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+            raise ValueError('expected_cfgs must not be None')
+
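+        # Render the config beneath a rerooted tmpdir, then compare each
+        # expected file's content and permission bits with what was written.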
+ tmpd = None
+ with mock.patch('cloudinit.net.eni.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ print("----------")
+ print(expected)
+ print("^^^^ expected | rendered VVVVVVV")
+ print(results[cfgpath])
+ print("----------")
+ self.assertEqual(expected, results[cfgpath])
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
- def test_simple_write_opensuse(self):
- """Opensuse network rendering writes appropriate sysconfg files."""
- tmpdir = self.tmp_dir()
- self.patchOS(tmpdir)
- self.patchUtils(tmpdir)
- distro = self._get_distro('opensuse')
+ def test_apply_network_config_eni_ub(self):
+ expected_cfgs = {
+ self.eni_path(): V1_NET_CFG_OUTPUT,
+ }
+ # ub_distro.apply_network_config(V1_NET_CFG, False)
+ self._apply_and_verify_eni(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
+
+
+class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
+ def setUp(self):
+ super(TestNetCfgDistroUbuntuNetplan, self).setUp()
+ self.distro = self._get_distro('ubuntu', renderers=['netplan'])
+ self.devlist = ['eth0', 'lo']
+
+ def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+            raise ValueError('expected_cfgs must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.netplan.available',
+ return_value=True):
+ with mock.patch("cloudinit.net.netplan.get_devicelist",
+ return_value=self.devlist):
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ print("----------")
+ print(expected)
+ print("^^^^ expected | rendered VVVVVVV")
+ print(results[cfgpath])
+ print("----------")
+ self.assertEqual(expected, results[cfgpath])
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def netplan_path(self):
+ return '/etc/netplan/50-cloud-init.yaml'
- distro.apply_network(BASE_NET_CFG, False)
+ def test_apply_network_config_v1_to_netplan_ub(self):
+ expected_cfgs = {
+ self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT,
+ }
+
+ # ub_distro.apply_network_config(V1_NET_CFG, False)
+ self._apply_and_verify_netplan(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
- lo_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-lo')
- eth0_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth0')
- eth1_path = os.path.join(tmpdir, 'etc/sysconfig/network/ifcfg-eth1')
+ def test_apply_network_config_v2_passthrough_ub(self):
expected_cfgs = {
- lo_path: dedent('''
- STARTMODE="auto"
- USERCONTROL="no"
- FIREWALL="no"
- '''),
- eth0_path: dedent('''
- BOOTPROTO="static"
- BROADCAST="192.168.1.0"
- GATEWAY="192.168.1.254"
- IPADDR="192.168.1.5"
- NETMASK="255.255.255.0"
- STARTMODE="auto"
- USERCONTROL="no"
- ETHTOOL_OPTIONS=""
- '''),
- eth1_path: dedent('''
- BOOTPROTO="dhcp"
- STARTMODE="auto"
- USERCONTROL="no"
- ETHTOOL_OPTIONS=""
- ''')
+ self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT,
}
- for cfgpath in (lo_path, eth0_path, eth1_path):
- self.assertCfgEquals(
- expected_cfgs[cfgpath],
- util.load_file(cfgpath))
- file_stat = os.stat(cfgpath)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ # ub_distro.apply_network_config(V2_NET_CFG, False)
+ self._apply_and_verify_netplan(self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
+
+
+class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
+
+ def setUp(self):
+ super(TestNetCfgDistroRedhat, self).setUp()
+ self.distro = self._get_distro('rhel', renderers=['sysconfig'])
+
+ def ifcfg_path(self, ifname):
+ return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname
+
+ def control_path(self):
+ return '/etc/sysconfig/network'
+
+ def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+            raise ValueError('expected_cfgs must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ self.assertCfgEquals(expected, results[cfgpath])
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def test_apply_network_config_rh(self):
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0
+ GATEWAY=192.168.1.254
+ IPADDR=192.168.1.5
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('eth1'): dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=eth1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.control_path(): dedent("""\
+ NETWORKING=yes
+ """),
+ }
+ # rh_distro.apply_network_config(V1_NET_CFG, False)
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
+
+ def test_apply_network_config_ipv6_rh(self):
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0
+ IPV6ADDR=2607:f0d0:1002:0011::2/64
+ IPV6INIT=yes
+ IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('eth1'): dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=eth1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.control_path(): dedent("""\
+ NETWORKING=yes
+ NETWORKING_IPV6=yes
+ IPV6_AUTOCONF=no
+ """),
+ }
+ # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy())
+
+
+class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
+
+ def setUp(self):
+ super(TestNetCfgDistroOpensuse, self).setUp()
+ self.distro = self._get_distro('opensuse', renderers=['sysconfig'])
+
+ def ifcfg_path(self, ifname):
+ return '/etc/sysconfig/network/ifcfg-%s' % ifname
+
+ def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+            raise ValueError('expected_cfgs must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ self.assertCfgEquals(expected, results[cfgpath])
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def test_apply_network_config_opensuse(self):
+ """Opensuse uses apply_network_config and renders sysconfig"""
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0
+ GATEWAY=192.168.1.254
+ IPADDR=192.168.1.5
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('eth1'): dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=eth1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ }
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy())
+
+ def test_apply_network_config_ipv6_opensuse(self):
+ """Opensuse uses apply_network_config and renders sysconfig w/ipv6"""
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0
+ IPV6ADDR=2607:f0d0:1002:0011::2/64
+ IPV6INIT=yes
+ IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('eth1'): dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=eth1
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ }
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy())
+
+
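+# Return the permission bits of 'path', resolved under the test root
+# 'target' (see util.target_path).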
+def get_mode(path, target=None):
+ return os.stat(util.target_path(target, path)).st_mode & 0o777
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index 0fa9cdb5..fa4b6cfe 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -22,6 +22,12 @@ bcfg = {
class TestUGNormalize(TestCase):
+ def setUp(self):
+ super(TestUGNormalize, self).setUp()
+ self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+ self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+ self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
+
def _make_distro(self, dtype, def_user=None):
cfg = dict(settings.CFG_BUILTIN)
cfg['system_info']['distro'] = dtype
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 53643989..46778e95 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from collections import namedtuple
import copy
import os
from uuid import uuid4
@@ -7,9 +8,11 @@ from uuid import uuid4
from cloudinit import safeyaml
from cloudinit import util
from cloudinit.tests.helpers import (
- CiTestCase, dir2dict, populate_dir)
+ CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
-from cloudinit.sources import DataSourceIBMCloud as dsibm
+from cloudinit.sources import DataSourceIBMCloud as ds_ibm
+from cloudinit.sources import DataSourceSmartOS as ds_smartos
+from cloudinit.sources import DataSourceOracle as ds_oracle
UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
"SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux")
@@ -66,19 +69,29 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor"
P_SEED_DIR = "var/lib/cloud/seed"
P_DSID_CFG = "etc/cloud/ds-identify.cfg"
-IBM_PROVISIONING_CHECK_PATH = "/root/provisioningConfiguration.cfg"
IBM_CONFIG_UUID = "9796-932E"
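+# systemd-detect-virt reports SmartOS lxbrand zones as 'container-other'.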
+MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt',
+ 'RET': 'container-other', 'ret': 0}
MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
+# Currently, SmartOS hypervisor "bhyve" is unknown to systemd-detect-virt.
+MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0}
MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0}
MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}
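+# ds-identify uses shell exit-code semantics: 0 is true, nonzero is false.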
+shell_true = 0
+shell_false = 1
-class TestDsIdentify(CiTestCase):
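+# Named access to the results of a ds-identify run, replacing the
+# positional tuple previously returned by call().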
+CallReturn = namedtuple('CallReturn',
+ ['rc', 'stdout', 'stderr', 'cfg', 'files'])
+
+
+class DsIdentifyBase(CiTestCase):
dsid_path = os.path.realpath('tools/ds-identify')
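+    # ds-identify is a shell script; CiTestCase lets tests exec only the
+    # programs named in allowed_subp.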
+ allowed_subp = ['sh']
- def call(self, rootd=None, mocks=None, args=None, files=None,
+ def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
policy_dmi=DI_DEFAULT_POLICY,
policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT):
@@ -135,7 +148,7 @@ class TestDsIdentify(CiTestCase):
mocklines.append(write_mock(d))
endlines = [
- 'main %s' % ' '.join(['"%s"' % s for s in args])
+ func + ' ' + ' '.join(['"%s"' % s for s in args])
]
with open(wrap, "w") as fp:
@@ -159,12 +172,14 @@ class TestDsIdentify(CiTestCase):
cfg = {"_INVALID_YAML": contents,
"_EXCEPTION": str(e)}
- return rc, out, err, cfg, dir2dict(rootd)
+ return CallReturn(rc, out, err, cfg, dir2dict(rootd))
def _call_via_dict(self, data, rootd=None, **kwargs):
# return output of self.call with a dict input like VALID_CFG[item]
xwargs = {'rootd': rootd}
- for k in ('mocks', 'args', 'policy_dmi', 'policy_no_dmi', 'files'):
+ passthrough = ('mocks', 'func', 'args', 'policy_dmi',
+ 'policy_no_dmi', 'files')
+ for k in passthrough:
if k in data:
xwargs[k] = data[k]
if k in kwargs:
@@ -178,18 +193,21 @@ class TestDsIdentify(CiTestCase):
data, RC_FOUND, dslist=[data.get('ds'), DS_NONE])
def _check_via_dict(self, data, rc, dslist=None, **kwargs):
- found_rc, out, err, cfg, files = self._call_via_dict(data, **kwargs)
+ ret = self._call_via_dict(data, **kwargs)
good = False
try:
- self.assertEqual(rc, found_rc)
+ self.assertEqual(rc, ret.rc)
if dslist is not None:
- self.assertEqual(dslist, cfg['datasource_list'])
+ self.assertEqual(dslist, ret.cfg['datasource_list'])
good = True
finally:
if not good:
- _print_run_output(rc, out, err, cfg, files)
- return rc, out, err, cfg, files
+ _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg,
+ ret.files)
+ return ret
+
+class TestDsIdentify(DsIdentifyBase):
def test_wb_print_variables(self):
"""_print_info reports an array of discovered variables to stderr."""
data = VALID_CFG['Azure-dmi-detection']
@@ -237,20 +255,50 @@ class TestDsIdentify(CiTestCase):
def test_config_drive(self):
"""ConfigDrive datasource has a disk with LABEL=config-2."""
self._test_ds_found('ConfigDrive')
- return
def test_config_drive_upper(self):
"""ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
self._test_ds_found('ConfigDriveUpper')
return
+ def test_config_drive_seed(self):
+ """Config Drive seed directory."""
+ self._test_ds_found('ConfigDrive-seed')
+
+ def test_config_drive_interacts_with_ibmcloud_config_disk(self):
+ """Verify ConfigDrive interaction with IBMCloud.
+
+ If ConfigDrive is enabled and not IBMCloud, then ConfigDrive
+ should claim the ibmcloud 'config-2' disk.
+ If IBMCloud is enabled, then ConfigDrive should skip."""
+ data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+ files = data.get('files', {})
+ if not files:
+ data['files'] = files
+ cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg'
+
+        # With IBMCloud in the datasource_list, ConfigDrive is not found.
+ files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n'
+ ret = self._check_via_dict(data, shell_true)
+ self.assertEqual(
+ ret.cfg.get('datasource_list'), ['IBMCloud', 'None'])
+
+        # But if IBMCloud is not enabled, ConfigDrive should claim the disk.
+ files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n'
+ ret = self._check_via_dict(data, shell_true)
+ self.assertEqual(
+ ret.cfg.get('datasource_list'), ['ConfigDrive', 'None'])
+
def test_ibmcloud_template_userdata_in_provisioning(self):
"""Template provisioned with user-data during provisioning stage.
Template provisioning with user-data has METADATA disk,
datasource should return not found."""
data = copy.deepcopy(VALID_CFG['IBMCloud-metadata'])
- data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'}
+        # change the 'is_ibm_provisioning' mock to return 0 (shell true)
+ isprov_m = [m for m in data['mocks']
+ if m["name"] == "is_ibm_provisioning"][0]
+ isprov_m['ret'] = shell_true
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_userdata(self):
@@ -265,7 +313,8 @@ class TestDsIdentify(CiTestCase):
no disks attached. Datasource should return not found."""
data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks'])
- data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'}
+ data['mocks'].append(
+ {'name': 'is_ibm_provisioning', 'ret': shell_true})
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_no_userdata(self):
@@ -290,11 +339,42 @@ class TestDsIdentify(CiTestCase):
break
if not offset:
raise ValueError("Expected to find 'blkid' mock, but did not.")
- data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID,
+ data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID,
"DEAD-BEEF")
self._check_via_dict(
data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE])
+ def test_ibmcloud_with_nocloud_seed(self):
+ """NoCloud seed should be preferred over IBMCloud.
+
+ A nocloud seed should be preferred over IBMCloud even if enabled.
+ Ubuntu 16.04 images have <vlc>/seed/nocloud-net. LP: #1766401."""
+ data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+ files = data.get('files', {})
+ if not files:
+ data['files'] = files
+ files.update(VALID_CFG['NoCloud-seed']['files'])
+ ret = self._check_via_dict(data, shell_true)
+ self.assertEqual(
+ ['NoCloud', 'IBMCloud', 'None'],
+ ret.cfg.get('datasource_list'))
+
+ def test_ibmcloud_with_configdrive_seed(self):
+ """ConfigDrive seed should be preferred over IBMCloud.
+
+ A ConfigDrive seed should be preferred over IBMCloud even if enabled.
+        Ubuntu 16.04 images have an fstab entry that mounts the
+        METADATA disk into <vlc>/seed/config_drive. LP: #1766401."""
+ data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+ files = data.get('files', {})
+ if not files:
+ data['files'] = files
+ files.update(VALID_CFG['ConfigDrive-seed']['files'])
+ ret = self._check_via_dict(data, shell_true)
+ self.assertEqual(
+ ['ConfigDrive', 'IBMCloud', 'None'],
+ ret.cfg.get('datasource_list'))
+
def test_policy_disabled(self):
"""A Builtin policy of 'disabled' should return not found.
@@ -445,6 +525,92 @@ class TestDsIdentify(CiTestCase):
"""Hetzner cloud is identified in sys_vendor."""
self._test_ds_found('Hetzner')
+ def test_smartos_bhyve(self):
+ """SmartOS cloud identified by SmartDC in dmi."""
+ self._test_ds_found('SmartOS-bhyve')
+
+ def test_smartos_lxbrand(self):
+ """SmartOS cloud identified on lxbrand container."""
+ self._test_ds_found('SmartOS-lxbrand')
+
+ def test_smartos_lxbrand_requires_socket(self):
+ """SmartOS cloud should not be identified if no socket file."""
+ mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand'])
+ del mycfg['files'][ds_smartos.METADATA_SOCKFILE]
+ self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled")
+
+ def test_path_env_gets_set_from_main(self):
+ """PATH environment should always have some tokens when main is run.
+
+ We explicitly call main as we want to ensure it updates PATH."""
+ cust = copy.deepcopy(VALID_CFG['NoCloud'])
+ rootd = self.tmp_dir()
+ mpp = 'main-printpath'
+ pre = "MYPATH="
+ cust['files'][mpp] = (
+ 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;')
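+        # func="." makes the wrapper source this file, so 'main' starts
+        # with a clobbered PATH that it must extend itself.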
+ ret = self._check_via_dict(
+ cust, RC_FOUND,
+ func=".", args=[os.path.join(rootd, mpp)], rootd=rootd)
+ line = [l for l in ret.stdout.splitlines() if l.startswith(pre)][0]
+ toks = line.replace(pre, "").split(":")
+ expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"]
+ self.assertEqual(expected, [p for p in expected if p in toks],
+ "path did not have expected tokens")
+
+
+class TestIsIBMProvisioning(DsIdentifyBase):
+ """Test the is_ibm_provisioning method in ds-identify."""
+
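+    # Provisioning is inferred from the config file plus whether the
+    # install log is newer than boot (mtime of /proc/1/environ).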
+ inst_log = "/root/swinstall.log"
+ prov_cfg = "/root/provisioningConfiguration.cfg"
+ boot_ref = "/proc/1/environ"
+ funcname = "is_ibm_provisioning"
+
+ def test_no_config(self):
+ """No provisioning config means not provisioning."""
+ ret = self.call(files={}, func=self.funcname)
+ self.assertEqual(shell_false, ret.rc)
+
+ def test_config_only(self):
+ """A provisioning config without a log means provisioning."""
+ ret = self.call(files={self.prov_cfg: "key=value"}, func=self.funcname)
+ self.assertEqual(shell_true, ret.rc)
+
+ def test_config_with_old_log(self):
+ """A config with a log from previous boot is not provisioning."""
+ rootd = self.tmp_dir()
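+        # populate_dir_with_ts takes (content, mtime offset in seconds);
+        # here the install log predates the boot reference file.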
+ data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0)}
+ populate_dir_with_ts(rootd, data)
+ ret = self.call(rootd=rootd, func=self.funcname)
+ self.assertEqual(shell_false, ret.rc)
+ self.assertIn("from previous boot", ret.stderr)
+
+ def test_config_with_new_log(self):
+ """A config with a log from this boot is provisioning."""
+ rootd = self.tmp_dir()
+ data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0)}
+ populate_dir_with_ts(rootd, data)
+ ret = self.call(rootd=rootd, func=self.funcname)
+ self.assertEqual(shell_true, ret.rc)
+ self.assertIn("from current boot", ret.stderr)
+
+
+class TestOracle(DsIdentifyBase):
+ def test_found_by_chassis(self):
+ """Simple positive test of Oracle by chassis id."""
+ self._test_ds_found('Oracle')
+
+ def test_not_found(self):
+ """Simple negative test of Oracle."""
+ mycfg = copy.deepcopy(VALID_CFG['Oracle'])
+ mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle"
+ self._check_via_dict(mycfg, rc=RC_NOT_FOUND)
+
def blkid_out(disks=None):
"""Convert a list of disk dictionaries into blkid content."""
@@ -631,6 +797,12 @@ VALID_CFG = {
},
],
},
+ 'ConfigDrive-seed': {
+ 'ds': 'ConfigDrive',
+ 'files': {
+ os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
+ 'latest', 'meta_data.json'): 'md\n'},
+ },
'Hetzner': {
'ds': 'Hetzner',
'files': {P_SYS_VENDOR: 'Hetzner\n'},
@@ -639,6 +811,7 @@ VALID_CFG = {
'ds': 'IBMCloud',
'mocks': [
MOCK_VIRT_IS_XEN,
+ {'name': 'is_ibm_provisioning', 'ret': shell_false},
{'name': 'blkid', 'ret': 0,
'out': blkid_out(
[{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
@@ -652,12 +825,13 @@ VALID_CFG = {
'ds': 'IBMCloud',
'mocks': [
MOCK_VIRT_IS_XEN,
+ {'name': 'is_ibm_provisioning', 'ret': shell_false},
{'name': 'blkid', 'ret': 0,
'out': blkid_out(
[{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(),
'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'},
{'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2',
- 'UUID': dsibm.IBM_CONFIG_UUID},
+ 'UUID': ds_ibm.IBM_CONFIG_UUID},
{'DEVNAME': 'xvda2', 'TYPE': 'ext4',
'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(),
'UUID': uuid4()},
@@ -669,6 +843,7 @@ VALID_CFG = {
'ds': 'IBMCloud',
'mocks': [
MOCK_VIRT_IS_XEN,
+ {'name': 'is_ibm_provisioning', 'ret': shell_false},
{'name': 'blkid', 'ret': 0,
'out': blkid_out(
[{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
@@ -677,6 +852,38 @@ VALID_CFG = {
},
],
},
+ 'Oracle': {
+ 'ds': 'Oracle',
+ 'files': {
+ P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n',
+ }
+ },
+ 'SmartOS-bhyve': {
+ 'ds': 'SmartOS',
+ 'mocks': [
+ MOCK_VIRT_IS_VM_OTHER,
+ {'name': 'blkid', 'ret': 0,
+ 'out': blkid_out(
+ [{'DEVNAME': 'vda1', 'TYPE': 'ext4',
+ 'PARTUUID': '49ec635a-01'},
+ {'DEVNAME': 'vda2', 'TYPE': 'swap',
+ 'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]),
+ },
+ ],
+ 'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'},
+ },
+ 'SmartOS-lxbrand': {
+ 'ds': 'SmartOS',
+ 'mocks': [
+ MOCK_VIRT_IS_CONTAINER_OTHER,
+ {'name': 'uname', 'ret': 0,
+ 'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
+ "BrandZ virtual linux x86_64 GNU/Linux")},
+ {'name': 'blkid', 'ret': 2, 'out': ''},
+ ],
+ 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'},
+ }
+
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index af78997f..3f50f57d 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -11,7 +11,6 @@ from cloudinit import url_helper as uh
class TestEc2Util(helpers.HttprettyTestCase):
VERSION = 'latest'
- @hp.activate
def test_userdata_fetch(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
@@ -20,7 +19,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
userdata = eu.get_instance_userdata(self.VERSION)
self.assertEqual('stuff', userdata.decode('utf-8'))
- @hp.activate
def test_userdata_fetch_fail_not_found(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
@@ -28,7 +26,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEqual('', userdata)
- @hp.activate
def test_userdata_fetch_fail_server_dead(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
@@ -36,7 +33,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEqual('', userdata)
- @hp.activate
def test_userdata_fetch_fail_server_not_found(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
@@ -44,7 +40,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
userdata = eu.get_instance_userdata(self.VERSION)
self.assertEqual('', userdata)
- @hp.activate
def test_metadata_fetch_no_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
@@ -62,7 +57,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(md['instance-id'], '123')
self.assertEqual(md['ami-launch-index'], '1')
- @hp.activate
def test_metadata_fetch_key(self):
base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
@@ -83,7 +77,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(md['instance-id'], '123')
self.assertEqual(1, len(md['public-keys']))
- @hp.activate
def test_metadata_fetch_with_2_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
@@ -108,7 +101,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(md['instance-id'], '123')
self.assertEqual(2, len(md['public-keys']))
- @hp.activate
def test_metadata_fetch_bdm(self):
base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
@@ -140,7 +132,6 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(bdm['ami'], 'sdb')
self.assertEqual(bdm['ephemeral0'], 'sdc')
- @hp.activate
def test_metadata_no_security_credentials(self):
base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
index 6364d38e..e1a5d2c8 100644
--- a/tests/unittests/test_filters/test_launch_index.py
+++ b/tests/unittests/test_filters/test_launch_index.py
@@ -55,7 +55,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
return True
def testMultiEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_2.email')
+ test_data = helpers.readResource('filter_cloud_multipart_2.email')
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -70,7 +70,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testHeaderEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_header.email')
+ test_data = helpers.readResource('filter_cloud_multipart_header.email')
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -85,7 +85,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testConfigEmailIndex(self):
- test_data = self.readResource('filter_cloud_multipart_1.email')
+ test_data = helpers.readResource('filter_cloud_multipart_1.email')
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -99,7 +99,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testNoneIndex(self):
- test_data = self.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource('filter_cloud_multipart.yaml')
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
@@ -108,7 +108,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertTrue(self.equivalentMessage(message, filtered_message))
def testIndexes(self):
- test_data = self.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource('filter_cloud_multipart.yaml')
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
index 83f962a9..6a4b03ee 100644
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
@@ -12,10 +12,6 @@ import shutil
import tempfile
-def load_tfile_or_url(*args, **kwargs):
- return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents))
-
-
class TestAptProxyConfig(TestCase):
def setUp(self):
super(TestAptProxyConfig, self).setUp()
@@ -36,7 +32,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = load_tfile_or_url(self.pfile)
+ contents = util.load_file(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
@@ -46,7 +42,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = load_tfile_or_url(self.pfile)
+ contents = util.load_file(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
@@ -64,7 +60,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
- contents = load_tfile_or_url(self.pfile)
+ contents = util.load_file(self.pfile)
for ptype, pval in values.items():
self.assertTrue(self._search_apt_config(contents, ptype, pval))
@@ -80,7 +76,7 @@ class TestAptProxyConfig(TestCase):
cc_apt_configure.apply_apt_config({'proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
- contents = load_tfile_or_url(self.pfile)
+ contents = util.load_file(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
@@ -92,14 +88,14 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(os.path.isfile(self.cfile))
self.assertFalse(os.path.isfile(self.pfile))
- self.assertEqual(load_tfile_or_url(self.cfile), payload)
+ self.assertEqual(util.load_file(self.cfile), payload)
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'conf': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
- self.assertEqual(load_tfile_or_url(self.cfile), "foo")
+ self.assertEqual(util.load_file(self.cfile), "foo")
def test_config_deleted(self):
# if no 'conf' is provided, delete any previously written file
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index d2b96f0b..23bd6e10 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -64,13 +64,6 @@ deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
""")
-def load_tfile_or_url(*args, **kwargs):
- """load_tfile_or_url
- load file and return content after decoding
- """
- return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
-
-
class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList
Main Class to test sources list rendering
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index 46ca4ce4..a3132fbd 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -39,13 +39,6 @@ S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-def load_tfile_or_url(*args, **kwargs):
- """load_tfile_or_url
- load file and return content after decoding
- """
- return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
-
-
class FakeDistro(object):
"""Fake Distro helper object"""
def update_package_sources(self):
@@ -125,7 +118,7 @@ class TestAptSourceConfig(TestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile_or_url(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://archive.ubuntu.com/ubuntu",
"karmic-backports",
@@ -157,13 +150,13 @@ class TestAptSourceConfig(TestCase):
self.apt_src_basic(self.aptlistfile, cfg)
# extra verify on two extra files of this test
- contents = load_tfile_or_url(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://archive.ubuntu.com/ubuntu",
"precise-backports",
"main universe multiverse restricted"),
contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://archive.ubuntu.com/ubuntu",
"lucid-backports",
@@ -220,7 +213,7 @@ class TestAptSourceConfig(TestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile_or_url(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"multiverse"),
@@ -241,12 +234,12 @@ class TestAptSourceConfig(TestCase):
# extra verify on two extra files of this test
params = self._get_default_params()
- contents = load_tfile_or_url(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"main"),
contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"universe"),
@@ -296,7 +289,7 @@ class TestAptSourceConfig(TestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile_or_url(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
@@ -336,14 +329,14 @@ class TestAptSourceConfig(TestCase):
'filename': self.aptlistfile3}
self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
- contents = load_tfile_or_url(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
'cloud-init-test/ubuntu'),
"xenial", "universe"),
contents, flags=re.IGNORECASE))
- contents = load_tfile_or_url(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
@@ -375,7 +368,7 @@ class TestAptSourceConfig(TestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile_or_url(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index 7bb1b7c4..90fe6eed 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -48,12 +48,9 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
TARGET = None
-
-def load_tfile(*args, **kwargs):
- """load_tfile_or_url
- load file and return content after decoding
- """
- return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)
+MOCK_LSB_RELEASE_DATA = {
+ 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS',
+ 'release': '18.04', 'codename': 'bionic'}
class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
@@ -71,6 +68,9 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
self.join = os.path.join
self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+ self.add_patch(
+ 'cloudinit.config.cc_apt_configure.util.lsb_release',
+ 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy())
@staticmethod
def _add_apt_sources(*args, **kwargs):
@@ -83,7 +83,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
        Get the most basic default mirror and release info to be used in tests
"""
params = {}
- params['RELEASE'] = util.lsb_release()['codename']
+ params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release']
arch = 'amd64'
params['MIRROR'] = cc_apt_configure.\
get_default_mirrors(arch)["PRIMARY"]
@@ -119,7 +119,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://test.ubuntu.com/ubuntu",
"karmic-backports",
@@ -151,13 +151,13 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self._apt_src_basic(self.aptlistfile, cfg)
# extra verify on two extra files of this test
- contents = load_tfile(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://test.ubuntu.com/ubuntu",
"precise-backports",
"main universe multiverse restricted"),
contents, flags=re.IGNORECASE))
- contents = load_tfile(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", "http://test.ubuntu.com/ubuntu",
"lucid-backports",
@@ -174,7 +174,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"multiverse"),
@@ -201,12 +201,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
# extra verify on two extra files of this test
params = self._get_default_params()
- contents = load_tfile(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"main"),
contents, flags=re.IGNORECASE))
- contents = load_tfile(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb", params['MIRROR'], params['RELEASE'],
"universe"),
@@ -240,7 +240,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertTrue(os.path.isfile(filename))
- contents = load_tfile(filename)
+ contents = util.load_file(filename)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
@@ -277,14 +277,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
'keyid': "03683F77"}}
self._apt_src_keyid(self.aptlistfile, cfg, 3)
- contents = load_tfile(self.aptlistfile2)
+ contents = util.load_file(self.aptlistfile2)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
'cloud-init-test/ubuntu'),
"xenial", "universe"),
contents, flags=re.IGNORECASE))
- contents = load_tfile(self.aptlistfile3)
+ contents = util.load_file(self.aptlistfile3)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
@@ -310,7 +310,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertTrue(os.path.isfile(self.aptlistfile))
- contents = load_tfile(self.aptlistfile)
+ contents = util.load_file(self.aptlistfile)
self.assertTrue(re.search(r"%s %s %s %s\n" %
("deb",
('http://ppa.launchpad.net/smoser/'
@@ -471,7 +471,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
'uri':
'http://testsec.ubuntu.com/%s/' % component}]}
post = ("%s_dists_%s-updates_InRelease" %
- (component, util.lsb_release()['codename']))
+ (component, MOCK_LSB_RELEASE_DATA['codename']))
fromfn = ("%s/%s_%s" % (pre, archive, post))
tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
@@ -528,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
expected = sorted([npre + suff for opre, npre, suff in files])
# create files
- for (opre, npre, suff) in files:
+ for (opre, _npre, suff) in files:
fpath = os.path.join(apt_lists_d, opre + suff)
util.write_file(fpath, content=fpath)
@@ -949,7 +949,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
self.assertEqual(
orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
- def test_apt_v3_mirror_search_dns(self):
+ @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain')
+ def test_apt_v3_mirror_search_dns(self, m_get_hostname):
"""test_apt_v3_mirror_search_dns - Test searching dns patterns"""
pmir = "phit"
smir = "shit"
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index 29fc25e4..a76760fa 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -1,9 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_bootcmd
+from cloudinit.config.cc_bootcmd import handle, schema
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from cloudinit.tests.helpers import (
+ CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
import logging
import tempfile
@@ -50,7 +51,7 @@ class TestBootcmd(CiTestCase):
"""When the provided config doesn't contain bootcmd, skip it."""
cfg = {}
mycloud = self._get_cloud('ubuntu')
- cc_bootcmd.handle('notimportant', cfg, mycloud, LOG, None)
+ handle('notimportant', cfg, mycloud, LOG, None)
self.assertIn(
"Skipping module named notimportant, no 'bootcmd' key",
self.logs.getvalue())
@@ -60,7 +61,7 @@ class TestBootcmd(CiTestCase):
invalid_config = {'bootcmd': 1}
cc = self._get_cloud('ubuntu')
with self.assertRaises(TypeError) as context_manager:
- cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, [])
+ handle('cc_bootcmd', invalid_config, cc, LOG, [])
self.assertIn('Failed to shellify bootcmd', self.logs.getvalue())
self.assertEqual(
"Input to shellify was type 'int'. Expected list or tuple.",
@@ -76,7 +77,7 @@ class TestBootcmd(CiTestCase):
invalid_config = {'bootcmd': 1}
cc = self._get_cloud('ubuntu')
with self.assertRaises(TypeError):
- cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, [])
+ handle('cc_bootcmd', invalid_config, cc, LOG, [])
self.assertIn(
'Invalid config:\nbootcmd: 1 is not of type \'array\'',
self.logs.getvalue())
@@ -93,7 +94,7 @@ class TestBootcmd(CiTestCase):
'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
cc = self._get_cloud('ubuntu')
with self.assertRaises(TypeError) as context_manager:
- cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, [])
+ handle('cc_bootcmd', invalid_config, cc, LOG, [])
expected_warnings = [
'bootcmd.1: 20 is not valid under any of the given schemas',
'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given'
@@ -117,7 +118,8 @@ class TestBootcmd(CiTestCase):
'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]}
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- cc_bootcmd.handle('cc_bootcmd', valid_config, cc, LOG, [])
+ with self.allow_subp(['/bin/sh']):
+ handle('cc_bootcmd', valid_config, cc, LOG, [])
self.assertEqual(my_id + ' iid-datasource-none\n',
util.load_file(out_file))
@@ -127,15 +129,33 @@ class TestBootcmd(CiTestCase):
valid_config = {'bootcmd': ['exit 1']} # Script with error
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- with self.assertRaises(util.ProcessExecutionError) as ctxt_manager:
- cc_bootcmd.handle('does-not-matter', valid_config, cc, LOG, [])
+ with self.allow_subp(['/bin/sh']):
+ with self.assertRaises(util.ProcessExecutionError) as ctxt:
+ handle('does-not-matter', valid_config, cc, LOG, [])
self.assertIn(
'Unexpected error while running command.\n'
"Command: ['/bin/sh',",
- str(ctxt_manager.exception))
+ str(ctxt.exception))
self.assertIn(
'Failed to run bootcmd module does-not-matter',
self.logs.getvalue())
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+ """Directly test schema rather than through handle."""
+
+ schema = schema
+
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+ ["byebye", "byebye"], 'command entries can be duplicate')
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ ["echo bye", "echo bye"], "command entries can be duplicate.")
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index 0136a93d..b16532ea 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -14,27 +14,43 @@ from cloudinit.sources import DataSourceNone
from cloudinit import util
from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, mock, skipIf)
+ HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
LOG = logging.getLogger(__name__)
CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
+# This is adjusted to use http because using with https causes issue
+# in some openssl/httpretty combinations.
+# https://github.com/gabrielfalcao/HTTPretty/issues/242
+# We saw issue in opensuse 42.3 with
+# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
+OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
-class TestInstallChefOmnibus(CiTestCase):
+
+class TestInstallChefOmnibus(HttprettyTestCase):
def setUp(self):
+ super(TestInstallChefOmnibus, self).setUp()
self.new_root = self.tmp_dir()
- @httpretty.activate
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
def test_install_chef_from_omnibus_runs_chef_url_content(self):
- """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script."""
- chef_outfile = self.tmp_path('chef.out', self.new_root)
- response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
+ """install_chef_from_omnibus calls subp_blob_in_tempfile."""
+ response = b'#!/bin/bash\necho "Hi Mom"'
httpretty.register_uri(
httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200)
- cc_chef.install_chef_from_omnibus()
- self.assertEqual('Hi Mom\n', util.load_file(chef_outfile))
+ ret = (None, None) # stdout, stderr but capture=False
+
+ with mock.patch("cloudinit.config.cc_chef.util.subp_blob_in_tempfile",
+ return_value=ret) as m_subp_blob:
+ cc_chef.install_chef_from_omnibus()
+ # admittedly whitebox, but assuming subp_blob_in_tempfile works
+ # this should be fine.
+ self.assertEqual(
+ [mock.call(blob=response, args=[], basename='chef-omnibus-install',
+ capture=False)],
+ m_subp_blob.call_args_list)
@mock.patch('cloudinit.config.cc_chef.url_helper.readurl')
@mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
@@ -65,7 +81,7 @@ class TestInstallChefOmnibus(CiTestCase):
expected_subp_kwargs,
m_subp_blob.call_args_list[0][1])
- @httpretty.activate
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
@mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile')
def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
"""install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
index ced05a8d..d854afcb 100644
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ b/tests/unittests/test_handler/test_handler_etc_hosts.py
@@ -49,6 +49,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
if '192.168.1.1\tblah.blah.us\tblah' not in contents:
self.assertIsNone('Default etc/hosts content modified')
+ @t_help.skipUnlessJinja()
def test_write_etc_hosts_suse_template(self):
cfg = {
'manage_etc_hosts': 'template',
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index a2054980..2478ebc4 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -33,45 +33,56 @@ class TestLxd(t_help.CiTestCase):
cc = cloud.Cloud(ds, paths, {}, d, None)
return cc
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_init(self, mock_util):
+ def test_lxd_init(self, mock_util, m_maybe_clean):
cc = self._get_cloud('ubuntu')
mock_util.which.return_value = True
+ m_maybe_clean.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
self.assertTrue(mock_util.which.called)
- init_call = mock_util.subp.call_args_list[0][0][0]
- self.assertEqual(init_call,
- ['lxd', 'init', '--auto',
- '--network-address=0.0.0.0',
- '--storage-backend=zfs',
- '--storage-pool=poolname'])
+ # no bridge config, so maybe_cleanup should not be called.
+ self.assertFalse(m_maybe_clean.called)
+ self.assertEqual(
+ [mock.call(['lxd', 'waitready', '--timeout=300']),
+ mock.call(
+ ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
+ '--storage-backend=zfs', '--storage-pool=poolname'])],
+ mock_util.subp.call_args_list)
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.util")
- def test_lxd_install(self, mock_util):
+ def test_lxd_install(self, mock_util, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
mock_util.which.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
self.assertNotIn('WARN', self.logs.getvalue())
self.assertTrue(cc.distro.install_packages.called)
+ cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
+ self.assertFalse(m_maybe_clean.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(sorted(install_pkg), ['lxd', 'zfs'])
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_init_does_nothing(self, mock_util):
+ def test_no_init_does_nothing(self, mock_util, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
self.assertFalse(mock_util.subp.called)
+ self.assertFalse(m_maybe_clean.called)
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.util")
- def test_no_lxd_does_nothing(self, mock_util):
+ def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean):
cc = self._get_cloud('ubuntu')
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
self.assertFalse(mock_util.subp.called)
+ self.assertFalse(m_maybe_clean.called)
def test_lxd_debconf_new_full(self):
data = {"mode": "new",
@@ -147,14 +158,13 @@ class TestLxd(t_help.CiTestCase):
"domain": "lxd"}
self.assertEqual(
cc_lxd.bridge_to_cmd(data),
- (["lxc", "network", "create", "testbr0",
+ (["network", "create", "testbr0",
"ipv4.address=10.0.8.1/24", "ipv4.nat=true",
"ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
"ipv6.address=fd98:9e0:3744::1/64",
- "ipv6.nat=true", "dns.domain=lxd",
- "--force-local"],
- ["lxc", "network", "attach-profile",
- "testbr0", "default", "eth0", "--force-local"]))
+ "ipv6.nat=true", "dns.domain=lxd"],
+ ["network", "attach-profile",
+ "testbr0", "default", "eth0"]))
def test_lxd_cmd_new_partial(self):
data = {"mode": "new",
@@ -163,19 +173,18 @@ class TestLxd(t_help.CiTestCase):
"ipv6_nat": "true"}
self.assertEqual(
cc_lxd.bridge_to_cmd(data),
- (["lxc", "network", "create", "lxdbr0", "ipv4.address=none",
- "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true",
- "--force-local"],
- ["lxc", "network", "attach-profile",
- "lxdbr0", "default", "eth0", "--force-local"]))
+ (["network", "create", "lxdbr0", "ipv4.address=none",
+ "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
+ ["network", "attach-profile",
+ "lxdbr0", "default", "eth0"]))
def test_lxd_cmd_existing(self):
data = {"mode": "existing",
"name": "testbr0"}
self.assertEqual(
cc_lxd.bridge_to_cmd(data),
- (None, ["lxc", "network", "attach-profile",
- "testbr0", "default", "eth0", "--force-local"]))
+ (None, ["network", "attach-profile",
+ "testbr0", "default", "eth0"]))
def test_lxd_cmd_none(self):
data = {"mode": "none"}
@@ -183,4 +192,43 @@ class TestLxd(t_help.CiTestCase):
cc_lxd.bridge_to_cmd(data),
(None, None))
+
+class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
+ """Test the implementation of maybe_cleanup_default."""
+
+ defnet = cc_lxd._DEFAULT_NETWORK_NAME
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_other_than_default_not_deleted(self, m_lxc):
+ """deletion or removal should only occur if bridge is default."""
+ cc_lxd.maybe_cleanup_default(
+ net_name="lxdbr1", did_init=True, create=True, attach=True)
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_did_init_false_does_not_delete(self, m_lxc):
+ """deletion or removal should only occur if did_init is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=False, create=True, attach=True)
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_deleted_if_create_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=True, create=True, attach=False)
+ m_lxc.assert_called_once_with(["network", "delete", self.defnet])
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_device_removed_if_attach_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ nic_name = "my_nic"
+ profile = "my_profile"
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=True, create=False, attach=True,
+ profile=profile, nic_name=nic_name)
+ m_lxc.assert_called_once_with(
+ ["profile", "device", "remove", profile, nic_name])
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index fe492d4b..8fea6c2a 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -1,8 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os.path
-import shutil
-import tempfile
from cloudinit.config import cc_mounts
@@ -18,8 +16,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestSanitizeDevname, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
+ self.new_root = self.tmp_dir()
self.patchOS(self.new_root)
def _touch(self, path):
@@ -134,4 +131,103 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
cc_mounts.sanitize_devname(
'ephemeral0.1', lambda x: disk_path, mock.Mock()))
+
+class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
+
+ swap_path = '/dev/sdb1'
+
+ def setUp(self):
+ super(TestFstabHandling, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
+ self._makedirs('/etc')
+
+ self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
+ 'mock_fstab_path',
+ self.fstab_path,
+ autospec=False)
+
+ self.add_patch('cloudinit.config.cc_mounts._is_block_device',
+ 'mock_is_block_device',
+ return_value=True)
+
+ self.add_patch('cloudinit.config.cc_mounts.util.subp',
+ 'mock_util_subp')
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip('/'))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def device_name_to_device(self, path):
+ if path == 'swap':
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
+ def test_fstab_no_swap_device(self):
+ '''Ensure that cloud-init adds a discovered swap partition
+ to /etc/fstab.'''
+
+ fstab_original_content = ''
+ fstab_expected_content = (
+ '%s\tnone\tswap\tsw,comment=cloudconfig\t'
+ '0\t0\n' % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, 'w') as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, 'r') as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_same_swap_device_already_configured(self):
+ '''Ensure that cloud-init will not add a swap device if the same
+ device already exists in /etc/fstab.'''
+
+ fstab_original_content = '%s swap swap defaults 0 0\n' % (
+ self.swap_path,)
+ fstab_expected_content = fstab_original_content
+
+ with open(cc_mounts.FSTAB_PATH, 'w') as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, 'r') as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_alternate_swap_device_already_configured(self):
+ '''Ensure that cloud-init will add a discovered swap device to
+ /etc/fstab even when there exists a swap definition on another
+ device.'''
+
+ fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n'
+ fstab_expected_content = (
+ fstab_original_content +
+ '%s\tnone\tswap\tsw,comment=cloudconfig\t'
+ '0\t0\n' % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, 'w') as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, 'r') as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
# vi: ts=4 expandtab
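
The TestFstabHandling cases above fix the exact fstab entry cc_mounts writes
for a discovered swap device; the comment=cloudconfig option marks lines that
cloud-init owns, which is how the same-device case detects an existing entry
and no-ops. A minimal sketch of that entry format, taken from the
expected-content strings in the tests:

    # Sketch: the swap entry format asserted by TestFstabHandling.
    swap_path = '/dev/sdb1'  # hypothetical device, as in the tests
    entry = '%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n' % swap_path
    # fields: device, mount point, fs type, options, dump, pass
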
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 695897c0..0f22e579 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -3,21 +3,23 @@
from cloudinit.config import cc_ntp
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
+
from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+ CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+import copy
import os
from os.path import dirname
import shutil
-NTP_TEMPLATE = b"""\
+NTP_TEMPLATE = """\
## template: jinja
servers {{servers}}
pools {{pools}}
"""
-TIMESYNCD_TEMPLATE = b"""\
+TIMESYNCD_TEMPLATE = """\
## template:jinja
[Time]
{% if servers or pools -%}
@@ -32,56 +34,88 @@ class TestNtp(FilesystemMockingTestCase):
def setUp(self):
super(TestNtp, self).setUp()
- self.subp = util.subp
self.new_root = self.tmp_dir()
+ self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+ self.m_snappy.return_value = False
+ self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
+ self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
+ def _get_cloud(self, distro, sys_cfg=None):
+ self.new_root = self.reRoot(root=self.new_root)
paths = helpers.Paths({'templates_dir': self.new_root})
cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
+ if not sys_cfg:
+ sys_cfg = {}
+ mydist = cls(distro, sys_cfg, paths)
+ myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths)
+ return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
+
+ def _get_template_path(self, template_name, distro, basepath=None):
+ # ntp.conf.{distro} -> ntp.conf.debian.tmpl
+ template_fn = '{0}.tmpl'.format(
+ template_name.replace('{distro}', distro))
+ if not basepath:
+ basepath = self.new_root
+ path = os.path.join(basepath, template_fn)
+ return path
+
+ def _generate_template(self, template=None):
+ if not template:
+ template = NTP_TEMPLATE
+ confpath = os.path.join(self.new_root, 'client.conf')
+ template_fn = os.path.join(self.new_root, 'client.conf.tmpl')
+ util.write_file(template_fn, content=template)
+ return (confpath, template_fn)
+
+ def _mock_ntp_client_config(self, client=None, distro=None):
+ if not client:
+ client = 'ntp'
+ if not distro:
+ distro = 'ubuntu'
+ dcfg = cc_ntp.distro_ntp_client_configs(distro)
+ if client == 'systemd-timesyncd':
+ template = TIMESYNCD_TEMPLATE
+ else:
+ template = NTP_TEMPLATE
+ (confpath, _template_fn) = self._generate_template(template=template)
+ ntpconfig = copy.deepcopy(dcfg[client])
+ ntpconfig['confpath'] = confpath
+ ntpconfig['template_name'] = os.path.basename(confpath)
+ return ntpconfig
@mock.patch("cloudinit.config.cc_ntp.util")
def test_ntp_install(self, mock_util):
- """ntp_install installs via install_func when check_exe is absent."""
+ """ntp_install_client runs install_func when check_exe is absent."""
mock_util.which.return_value = None # check_exe not found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx')
-
+ cc_ntp.install_ntp_client(install_func,
+ packages=['ntpx'], check_exe='ntpdx')
mock_util.which.assert_called_with('ntpdx')
install_func.assert_called_once_with(['ntpx'])
@mock.patch("cloudinit.config.cc_ntp.util")
def test_ntp_install_not_needed(self, mock_util):
- """ntp_install doesn't attempt install when check_exe is found."""
- mock_util.which.return_value = ["/usr/sbin/ntpd"] # check_exe found.
+ """ntp_install_client doesn't install when check_exe is found."""
+ client = 'chrony'
+ mock_util.which.return_value = [client] # check_exe found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd')
+ cc_ntp.install_ntp_client(install_func, packages=[client],
+ check_exe=client)
install_func.assert_not_called()
@mock.patch("cloudinit.config.cc_ntp.util")
def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util):
- """ntp_install calls install_func with empty list"""
+ """ntp_install_client runs install_func with empty list"""
mock_util.which.return_value = None # check_exe not found
install_func = mock.MagicMock()
- cc_ntp.install_ntp(install_func, packages=[], check_exe='timesyncd')
+ cc_ntp.install_ntp_client(install_func, packages=[],
+ check_exe='timesyncd')
install_func.assert_called_once_with([])
- def test_ntp_rename_ntp_conf(self):
- """When NTP_CONF exists, rename_ntp moves it."""
- ntpconf = self.tmp_path("ntp.conf", self.new_root)
- util.write_file(ntpconf, "")
- with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
- cc_ntp.rename_ntp_conf()
- self.assertFalse(os.path.exists(ntpconf))
- self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
-
@mock.patch("cloudinit.config.cc_ntp.util")
def test_reload_ntp_defaults(self, mock_util):
"""Test service is restarted/reloaded (defaults)"""
- service = 'ntp'
+ service = 'ntp_service_name'
cmd = ['service', service, 'restart']
cc_ntp.reload_ntp(service)
mock_util.subp.assert_called_with(cmd, capture=True)
@@ -89,193 +123,169 @@ class TestNtp(FilesystemMockingTestCase):
@mock.patch("cloudinit.config.cc_ntp.util")
def test_reload_ntp_systemd(self, mock_util):
"""Test service is restarted/reloaded (systemd)"""
- service = 'ntp'
- cmd = ['systemctl', 'reload-or-restart', service]
+ service = 'ntp_service_name'
cc_ntp.reload_ntp(service, systemd=True)
- mock_util.subp.assert_called_with(cmd, capture=True)
-
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_reload_ntp_systemd_timesycnd(self, mock_util):
- """Test service is restarted/reloaded (systemd/timesyncd)"""
- service = 'systemd-timesycnd'
cmd = ['systemctl', 'reload-or-restart', service]
- cc_ntp.reload_ntp(service, systemd=True)
mock_util.subp.assert_called_with(cmd, capture=True)
+ def test_ntp_rename_ntp_conf(self):
+ """When NTP_CONF exists, rename_ntp moves it."""
+ ntpconf = self.tmp_path("ntp.conf", self.new_root)
+ util.write_file(ntpconf, "")
+ cc_ntp.rename_ntp_conf(confpath=ntpconf)
+ self.assertFalse(os.path.exists(ntpconf))
+ self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
+
def test_ntp_rename_ntp_conf_skip_missing(self):
"""When NTP_CONF doesn't exist rename_ntp doesn't create a file."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
self.assertFalse(os.path.exists(ntpconf))
- with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf):
- cc_ntp.rename_ntp_conf()
+ cc_ntp.rename_ntp_conf(confpath=ntpconf)
self.assertFalse(os.path.exists("{0}.dist".format(ntpconf)))
self.assertFalse(os.path.exists(ntpconf))
- def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self):
- """write_ntp_config_template reads content from ntp.conf.tmpl.
-
- It reads ntp.conf.tmpl if present and renders the value from servers
- key. When no pools key is defined, template is rendered using an empty
- list for pools.
- """
- distro = 'ubuntu'
- cfg = {
- 'servers': ['192.168.2.1', '192.168.2.2']
- }
- mycloud = self._get_cloud(distro)
- ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist
- # Create ntp.conf.tmpl
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf)
- content = util.read_file_or_url('file://' + ntp_conf).contents
+ def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
+ """write_ntp_config_template reads from $client.conf.distro.tmpl"""
+ servers = []
+ pools = ['10.0.0.1', '10.0.0.2']
+ (confpath, template_fn) = self._generate_template()
+ mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ with mock.patch(mock_path, self.new_root):
+ cc_ntp.write_ntp_config_template('ubuntu',
+ servers=servers, pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None)
self.assertEqual(
- "servers ['192.168.2.1', '192.168.2.2']\npools []\n",
- content.decode())
+ "servers []\npools ['10.0.0.1', '10.0.0.2']\n",
+ util.load_file(confpath))
- def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
- """write_ntp_config_template reads content from ntp.conf.distro.tmpl.
+ def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
+ """write_ntp_config_template defaults pools servers upon empty config.
- It reads ntp.conf.<distro>.tmpl before attempting ntp.conf.tmpl. It
- renders the value from the keys servers and pools. When no
- servers value is present, template is rendered using an empty list.
+ When both pools and servers are empty, default NR_POOL_SERVERS get
+ configured.
"""
distro = 'ubuntu'
- cfg = {
- 'pools': ['10.0.0.1', '10.0.0.2']
- }
- mycloud = self._get_cloud(distro)
- ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
- # Create ntp.conf.tmpl which isn't read
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(b'NOT READ: ntp.conf.<distro>.tmpl is primary')
- # Create ntp.conf.tmpl.<distro>
- with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf)
- content = util.read_file_or_url('file://' + ntp_conf).contents
+ pools = cc_ntp.generate_server_names(distro)
+ servers = []
+ (confpath, template_fn) = self._generate_template()
+ mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ with mock.patch(mock_path, self.new_root):
+ cc_ntp.write_ntp_config_template(distro,
+ servers=servers, pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None)
self.assertEqual(
- "servers []\npools ['10.0.0.1', '10.0.0.2']\n",
- content.decode())
+ "servers []\npools {0}\n".format(pools),
+ util.load_file(confpath))
- def test_write_ntp_config_template_defaults_pools_when_empty_lists(self):
- """write_ntp_config_template defaults pools servers upon empty config.
+ def test_defaults_pools_empty_lists_sles(self):
+ """write_ntp_config_template defaults opensuse pools upon empty config.
When both pools and servers are empty, default NR_POOL_SERVERS get
configured.
"""
- distro = 'ubuntu'
- mycloud = self._get_cloud(distro)
- ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
- # Create ntp.conf.tmpl
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
- content = util.read_file_or_url('file://' + ntp_conf).contents
- default_pools = [
- "{0}.{1}.pool.ntp.org".format(x, distro)
- for x in range(0, cc_ntp.NR_POOL_SERVERS)]
+ distro = 'sles'
+ default_pools = cc_ntp.generate_server_names(distro)
+ (confpath, template_fn) = self._generate_template()
+
+ cc_ntp.write_ntp_config_template(distro,
+ servers=[], pools=[],
+ path=confpath,
+ template_fn=template_fn,
+ template=None)
+ for pool in default_pools:
+ self.assertIn('opensuse', pool)
self.assertEqual(
"servers []\npools {0}\n".format(default_pools),
- content.decode())
+ util.load_file(confpath))
self.assertIn(
"Adding distro default ntp pool servers: {0}".format(
",".join(default_pools)),
self.logs.getvalue())
- @mock.patch("cloudinit.config.cc_ntp.ntp_installable")
- def test_ntp_handler_mocked_template(self, m_ntp_install):
- """Test ntp handler renders ubuntu ntp.conf template."""
- pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
- servers = ['192.168.23.3', '192.168.23.4']
- cfg = {
- 'ntp': {
- 'pools': pools,
- 'servers': servers
- }
- }
- mycloud = self._get_cloud('ubuntu')
- ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
- m_ntp_install.return_value = True
-
- # Create ntp.conf.tmpl
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- with mock.patch.object(util, 'which', return_value=None):
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-
- content = util.read_file_or_url('file://' + ntp_conf).contents
- self.assertEqual(
- 'servers {0}\npools {1}\n'.format(servers, pools),
- content.decode())
-
- @mock.patch("cloudinit.config.cc_ntp.util")
- def test_ntp_handler_mocked_template_snappy(self, m_util):
- """Test ntp handler renders timesycnd.conf template on snappy."""
+ def test_timesyncd_template(self):
+ """Test timesycnd template is correct"""
pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
servers = ['192.168.23.3', '192.168.23.4']
- cfg = {
- 'ntp': {
- 'pools': pools,
- 'servers': servers
- }
- }
- mycloud = self._get_cloud('ubuntu')
- m_util.system_is_snappy.return_value = True
-
- # Create timesyncd.conf.tmpl
- tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root)
- template = '{0}.tmpl'.format(tsyncd_conf)
- with open(template, 'wb') as stream:
- stream.write(TIMESYNCD_TEMPLATE)
-
- with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-
- content = util.read_file_or_url('file://' + tsyncd_conf).contents
+ (confpath, template_fn) = self._generate_template(
+ template=TIMESYNCD_TEMPLATE)
+ cc_ntp.write_ntp_config_template('ubuntu',
+ servers=servers, pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None)
self.assertEqual(
"[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
- content.decode())
-
- def test_ntp_handler_real_distro_templates(self):
- """Test ntp handler renders the shipped distro ntp.conf templates."""
+ util.load_file(confpath))
+
+ def test_distro_ntp_client_configs(self):
+ """Test we have updated ntp client configs on different distros"""
+ delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG)
+ base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG)
+ # confirm no-delta distros match the base config
+ for distro in cc_ntp.distros:
+ if distro not in delta:
+ result = cc_ntp.distro_ntp_client_configs(distro)
+ self.assertEqual(base, result)
+ # for distros with delta, ensure the merged config values match
+ # what is set in the delta
+ for distro in delta.keys():
+ result = cc_ntp.distro_ntp_client_configs(distro)
+ for client in delta[distro].keys():
+ for key in delta[distro][client].keys():
+ self.assertEqual(delta[distro][client][key],
+ result[client][key])
+
+ def test_ntp_handler_real_distro_ntp_templates(self):
+ """Test ntp handler renders the shipped distro ntp client templates."""
pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
servers = ['192.168.23.3', '192.168.23.4']
- cfg = {
- 'ntp': {
- 'pools': pools,
- 'servers': servers
- }
- }
- ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
- for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'):
- mycloud = self._get_cloud(distro)
- root_dir = dirname(dirname(os.path.realpath(util.__file__)))
- tmpl_file = os.path.join(
- '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro))
- # Create a copy in our tmp_dir
- shutil.copy(
- tmpl_file,
- os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro))
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- with mock.patch.object(util, 'which', return_value=[True]):
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-
- content = util.read_file_or_url('file://' + ntp_conf).contents
- expected_servers = '\n'.join([
- 'server {0} iburst'.format(server) for server in servers])
- self.assertIn(
- expected_servers, content.decode(),
- 'failed to render ntp.conf for distro:{0}'.format(distro))
- expected_pools = '\n'.join([
- 'pool {0} iburst'.format(pool) for pool in pools])
- self.assertIn(
- expected_pools, content.decode(),
- 'failed to render ntp.conf for distro:{0}'.format(distro))
+ for client in ['ntp', 'systemd-timesyncd', 'chrony']:
+ for distro in cc_ntp.distros:
+ distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
+ ntpclient = distro_cfg[client]
+ confpath = (
+ os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
+ template = ntpclient.get('template_name')
+ # find sourcetree template file
+ root_dir = (
+ dirname(dirname(os.path.realpath(util.__file__))) +
+ '/templates')
+ source_fn = self._get_template_path(template, distro,
+ basepath=root_dir)
+ template_fn = self._get_template_path(template, distro)
+ # don't fail if cloud-init doesn't have a template for
+ # a distro,client pair
+ if not os.path.exists(source_fn):
+ continue
+ # Create a copy in our tmp_dir
+ shutil.copy(source_fn, template_fn)
+ cc_ntp.write_ntp_config_template(distro, servers=servers,
+ pools=pools, path=confpath,
+ template_fn=template_fn)
+ content = util.load_file(confpath)
+ if client in ['ntp', 'chrony']:
+ expected_servers = '\n'.join([
+ 'server {0} iburst'.format(srv) for srv in servers])
+ print('distro=%s client=%s' % (distro, client))
+ self.assertIn(expected_servers, content,
+ ('failed to render {0} conf'
+ ' for distro:{1}'.format(client, distro)))
+ expected_pools = '\n'.join([
+ 'pool {0} iburst'.format(pool) for pool in pools])
+ self.assertIn(expected_pools, content,
+ ('failed to render {0} conf'
+ ' for distro:{1}'.format(client, distro)))
+ elif client == 'systemd-timesyncd':
+ expected_content = (
+ "# cloud-init generated file\n" +
+ "# See timesyncd.conf(5) for details.\n\n" +
+ "[Time]\nNTP=%s %s \n" % (" ".join(servers),
+ " ".join(pools)))
+ self.assertEqual(expected_content, content)
def test_no_ntpcfg_does_nothing(self):
"""When no ntp section is defined handler logs a warning and noops."""
@@ -285,95 +295,96 @@ class TestNtp(FilesystemMockingTestCase):
'not present or disabled by cfg\n',
self.logs.getvalue())
- def test_ntp_handler_schema_validation_allows_empty_ntp_config(self):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_schema_validation_allows_empty_ntp_config(self,
+ m_select):
"""Ntp schema validation allows for an empty ntp: configuration."""
valid_empty_configs = [{'ntp': {}}, {'ntp': None}]
- distro = 'ubuntu'
- cc = self._get_cloud(distro)
- ntp_conf = os.path.join(self.new_root, 'ntp.conf')
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
for valid_empty_config in valid_empty_configs:
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, [])
- with open(ntp_conf) as stream:
- content = stream.read()
- default_pools = [
- "{0}.{1}.pool.ntp.org".format(x, distro)
- for x in range(0, cc_ntp.NR_POOL_SERVERS)]
- self.assertEqual(
- "servers []\npools {0}\n".format(default_pools),
- content)
- self.assertNotIn('Invalid config:', self.logs.getvalue())
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
+ pools = cc_ntp.generate_server_names(mycloud.distro.name)
+ self.assertEqual(
+ "servers []\npools {0}\n".format(pools),
+ util.load_file(confpath))
+ self.assertNotIn('Invalid config:', self.logs.getvalue())
@skipUnlessJsonSchema()
- def test_ntp_handler_schema_validation_warns_non_string_item_type(self):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
+ m_sel):
"""Ntp schema validation warns of non-strings in pools or servers.
Schema validation is not strict, so ntp config is still rendered.
"""
invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
- cc = self._get_cloud('ubuntu')
- ntp_conf = os.path.join(self.new_root, 'ntp.conf')
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
- self.assertIn(
- "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
- "ntp.servers.1: None is not of type 'string'",
- self.logs.getvalue())
- with open(ntp_conf) as stream:
- content = stream.read()
- self.assertEqual("servers ['valid', None]\npools [123]\n", content)
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_sel.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
+ "ntp.servers.1: None is not of type 'string'",
+ self.logs.getvalue())
+ self.assertEqual("servers ['valid', None]\npools [123]\n",
+ util.load_file(confpath))
@skipUnlessJsonSchema()
- def test_ntp_handler_schema_validation_warns_of_non_array_type(self):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_schema_validation_warns_of_non_array_type(self,
+ m_select):
"""Ntp schema validation warns of non-array pools or servers types.
Schema validation is not strict, so ntp config is still rendered.
"""
invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
- cc = self._get_cloud('ubuntu')
- ntp_conf = os.path.join(self.new_root, 'ntp.conf')
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
- self.assertIn(
- "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
- "ntp.servers: 'non-array' is not of type 'array'",
- self.logs.getvalue())
- with open(ntp_conf) as stream:
- content = stream.read()
- self.assertEqual("servers non-array\npools 123\n", content)
+
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
+ "ntp.servers: 'non-array' is not of type 'array'",
+ self.logs.getvalue())
+ self.assertEqual("servers non-array\npools 123\n",
+ util.load_file(confpath))
@skipUnlessJsonSchema()
- def test_ntp_handler_schema_validation_warns_invalid_key_present(self):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_schema_validation_warns_invalid_key_present(self,
+ m_select):
"""Ntp schema validation warns of invalid keys present in ntp config.
Schema validation is not strict, so ntp config is still rendered.
"""
invalid_config = {
'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
- cc = self._get_cloud('ubuntu')
- ntp_conf = os.path.join(self.new_root, 'ntp.conf')
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
- self.assertIn(
- "Invalid config:\nntp: Additional properties are not allowed "
- "('invalidkey' was unexpected)",
- self.logs.getvalue())
- with open(ntp_conf) as stream:
- content = stream.read()
- self.assertEqual(
- "servers []\npools ['0.mycompany.pool.ntp.org']\n",
- content)
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ self.assertIn(
+ "Invalid config:\nntp: Additional properties are not allowed "
+ "('invalidkey' was unexpected)",
+ self.logs.getvalue())
+ self.assertEqual(
+ "servers []\npools ['0.mycompany.pool.ntp.org']\n",
+ util.load_file(confpath))
@skipUnlessJsonSchema()
- def test_ntp_handler_schema_validation_warns_of_duplicates(self):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
"""Ntp schema validation warns of duplicates in servers or pools.
Schema validation is not strict, so ntp config is still rendered.
@@ -381,74 +392,330 @@ class TestNtp(FilesystemMockingTestCase):
invalid_config = {
'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
'servers': ['10.0.0.1', '10.0.0.1']}}
- cc = self._get_cloud('ubuntu')
- ntp_conf = os.path.join(self.new_root, 'ntp.conf')
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.handle('cc_ntp', invalid_config, cc, None, [])
- self.assertIn(
- "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has "
- "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has "
- "non-unique elements",
- self.logs.getvalue())
- with open(ntp_conf) as stream:
- content = stream.read()
- self.assertEqual(
- "servers ['10.0.0.1', '10.0.0.1']\n"
- "pools ['0.mypool.org', '0.mypool.org']\n",
- content)
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ self.assertIn(
+ "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']"
+ " has non-unique elements\nntp.servers: "
+ "['10.0.0.1', '10.0.0.1'] has non-unique elements",
+ self.logs.getvalue())
+ self.assertEqual(
+ "servers ['10.0.0.1', '10.0.0.1']\n"
+ "pools ['0.mypool.org', '0.mypool.org']\n",
+ util.load_file(confpath))
- @mock.patch("cloudinit.config.cc_ntp.ntp_installable")
- def test_ntp_handler_timesyncd(self, m_ntp_install):
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_timesyncd(self, m_select):
"""Test ntp handler configures timesyncd"""
- m_ntp_install.return_value = False
- distro = 'ubuntu'
- cfg = {
- 'servers': ['192.168.2.1', '192.168.2.2'],
- 'pools': ['0.mypool.org'],
- }
- mycloud = self._get_cloud(distro)
- tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root)
- # Create timesyncd.conf.tmpl
- template = '{0}.tmpl'.format(tsyncd_conf)
- print(template)
- with open(template, 'wb') as stream:
- stream.write(TIMESYNCD_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf):
- cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf,
- template='timesyncd.conf')
-
- content = util.read_file_or_url('file://' + tsyncd_conf).contents
- self.assertEqual(
- "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
- content.decode())
+ servers = ['192.168.2.1', '192.168.2.2']
+ pools = ['0.mypool.org']
+ cfg = {'ntp': {'servers': servers, 'pools': pools}}
+ client = 'systemd-timesyncd'
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro,
+ client=client)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ cc_ntp.handle('cc_ntp', cfg, mycloud, None, [])
+ self.assertEqual(
+ "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
+ util.load_file(confpath))
+
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_handler_enabled_false(self, m_select):
+ """Test ntp handler does not run if enabled: false """
+ cfg = {'ntp': {'enabled': False}}
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ self.assertEqual(0, m_select.call_count)
+
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.distros.Distro.uses_systemd")
+ def test_ntp_the_whole_package(self, m_sysd, m_select):
+ """Test enabled config renders template, and restarts service """
+ cfg = {'ntp': {'enabled': True}}
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(distro=distro)
+ confpath = ntpconfig['confpath']
+ service_name = ntpconfig['service_name']
+ m_select.return_value = ntpconfig
+ pools = cc_ntp.generate_server_names(mycloud.distro.name)
+ # force uses systemd path
+ m_sysd.return_value = True
+ with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
+ # allow use of util.mergemanydict
+ m_util.mergemanydict.side_effect = util.mergemanydict
+ # default client is present
+ m_util.which.return_value = True
+ # use the config 'enabled' value
+ m_util.is_false.return_value = util.is_false(
+ cfg['ntp']['enabled'])
+ cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ m_util.subp.assert_called_with(
+ ['systemctl', 'reload-or-restart',
+ service_name], capture=True)
+ self.assertEqual(
+ "servers []\npools {0}\n".format(pools),
+ util.load_file(confpath))
+
+ def test_opensuse_picks_chrony(self):
+ """Test opensuse picks chrony or ntp on certain distro versions"""
+ # < 15.0 => ntp
+ self.m_sysinfo.return_value = {'dist':
+ ('openSUSE', '13.2', 'Harlequin')}
+ mycloud = self._get_cloud('opensuse')
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ self.assertEqual('ntp', expected_client)
+
+ # >= 15.0 and not openSUSE => chrony
+ self.m_sysinfo.return_value = {'dist':
+ ('SLES', '15.0',
+ 'SUSE Linux Enterprise Server 15')}
+ mycloud = self._get_cloud('sles')
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ self.assertEqual('chrony', expected_client)
+
+ # >= 15.0 and openSUSE and ver != 42 => chrony
+ self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed',
+ '20180326',
+ 'tumbleweed')}
+ mycloud = self._get_cloud('opensuse')
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ self.assertEqual('chrony', expected_client)
+
+ def test_ubuntu_xenial_picks_ntp(self):
+ """Test Ubuntu picks ntp on xenial release"""
+
+ self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
+ mycloud = self._get_cloud('ubuntu')
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ self.assertEqual('ntp', expected_client)
- def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self):
- """write_ntp_config_template defaults pools servers upon empty config.
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_snappy_system_picks_timesyncd(self, m_which):
+ """Test snappy systems prefer installed clients"""
- When both pools and servers are empty, default NR_POOL_SERVERS get
- configured.
- """
- distro = 'sles'
- mycloud = self._get_cloud(distro)
- ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist
- # Create ntp.conf.tmpl
- with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream:
- stream.write(NTP_TEMPLATE)
- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf):
- cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf)
- content = util.read_file_or_url('file://' + ntp_conf).contents
- default_pools = [
- "{0}.opensuse.pool.ntp.org".format(x)
- for x in range(0, cc_ntp.NR_POOL_SERVERS)]
- self.assertEqual(
- "servers []\npools {0}\n".format(default_pools),
- content.decode())
- self.assertIn(
- "Adding distro default ntp pool servers: {0}".format(
- ",".join(default_pools)),
- self.logs.getvalue())
+ # we are on ubuntu-core here
+ self.m_snappy.return_value = True
+ # ubuntu core systems will have timesyncd installed
+ m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
+ None, None, None])
+ distro = 'ubuntu'
+ mycloud = self._get_cloud(distro)
+ distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+ expected_client = 'systemd-timesyncd'
+ expected_cfg = distro_configs[expected_client]
+ expected_calls = []
+ # we only get to timesyncd
+ for client in mycloud.distro.preferred_ntp_clients[0:2]:
+ cfg = distro_configs[client]
+ expected_calls.append(mock.call(cfg['check_exe']))
+ result = cc_ntp.select_ntp_client(None, mycloud.distro)
+ m_which.assert_has_calls(expected_calls)
+ self.assertEqual(sorted(expected_cfg), sorted(cfg))
+ self.assertEqual(sorted(expected_cfg), sorted(result))
+
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_ntp_distro_searches_all_preferred_clients(self, m_which):
+ """Test select_ntp_client search all distro perferred clients """
+ # nothing is installed
+ m_which.return_value = None
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ expected_cfg = distro_configs[expected_client]
+ expected_calls = []
+ for client in mycloud.distro.preferred_ntp_clients:
+ cfg = distro_configs[client]
+ expected_calls.append(mock.call(cfg['check_exe']))
+ cc_ntp.select_ntp_client({}, mycloud.distro)
+ m_which.assert_has_calls(expected_calls)
+ self.assertEqual(sorted(expected_cfg), sorted(cfg))
+
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
+ """Test user_cfg.ntp_client='auto' defaults to distro search"""
+ # nothing is installed
+ m_which.return_value = None
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+ expected_client = mycloud.distro.preferred_ntp_clients[0]
+ expected_cfg = distro_configs[expected_client]
+ expected_calls = []
+ for client in mycloud.distro.preferred_ntp_clients:
+ cfg = distro_configs[client]
+ expected_calls.append(mock.call(cfg['check_exe']))
+ cc_ntp.select_ntp_client('auto', mycloud.distro)
+ m_which.assert_has_calls(expected_calls)
+ self.assertEqual(sorted(expected_cfg), sorted(cfg))
+
+ @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
+ @mock.patch('cloudinit.cloud.Cloud.get_template_filename')
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_ntp_custom_client_overrides_installed_clients(self, m_which,
+ m_tmpfn, m_write):
+ """Test user client is installed despite other clients present """
+ client = 'ntpdate'
+ cfg = {'ntp': {'ntp_client': client}}
+ for distro in cc_ntp.distros:
+ # client is not installed
+ m_which.side_effect = iter([None])
+ mycloud = self._get_cloud(distro)
+ with mock.patch.object(mycloud.distro,
+ 'install_packages') as m_install:
+ cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ m_install.assert_called_with([client])
+ m_which.assert_called_with(client)
+
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
+ """Test distro system_config overrides builtin preferred ntp clients"""
+ system_client = 'chrony'
+ sys_cfg = {'ntp_client': system_client}
+ # no clients installed
+ m_which.return_value = None
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
+ distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+ expected_cfg = distro_configs[system_client]
+ result = cc_ntp.select_ntp_client(None, mycloud.distro)
+ self.assertEqual(sorted(expected_cfg), sorted(result))
+ m_which.assert_has_calls([])
+
+ @mock.patch('cloudinit.config.cc_ntp.util.which')
+ def test_ntp_user_config_overrides_system_cfg(self, m_which):
+ """Test user-data overrides system_config ntp_client"""
+ system_client = 'chrony'
+ sys_cfg = {'ntp_client': system_client}
+ user_client = 'systemd-timesyncd'
+ # no clients installed
+ m_which.return_value = None
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
+ distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+ expected_cfg = distro_configs[user_client]
+ result = cc_ntp.select_ntp_client(user_client, mycloud.distro)
+ self.assertEqual(sorted(expected_cfg), sorted(result))
+ m_which.assert_has_calls([])
+
+ @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
+ @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
+ def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
+ custom = r'\n#MyCustomTemplate'
+ user_template = NTP_TEMPLATE + custom
+ confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
+ cfg = {
+ 'ntp': {
+ 'pools': ['mypool.org'],
+ 'ntp_client': 'myntpd',
+ 'config': {
+ 'check_exe': 'myntpd',
+ 'confpath': confpath,
+ 'packages': ['myntp'],
+ 'service_name': 'myntp',
+ 'template': user_template,
+ }
+ }
+ }
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ with mock.patch(mock_path, self.new_root):
+ cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ self.assertEqual(
+ "servers []\npools ['mypool.org']\n%s" % custom,
+ util.load_file(confpath))
+
+ @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
+ @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
+ @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
+ @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ def test_ntp_user_provided_config_template_only(self, m_select, m_install,
+ m_reload, m_schema):
+ """Test custom template for default client"""
+ custom = r'\n#MyCustomTemplate'
+ user_template = NTP_TEMPLATE + custom
+ client = 'chrony'
+ cfg = {
+ 'pools': ['mypool.org'],
+ 'ntp_client': client,
+ 'config': {
+ 'template': user_template,
+ }
+ }
+ expected_merged_cfg = {
+ 'check_exe': 'chronyd',
+ 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
+ 'template_name': 'client.conf', 'template': user_template,
+ 'service_name': 'chrony', 'packages': ['chrony']}
+ for distro in cc_ntp.distros:
+ mycloud = self._get_cloud(distro)
+ ntpconfig = self._mock_ntp_client_config(client=client,
+ distro=distro)
+ confpath = ntpconfig['confpath']
+ m_select.return_value = ntpconfig
+ mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ with mock.patch(mock_path, self.new_root):
+ cc_ntp.handle('notimportant',
+ {'ntp': cfg}, mycloud, None, None)
+ self.assertEqual(
+ "servers []\npools ['mypool.org']\n%s" % custom,
+ util.load_file(confpath))
+ m_schema.assert_called_with(expected_merged_cfg)
+
+
+class TestSupplementalSchemaValidation(CiTestCase):
+
+ def test_error_on_missing_keys(self):
+ """ValueError raised reporting any missing required ntp:config keys"""
+ cfg = {}
+ match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
+ ' keys: check_exe, confpath, packages, service_name')
+ with self.assertRaisesRegex(ValueError, match):
+ cc_ntp.supplemental_schema_validation(cfg)
+
+ def test_error_requiring_either_template_or_template_name(self):
+ """ValueError raised if both template not template_name are None."""
+ cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+ 'template': None, 'template_name': None, 'packages': []}
+ match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
+ ' or ntp:config:template_name values are required')
+ with self.assertRaisesRegex(ValueError, match):
+ cc_ntp.supplemental_schema_validation(cfg)
+
+ def test_error_on_non_list_values(self):
+ """ValueError raised when packages is not of type list."""
+ cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+ 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
+ match = (r'Invalid ntp configuration:\\nExpected a list of required'
+ ' package names for ntp:config:packages. Found \\(NOPE\\)')
+ with self.assertRaisesRegex(ValueError, match):
+ cc_ntp.supplemental_schema_validation(cfg)
+
+ def test_error_on_non_string_values(self):
+ """ValueError raised for any values expected as string type."""
+ cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
+ 'template': 4, 'template_name': 5, 'packages': []}
+ errors = [
+ 'Expected a config file path ntp:config:confpath. Found (1)',
+ 'Expected a string type for ntp:config:check_exe. Found (2)',
+ 'Expected a string type for ntp:config:service_name. Found (3)',
+ 'Expected a string type for ntp:config:template. Found (4)',
+ 'Expected a string type for ntp:config:template_name. Found (5)']
+ with self.assertRaises(ValueError) as context_mgr:
+ cc_ntp.supplemental_schema_validation(cfg)
+ error_msg = str(context_mgr.exception)
+ for error in errors:
+ self.assertIn(error, error_msg)
# vi: ts=4 expandtab
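
The test_handler_ntp rework above follows cc_ntp's move from a single
NTP_CONF constant to per-client configuration dictionaries plus a selection
step. A hedged sketch of the flow the tests mock out, using only names that
appear in these hunks:

    # Sketch of the client-config flow exercised by TestNtp above.
    from cloudinit.config import cc_ntp

    # Per-distro client configs; each entry carries the keys that
    # supplemental_schema_validation requires: check_exe, confpath,
    # packages, service_name, plus template or template_name.
    configs = cc_ntp.distro_ntp_client_configs('ubuntu')
    chrony_cfg = configs['chrony']

    # With ntp_client unset or 'auto', select_ntp_client walks the
    # distro's preferred_ntp_clients list and returns the config of the
    # first client whose check_exe is found on the system:
    #   cc_ntp.select_ntp_client('auto', mycloud.distro)
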
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index 7a7ba1ff..feca56c2 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -147,13 +147,15 @@ class TestResizefs(CiTestCase):
def test_resize_ufs_cmd_return(self):
mount_point = '/'
devpth = '/dev/sda2'
- self.assertEqual(('growfs', devpth),
+ self.assertEqual(('growfs', '-y', devpth),
_resize_ufs(mount_point, devpth))
+ @mock.patch('cloudinit.util.is_container', return_value=False)
@mock.patch('cloudinit.util.get_mount_info')
@mock.patch('cloudinit.util.get_device_info_from_zpool')
@mock.patch('cloudinit.util.parse_mount')
- def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount):
+ def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount,
+ is_container):
devpth = 'vmzroot/ROOT/freebsd'
disk = 'gpt/system'
fs_type = 'zfs'
@@ -354,8 +356,10 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
('btrfs', 'filesystem', 'resize', 'max', '/'),
_resize_btrfs("/", "/dev/sda1"))
+ @mock.patch('cloudinit.util.is_container', return_value=True)
@mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd):
+ def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
+ m_is_container):
freebsd.return_value = True
info = 'dev=gpt/system mnt_point=/ path=/'
devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
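
The resizefs changes above add '-y' to the growfs invocation (growfs
otherwise prompts for confirmation, which would stall a non-interactive
boot) and gate the zfs code paths on util.is_container. A minimal sketch
matching the tuple asserted in test_resize_ufs_cmd_return:

    # Sketch: the command tuple asserted above; '-y' answers growfs's
    # confirmation prompt so the resize cannot block on stdin.
    def _resize_ufs_sketch(mount_point, devpth):
        return ('growfs', '-y', devpth)

    assert _resize_ufs_sketch('/', '/dev/sda2') == ('growfs', '-y', '/dev/sda2')
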
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
index dbbb2717..9ce334ac 100644
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ b/tests/unittests/test_handler/test_handler_runcmd.py
@@ -1,10 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_runcmd
+from cloudinit.config.cc_runcmd import handle, schema
from cloudinit.sources import DataSourceNone
from cloudinit import (distros, helpers, cloud, util)
from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, skipUnlessJsonSchema)
+ CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
+ skipUnlessJsonSchema)
import logging
import os
@@ -35,7 +36,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""When the provided config doesn't contain runcmd, skip it."""
cfg = {}
mycloud = self._get_cloud('ubuntu')
- cc_runcmd.handle('notimportant', cfg, mycloud, LOG, None)
+ handle('notimportant', cfg, mycloud, LOG, None)
self.assertIn(
"Skipping module named notimportant, no 'runcmd' key",
self.logs.getvalue())
@@ -44,7 +45,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""Commands which can't be converted to shell will raise errors."""
invalid_config = {'runcmd': 1}
cc = self._get_cloud('ubuntu')
- cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, [])
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
self.assertIn(
'Failed to shellify 1 into file'
' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd',
@@ -59,7 +60,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""
invalid_config = {'runcmd': 1}
cc = self._get_cloud('ubuntu')
- cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, [])
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
self.assertIn(
'Invalid config:\nruncmd: 1 is not of type \'array\'',
self.logs.getvalue())
@@ -75,7 +76,7 @@ class TestRuncmd(FilesystemMockingTestCase):
invalid_config = {
'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
cc = self._get_cloud('ubuntu')
- cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, [])
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
expected_warnings = [
'runcmd.1: 20 is not valid under any of the given schemas',
'runcmd.3: {\'a\': \'n\'} is not valid under any of the given'
@@ -90,7 +91,7 @@ class TestRuncmd(FilesystemMockingTestCase):
"""Valid runcmd schema is written to a runcmd shell script."""
valid_config = {'runcmd': [['ls', '/']]}
cc = self._get_cloud('ubuntu')
- cc_runcmd.handle('cc_runcmd', valid_config, cc, LOG, [])
+ handle('cc_runcmd', valid_config, cc, LOG, [])
runcmd_file = os.path.join(
self.new_root,
'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd')
@@ -99,4 +100,22 @@ class TestRuncmd(FilesystemMockingTestCase):
self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode))
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+ """Directly test schema rather than through handle."""
+
+ schema = schema
+
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+ [["echo", "bye"], ["echo", "bye"]],
+ "command entries can be duplicate.")
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ ["echo bye", "echo bye"],
+ "command entries can be duplicate.")
+
# vi: ts=4 expandtab
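
TestSchema above validates runcmd entries directly against the module schema
rather than going through handle(). A minimal sketch of the entry shapes
those cases accept:

    # Sketch: runcmd accepts argv-style lists and plain strings, and
    # duplicate entries are explicitly allowed by the schema.
    valid_runcmd = {
        'runcmd': [
            ['ls', '/'],   # array form
            'echo bye',    # string form
            'echo bye',    # duplicate of the previous entry, still valid
        ]
    }
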
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index ac41f124..1bad07f6 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -4,7 +4,7 @@ from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
get_schema_doc, get_schema, validate_cloudconfig_file,
validate_cloudconfig_schema, main)
-from cloudinit.util import subp, write_file
+from cloudinit.util import write_file
from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
@@ -134,22 +134,35 @@ class ValidateCloudConfigFileTest(CiTestCase):
with self.assertRaises(SchemaValidationError) as context_mgr:
validate_cloudconfig_file(self.config_file, {})
self.assertEqual(
- 'Cloud config schema errors: header: File {0} needs to begin with '
- '"{1}"'.format(self.config_file, CLOUD_CONFIG_HEADER.decode()),
+ 'Cloud config schema errors: format-l1.c1: File {0} needs to begin'
+ ' with "{1}"'.format(
+ self.config_file, CLOUD_CONFIG_HEADER.decode()),
str(context_mgr.exception))
- def test_validateconfig_file_error_on_non_yaml_format(self):
- """On non-yaml format, validate_cloudconfig_file errors."""
+ def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
+ """On non-yaml scan issues, validate_cloudconfig_file errors."""
+ # Generate a scanner error by providing text on a single line with
+ # improper indent.
+ write_file(self.config_file, '#cloud-config\nasdf:\nasdf')
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format(
+ self.config_file),
+ str(context_mgr.exception))
+
+ def test_validateconfig_file_error_on_non_yaml_parser_error(self):
+ """On non-yaml parser issues, validate_cloudconfig_file errors."""
write_file(self.config_file, '#cloud-config\n{}}')
with self.assertRaises(SchemaValidationError) as context_mgr:
validate_cloudconfig_file(self.config_file, {})
self.assertIn(
- 'schema errors: format: File {0} is not valid yaml.'.format(
+ 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format(
self.config_file),
str(context_mgr.exception))
@skipUnlessJsonSchema()
- def test_validateconfig_file_sctricty_validates_schema(self):
+ def test_validateconfig_file_strictly_validates_schema(self):
"""validate_cloudconfig_file raises errors on invalid schema."""
schema = {
'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
@@ -342,6 +355,20 @@ class MainTest(CiTestCase):
'Expected either --config-file argument or --doc\n',
m_stderr.getvalue())
+ def test_main_absent_config_file(self):
+ """Main exits non-zero when config file is absent."""
+ myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
+ with mock.patch('sys.exit', side_effect=self.sys_exit):
+ with mock.patch('sys.argv', myargs):
+ with mock.patch('sys.stderr', new_callable=StringIO) as \
+ m_stderr:
+ with self.assertRaises(SystemExit) as context_manager:
+ main()
+ self.assertEqual(1, context_manager.exception.code)
+ self.assertEqual(
+ 'Configfile NOT_A_FILE does not exist\n',
+ m_stderr.getvalue())
+
def test_main_prints_docs(self):
"""When --doc parameter is provided, main generates documentation."""
myargs = ['mycmd', '--doc']
@@ -379,8 +406,14 @@ class CloudTestsIntegrationTest(CiTestCase):
integration_testdir = os.path.sep.join(
[testsdir, 'cloud_tests', 'testcases'])
errors = []
- out, _ = subp(['find', integration_testdir, '-name', '*yaml'])
- for filename in out.splitlines():
+
+ yaml_files = []
+ for root, _dirnames, filenames in os.walk(integration_testdir):
+ yaml_files.extend([os.path.join(root, f)
+ for f in filenames if f.endswith(".yaml")])
+ self.assertTrue(len(yaml_files) > 0)
+
+ for filename in yaml_files:
test_cfg = safe_load(open(filename))
cloud_config = test_cfg.get('cloud_config')
if cloud_config:
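
The ValidateCloudConfigFileTest changes above move yaml failure reporting
from bare 'header'/'format' keys to positional markers of the form
format-lLINE.cCOL. A sketch of the resulting message shape, with a
hypothetical config path:

    # Sketch: the error format asserted above; 'format-l2.c3' points at
    # line 2, column 3 of the offending yaml document.
    config_file = '/tmp/user-data.yaml'  # hypothetical path
    msg = ('Cloud config schema errors: format-l2.c3: '
           'File {0} is not valid yaml.'.format(config_file))
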
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index f51358da..3a5072c7 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -100,7 +100,7 @@ def make_dict(max_depth, seed=None):
class TestSimpleRun(helpers.ResourceUsingTestCase):
def _load_merge_files(self):
- merge_root = self.resourceLocation('merge_sources')
+ merge_root = helpers.resourceLocation('merge_sources')
tests = []
source_ids = collections.defaultdict(list)
expected_files = {}
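
The test_merging change reflects resourceLocation moving from a TestCase
method to a module-level helper. A minimal sketch of the new call, assuming
cloudinit.tests.helpers exports the function as used above:

    # Sketch: resourceLocation is now called on the helpers module,
    # not on the test-case instance.
    from cloudinit.tests import helpers
    merge_root = helpers.resourceLocation('merge_sources')
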
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index c12a487a..5d9c7d92 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1,13 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import net
+from cloudinit import distros
from cloudinit.net import cmdline
-from cloudinit.net import eni
-from cloudinit.net import natural_sort_key
-from cloudinit.net import netplan
-from cloudinit.net import network_state
-from cloudinit.net import renderers
-from cloudinit.net import sysconfig
+from cloudinit.net import (
+ eni, interface_has_own_mac, natural_sort_key, netplan, network_state,
+ renderers, sysconfig)
from cloudinit.sources.helpers import openstack
from cloudinit import temp_utils
from cloudinit import util
@@ -132,7 +130,40 @@ OS_SAMPLES = [
'in_macs': {
'fa:16:3e:ed:9a:59': 'eth0',
},
- 'out_sysconfig': [
+ 'out_sysconfig_opensuse': [
+ ('etc/sysconfig/network/ifcfg-eth0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=eth0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+NETMASK=255.255.252.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/resolv.conf',
+ """
+; Created by cloud-init on instance boot automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
+ ('etc/udev/rules.d/70-persistent-net.rules',
+ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
+ 'out_sysconfig_rhel': [
('etc/sysconfig/network-scripts/ifcfg-eth0',
"""
# Created by cloud-init on instance boot automatically, do not edit.
@@ -165,6 +196,7 @@ dns = none
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
+
},
{
'in_data': {
@@ -198,7 +230,42 @@ dns = none
'in_macs': {
'fa:16:3e:ed:9a:59': 'eth0',
},
- 'out_sysconfig': [
+ 'out_sysconfig_opensuse': [
+ ('etc/sysconfig/network/ifcfg-eth0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=eth0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+IPADDR1=10.0.0.10
+NETMASK=255.255.252.0
+NETMASK1=255.255.255.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/resolv.conf',
+ """
+; Created by cloud-init on instance boot automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
+ ('etc/udev/rules.d/70-persistent-net.rules',
+ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
+ 'out_sysconfig_rhel': [
('etc/sysconfig/network-scripts/ifcfg-eth0',
"""
# Created by cloud-init on instance boot automatically, do not edit.
@@ -233,6 +300,7 @@ dns = none
('etc/udev/rules.d/70-persistent-net.rules',
"".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
+
},
{
'in_data': {
@@ -286,7 +354,44 @@ dns = none
'in_macs': {
'fa:16:3e:ed:9a:59': 'eth0',
},
- 'out_sysconfig': [
+ 'out_sysconfig_opensuse': [
+ ('etc/sysconfig/network/ifcfg-eth0',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=eth0
+GATEWAY=172.19.3.254
+HWADDR=fa:16:3e:ed:9a:59
+IPADDR=172.19.1.34
+IPV6ADDR=2001:DB8::10/64
+IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
+IPV6INIT=yes
+IPV6_DEFAULTGW=2001:DB8::1
+NETMASK=255.255.252.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()),
+ ('etc/resolv.conf',
+ """
+; Created by cloud-init on instance boot automatically, do not edit.
+;
+nameserver 172.19.0.12
+""".lstrip()),
+ ('etc/NetworkManager/conf.d/99-cloud-init.conf',
+ """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+[main]
+dns = none
+""".lstrip()),
+ ('etc/udev/rules.d/70-persistent-net.rules',
+ "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
+ 'out_sysconfig_rhel': [
('etc/sysconfig/network-scripts/ifcfg-eth0',
"""
# Created by cloud-init on instance boot automatically, do not edit.
@@ -528,6 +633,7 @@ NETWORK_CONFIGS = {
config:
- type: 'physical'
name: 'iface0'
+ mtu: 8999
subnets:
- type: static
address: 192.168.14.2/24
@@ -553,6 +659,43 @@ NETWORK_CONFIGS = {
"""),
},
},
+ 'dhcpv6_only': {
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ auto iface0
+ iface iface0 inet6 dhcp
+ """).rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ version: 2
+ ethernets:
+ iface0:
+ dhcp6: true
+ """).rstrip(' '),
+ 'yaml': textwrap.dedent("""\
+ version: 1
+ config:
+ - type: 'physical'
+ name: 'iface0'
+ subnets:
+ - {'type': 'dhcp6'}
+ """).rstrip(' '),
+ 'expected_sysconfig': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=iface0
+ DHCPV6C=yes
+ IPV6INIT=yes
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ },
'all': {
'expected_eni': ("""\
auto lo
@@ -608,6 +751,7 @@ iface br0 inet static
bridge_stp off
bridge_waitport 1 eth3
bridge_waitport 2 eth4
+ hwaddress bb:bb:bb:bb:bb:aa
# control-alias br0
iface br0 inet6 static
@@ -626,8 +770,8 @@ iface eth0.101 inet static
dns-nameservers 192.168.0.10 10.23.23.134
dns-search barley.maas sacchromyces.maas brettanomyces.maas
gateway 192.168.0.1
- hwaddress aa:bb:cc:dd:ee:11
mtu 1500
+ hwaddress aa:bb:cc:dd:ee:11
vlan-raw-device eth0
vlan_id 101
@@ -673,6 +817,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
interfaces:
- eth1
- eth2
+ macaddress: aa:bb:cc:dd:ee:ff
parameters:
mii-monitor-interval: 100
mode: active-backup
@@ -685,6 +830,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
interfaces:
- eth3
- eth4
+ macaddress: bb:bb:bb:bb:bb:aa
nameservers:
addresses:
- 8.8.8.8
@@ -723,6 +869,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
id: 101
link: eth0
macaddress: aa:bb:cc:dd:ee:11
+ mtu: 1500
nameservers:
addresses:
- 192.168.0.10
@@ -740,7 +887,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
"""miimon=100"
BONDING_SLAVE0=eth1
BONDING_SLAVE1=eth2
- BOOTPROTO=dhcp
+ BOOTPROTO=none
DEVICE=bond0
DHCPV6C=yes
IPV6INIT=yes
@@ -767,6 +914,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
IPV6_DEFAULTGW=2001:4800:78ff:1b::1
+ MACADDR=bb:bb:bb:bb:bb:aa
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
@@ -886,6 +1034,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
mtu: 1500
subnets:
- type: static
+ # When 'mtu' matches device-level mtu, no warnings
+ mtu: 1500
address: 192.168.0.2/24
gateway: 192.168.0.1
dns_nameservers:
@@ -935,6 +1085,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
use_tempaddr: 1
forwarding: 1
# basically anything in /proc/sys/net/ipv6/conf/.../
+ mac_address: bb:bb:bb:bb:bb:aa
params:
bridge_ageing: 250
bridge_bridgeprio: 22
@@ -994,6 +1145,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
- type: bond
name: bond0
mac_address: "aa:bb:cc:dd:e8:ff"
+ mtu: 9000
bond_interfaces:
- bond0s0
- bond0s1
@@ -1036,6 +1188,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
interfaces:
- bond0s0
- bond0s1
+ macaddress: aa:bb:cc:dd:e8:ff
+ mtu: 9000
parameters:
mii-monitor-interval: 100
mode: active-backup
@@ -1108,7 +1262,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
version: 2
"""),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
'ifcfg-bond0': textwrap.dedent("""\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100"
@@ -1123,6 +1277,59 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
IPADDR1=192.168.1.2
IPV6ADDR=2001:1::1/92
IPV6INIT=yes
+ MTU=9000
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Bond
+ USERCTL=no
+ """),
+ 'ifcfg-bond0s0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=bond0s0
+ HWADDR=aa:bb:cc:dd:e8:00
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ 'ifroute-bond0': textwrap.dedent("""\
+ ADDRESS0=10.1.3.0
+ GATEWAY0=192.168.0.3
+ NETMASK0=255.255.255.0
+ """),
+ 'ifcfg-bond0s1': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=bond0s1
+ HWADDR=aa:bb:cc:dd:e8:01
+ MASTER=bond0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ SLAVE=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+
+ 'expected_sysconfig_rhel': {
+ 'ifcfg-bond0': textwrap.dedent("""\
+ BONDING_MASTER=yes
+ BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100"
+ BONDING_SLAVE0=bond0s0
+ BONDING_SLAVE1=bond0s1
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=bond0
+ GATEWAY=192.168.0.1
+ MACADDR=aa:bb:cc:dd:e8:ff
+ IPADDR=192.168.0.2
+ IPADDR1=192.168.1.2
+ IPV6ADDR=2001:1::1/92
+ IPV6INIT=yes
+ MTU=9000
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
NM_CONTROLLED=no
@@ -1169,6 +1376,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
name: en0
mac_address: "aa:bb:cc:dd:e8:00"
- type: vlan
+ mtu: 2222
name: en0.99
vlan_link: en0
vlan_id: 99
@@ -1204,6 +1412,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
IPV6ADDR=2001:1::bbbb/96
IPV6INIT=yes
IPV6_DEFAULTGW=2001:1::1
+ MTU=2222
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
NM_CONTROLLED=no
@@ -1405,6 +1614,7 @@ DEFAULT_DEV_ATTRS = {
"address": "07-1C-C6-75-A4-BE",
"device/driver": None,
"device/device": None,
+ "name_assign_type": "4",
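+ # name_assign_type 4 == NET_NAME_RENAMED: userspace renamed the
+ # device, so the name is treated as stable.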
}
}
@@ -1443,6 +1653,12 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
class TestGenerateFallbackConfig(CiTestCase):
+ def setUp(self):
+ super(TestGenerateFallbackConfig, self).setUp()
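+ # generate_fallback_config consults the kernel cmdline (e.g.
+ # net.ifnames=0), so patch get_cmdline to keep these tests
+ # independent of the host's /proc/cmdline.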
+ self.add_patch(
+ "cloudinit.util.get_cmdline", "m_get_cmdline",
+ return_value="root=/dev/sda1")
+
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
@@ -1452,11 +1668,14 @@ class TestGenerateFallbackConfig(CiTestCase):
'eth0': {
'bridge': False, 'carrier': False, 'dormant': False,
'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3'},
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3',
+ 'name_assign_type': '4'},
'eth1': {
'bridge': False, 'carrier': False, 'dormant': False,
'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7'},
+ 'device/driver': 'mlx4_core', 'device/device': '0x7',
+ 'name_assign_type': '4'},
+
}
tmp_dir = self.tmp_dir()
@@ -1474,7 +1693,7 @@ class TestGenerateFallbackConfig(CiTestCase):
# don't set rulepath so eni writes them
renderer = eni.Renderer(
{'eni_path': 'interfaces', 'netrules_path': 'netrules'})
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
'interfaces')))
@@ -1512,11 +1731,13 @@ iface eth0 inet dhcp
'eth1': {
'bridge': False, 'carrier': False, 'dormant': False,
'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3'},
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3',
+ 'name_assign_type': '4'},
'eth0': {
'bridge': False, 'carrier': False, 'dormant': False,
'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7'},
+ 'device/driver': 'mlx4_core', 'device/device': '0x7',
+ 'name_assign_type': '4'},
}
tmp_dir = self.tmp_dir()
@@ -1536,7 +1757,7 @@ iface eth0 inet dhcp
# don't set rulepath so eni writes them
renderer = eni.Renderer(
{'eni_path': 'interfaces', 'netrules_path': 'netrules'})
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
'interfaces')))
@@ -1565,13 +1786,83 @@ iface eth1 inet dhcp
]
self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("cloudinit.util.udevadm_settle")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net,
+ mock_sys_dev_path, mock_settle, m_get_cmdline):
+ """verify that udevadm settle is called when we find unstable names"""
+ devices = {
+ 'eth0': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3',
+ 'name_assign_type': False},
+ 'ens4': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'mlx4_core', 'device/device': '0x7',
+ 'name_assign_type': '4'},
+ }
+
+ m_get_cmdline.return_value = ''
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path,
+ dev_attrs=devices)
+ net.generate_fallback_config(config_driver=True)
+ self.assertEqual(1, mock_settle.call_count)
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("cloudinit.util.udevadm_settle")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_unstable_names_disabled(self, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path,
+ mock_settle, m_get_cmdline):
+ """verify udevadm settle not called when cmdline has net.ifnames=0"""
+ devices = {
+ 'eth0': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'hv_netsvc', 'device/device': '0x3',
+ 'name_assign_type': False},
+ 'ens4': {
+ 'bridge': False, 'carrier': False, 'dormant': False,
+ 'operstate': 'down', 'address': '00:11:22:33:44:55',
+ 'device/driver': 'mlx4_core', 'device/device': '0x7',
+ 'name_assign_type': '4'},
+
+ }
+
+ m_get_cmdline.return_value = 'net.ifnames=0'
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path,
+ dev_attrs=devices)
+ net.generate_fallback_config(config_driver=True)
+ self.assertEqual(0, mock_settle.call_count)
+
+
-class TestSysConfigRendering(CiTestCase):
+class TestRhelSysConfigRendering(CiTestCase):
+
+ with_logs = True
scripts_dir = '/etc/sysconfig/network-scripts'
header = ('# Created by cloud-init on instance boot automatically, '
'do not edit.\n#\n')
+ expected_name = 'expected_sysconfig'
+
+ def _get_renderer(self):
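+ # Build the renderer from the rhel distro's sysconfig renderer
+ # config so output uses the RHEL flavor
+ # (/etc/sysconfig/network-scripts).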
+ distro_cls = distros.fetch('rhel')
+ return sysconfig.Renderer(
+ config=distro_cls.renderer_configs.get('sysconfig'))
+
def _render_and_read(self, network_config=None, state=None, dir=None):
if dir is None:
dir = self.tmp_dir()
@@ -1583,8 +1874,8 @@ class TestSysConfigRendering(CiTestCase):
else:
raise ValueError("Expected data or state, got neither")
- renderer = sysconfig.Renderer()
- renderer.render_network_state(ns, dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=dir)
return dir2dict(dir)
def _compare_files_to_expected(self, expected, found):
@@ -1610,12 +1901,13 @@ class TestSysConfigRendering(CiTestCase):
if missing:
raise AssertionError("Missing headers in: %s" % missing)
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
def test_default_generation(self, mock_get_devicelist,
mock_read_sys_net,
- mock_sys_dev_path):
+ mock_sys_dev_path, m_get_cmdline):
tmp_dir = self.tmp_dir()
_setup_test(tmp_dir, mock_get_devicelist,
mock_read_sys_net, mock_sys_dev_path)
@@ -1627,8 +1919,8 @@ class TestSysConfigRendering(CiTestCase):
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- renderer = sysconfig.Renderer()
- renderer.render_network_state(ns, render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000'
with open(os.path.join(render_dir, render_file)) as fh:
@@ -1679,9 +1971,9 @@ USERCTL=no
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
ns = network_state.parse_net_config_data(network_cfg,
skip_broken=False)
- renderer = sysconfig.Renderer()
+ renderer = self._get_renderer()
with self.assertRaises(ValueError):
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
def test_multiple_ipv6_default_gateways(self):
@@ -1717,9 +2009,9 @@ USERCTL=no
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
ns = network_state.parse_net_config_data(network_cfg,
skip_broken=False)
- renderer = sysconfig.Renderer()
+ renderer = self._get_renderer()
with self.assertRaises(ValueError):
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
def test_openstack_rendering_samples(self):
@@ -1731,12 +2023,13 @@ USERCTL=no
ex_input, known_macs=ex_mac_addrs)
ns = network_state.parse_net_config_data(network_cfg,
skip_broken=False)
- renderer = sysconfig.Renderer()
+ renderer = self._get_renderer()
# render multiple times to simulate reboots
- renderer.render_network_state(ns, render_dir)
- renderer.render_network_state(ns, render_dir)
- renderer.render_network_state(ns, render_dir)
- for fn, expected_content in os_sample.get('out_sysconfig', []):
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ for fn, expected_content in os_sample.get('out_sysconfig_rhel',
+ []):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
@@ -1744,8 +2037,8 @@ USERCTL=no
ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- renderer = sysconfig.Renderer()
- renderer.render_network_state(ns, render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
nspath = '/etc/sysconfig/network-scripts/'
self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
@@ -1770,8 +2063,8 @@ USERCTL=no
ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
- renderer = sysconfig.Renderer()
- renderer.render_network_state(ns, render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
nspath = '/etc/sysconfig/network-scripts/'
self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
@@ -1788,56 +2081,369 @@ USERCTL=no
self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
def test_bond_config(self):
+ expected_name = 'expected_sysconfig_rhel'
+ entry = NETWORK_CONFIGS['bond']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[expected_name], found)
+ self._assert_headers(found)
+
+ def test_vlan_config(self):
+ entry = NETWORK_CONFIGS['vlan']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_bridge_config(self):
+ entry = NETWORK_CONFIGS['bridge']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_manual_config(self):
+ entry = NETWORK_CONFIGS['manual']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_all_config(self):
+ entry = NETWORK_CONFIGS['all']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+ self.assertNotIn(
+ 'WARNING: Network config: ignoring eth0.101 device-level mtu',
+ self.logs.getvalue())
+
+ def test_small_config(self):
+ entry = NETWORK_CONFIGS['small']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_v4_and_v6_static_config(self):
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+ expected_msg = (
+ 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
+ ' because ipv4 subnet-level mtu:9000 provided.')
+ self.assertIn(expected_msg, self.logs.getvalue())
+
+ def test_dhcpv6_only_config(self):
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+
+class TestOpenSuseSysConfigRendering(CiTestCase):
+
+ with_logs = True
+
+ scripts_dir = '/etc/sysconfig/network'
+ header = ('# Created by cloud-init on instance boot automatically, '
+ 'do not edit.\n#\n')
+
+ expected_name = 'expected_sysconfig'
+
+ def _get_renderer(self):
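+ # Same pattern as the RHEL class above, but the opensuse flavor
+ # renders under /etc/sysconfig/network instead.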
+ distro_cls = distros.fetch('opensuse')
+ return sysconfig.Renderer(
+ config=distro_cls.renderer_configs.get('sysconfig'))
+
+ def _render_and_read(self, network_config=None, state=None, dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ def _compare_files_to_expected(self, expected, found):
+ orig_maxdiff = self.maxDiff
+ expected_d = dict(
+ (os.path.join(self.scripts_dir, k), util.load_shell_content(v))
+ for k, v in expected.items())
+
+ # only compare the files in scripts_dir
+ scripts_found = dict(
+ (k, util.load_shell_content(v)) for k, v in found.items()
+ if k.startswith(self.scripts_dir))
+ try:
+ self.maxDiff = None
+ self.assertEqual(expected_d, scripts_found)
+ finally:
+ self.maxDiff = orig_maxdiff
+
+ def _assert_headers(self, found):
+ missing = [f for f in found
+ if (f.startswith(self.scripts_dir) and
+ not found[f].startswith(self.header))]
+ if missing:
+ raise AssertionError("Missing headers in: %s" % missing)
+
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_default_generation(self, mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path, m_get_cmdline):
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path)
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+
+ render_file = 'etc/sysconfig/network/ifcfg-eth1000'
+ with open(os.path.join(render_dir, render_file)) as fh:
+ content = fh.read()
+ expected_content = """
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth1000
+HWADDR=07-1C-C6-75-A4-BE
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+""".lstrip()
+ self.assertEqual(expected_content, content)
+
+ def test_multiple_ipv4_default_gateways(self):
+ """ValueError is raised when duplicate ipv4 gateways exist."""
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [{
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4", "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [{
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }, {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ }],
+ "ip_address": "172.19.1.34", "id": "network0"
+ }],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None, "type": "bridge", "id":
+ "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ },
+ ],
+ }
+ macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ render_dir = self.tmp_dir()
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+ renderer = self._get_renderer()
+ with self.assertRaises(ValueError):
+ renderer.render_network_state(ns, target=render_dir)
+ self.assertEqual([], os.listdir(render_dir))
+
+ def test_multiple_ipv6_default_gateways(self):
+ """ValueError is raised when duplicate ipv6 gateways exist."""
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [{
+ "network_id": "public-ipv6",
+ "type": "ipv6", "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [{
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::"
+ }, {
+ "gateway": "2001:DB9::1",
+ "netmask": "::",
+ "network": "::"
+ }],
+ "ip_address": "2001:DB8::10", "id": "network1"
+ }],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None, "type": "bridge", "id":
+ "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ },
+ ],
+ }
+ macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ render_dir = self.tmp_dir()
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+ renderer = self._get_renderer()
+ with self.assertRaises(ValueError):
+ renderer.render_network_state(ns, target=render_dir)
+ self.assertEqual([], os.listdir(render_dir))
+
+ def test_openstack_rendering_samples(self):
+ for os_sample in OS_SAMPLES:
+ render_dir = self.tmp_dir()
+ ex_input = os_sample['in_data']
+ ex_mac_addrs = os_sample['in_macs']
+ network_cfg = openstack.convert_net_json(
+ ex_input, known_macs=ex_mac_addrs)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+ renderer = self._get_renderer()
+ # render multiple times to simulate reboots
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ for fn, expected_content in os_sample.get('out_sysconfig_opensuse',
+ []):
+ with open(os.path.join(render_dir, fn)) as fh:
+ self.assertEqual(expected_content, fh.read())
+
+ def test_network_config_v1_samples(self):
+ ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+ found = dir2dict(render_dir)
+ nspath = '/etc/sysconfig/network/'
+ self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ expected = """\
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=none
+DEFROUTE=yes
+DEVICE=interface0
+GATEWAY=10.0.2.2
+HWADDR=52:54:00:12:34:00
+IPADDR=10.0.2.15
+NETMASK=255.255.255.0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+"""
+ self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+
+ def test_config_with_explicit_loopback(self):
+ ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+ found = dir2dict(render_dir)
+ nspath = '/etc/sysconfig/network/'
+ self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ expected = """\
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+"""
+ self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+
+ def test_bond_config(self):
+ expected_name = 'expected_sysconfig_opensuse'
entry = NETWORK_CONFIGS['bond']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[expected_name], found)
self._assert_headers(found)
def test_vlan_config(self):
entry = NETWORK_CONFIGS['vlan']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_bridge_config(self):
entry = NETWORK_CONFIGS['bridge']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_manual_config(self):
entry = NETWORK_CONFIGS['manual']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_all_config(self):
entry = NETWORK_CONFIGS['all']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
+ self.assertNotIn(
+ 'WARNING: Network config: ignoring eth0.101 device-level mtu',
+ self.logs.getvalue())
def test_small_config(self):
entry = NETWORK_CONFIGS['small']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_v4_and_v6_static_config(self):
entry = NETWORK_CONFIGS['v4_and_v6_static']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+ expected_msg = (
+ 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
+ ' because ipv4 subnet-level mtu:9000 provided.')
+ self.assertIn(expected_msg, self.logs.getvalue())
+
+ def test_dhcpv6_only_config(self):
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
class TestEniNetRendering(CiTestCase):
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
def test_default_generation(self, mock_get_devicelist,
mock_read_sys_net,
- mock_sys_dev_path):
+ mock_sys_dev_path, m_get_cmdline):
tmp_dir = self.tmp_dir()
_setup_test(tmp_dir, mock_get_devicelist,
mock_read_sys_net, mock_sys_dev_path)
@@ -1851,7 +2457,7 @@ class TestEniNetRendering(CiTestCase):
renderer = eni.Renderer(
{'eni_path': 'interfaces', 'netrules_path': None})
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
'interfaces')))
@@ -1871,7 +2477,7 @@ iface eth1000 inet dhcp
tmp_dir = self.tmp_dir()
ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
renderer = eni.Renderer()
- renderer.render_network_state(ns, tmp_dir)
+ renderer.render_network_state(ns, target=tmp_dir)
expected = """\
auto lo
iface lo inet loopback
@@ -1885,6 +2491,7 @@ iface eth0 inet dhcp
class TestNetplanNetRendering(CiTestCase):
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.netplan._clean_default")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@@ -1892,7 +2499,7 @@ class TestNetplanNetRendering(CiTestCase):
def test_default_generation(self, mock_get_devicelist,
mock_read_sys_net,
mock_sys_dev_path,
- mock_clean_default):
+ mock_clean_default, m_get_cmdline):
tmp_dir = self.tmp_dir()
_setup_test(tmp_dir, mock_get_devicelist,
mock_read_sys_net, mock_sys_dev_path)
@@ -1907,7 +2514,7 @@ class TestNetplanNetRendering(CiTestCase):
render_target = 'netplan.yaml'
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': False})
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
self.assertTrue(os.path.exists(os.path.join(render_dir,
render_target)))
@@ -2012,7 +2619,7 @@ class TestNetplanPostcommands(CiTestCase):
render_target = 'netplan.yaml'
renderer = netplan.Renderer(
{'netplan_path': render_target, 'postcmds': True})
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
mock_netplan_generate.assert_called_with(run=True)
mock_net_setup_link.assert_called_with(run=True)
@@ -2037,7 +2644,7 @@ class TestNetplanPostcommands(CiTestCase):
'/sys/class/net/lo'], capture=True),
]
with mock.patch.object(os.path, 'islink', return_value=True):
- renderer.render_network_state(ns, render_dir)
+ renderer.render_network_state(ns, target=render_dir)
mock_subp.assert_has_calls(expected)
@@ -2232,7 +2839,7 @@ class TestNetplanRoundTrip(CiTestCase):
renderer = netplan.Renderer(
config={'netplan_path': netplan_path})
- renderer.render_network_state(ns, target)
+ renderer.render_network_state(ns, target=target)
return dir2dict(target)
def testsimple_render_bond_netplan(self):
@@ -2277,6 +2884,13 @@ class TestNetplanRoundTrip(CiTestCase):
entry['expected_netplan'].splitlines(),
files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ def testsimple_render_dhcpv6_only(self):
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
def testsimple_render_all(self):
entry = NETWORK_CONFIGS['all']
files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -2296,6 +2910,7 @@ class TestNetplanRoundTrip(CiTestCase):
class TestEniRoundTrip(CiTestCase):
+
def _render_and_read(self, network_config=None, state=None, eni_path=None,
netrules_path=None, dir=None):
if dir is None:
@@ -2314,7 +2929,7 @@ class TestEniRoundTrip(CiTestCase):
renderer = eni.Renderer(
config={'eni_path': eni_path, 'netrules_path': netrules_path})
- renderer.render_network_state(ns, dir)
+ renderer.render_network_state(ns, target=dir)
return dir2dict(dir)
def testsimple_convert_and_render(self):
@@ -2345,6 +2960,13 @@ class TestEniRoundTrip(CiTestCase):
entry['expected_eni'].splitlines(),
files['/etc/network/interfaces'].splitlines())
+ def testsimple_render_dhcpv6_only(self):
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
def testsimple_render_v4_and_v6_static(self):
entry = NETWORK_CONFIGS['v4_and_v6_static']
files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -2569,6 +3191,43 @@ class TestGetInterfaces(CiTestCase):
any_order=True)
+class TestInterfaceHasOwnMac(CiTestCase):
+ """Test interface_has_own_mac. This is admittedly a bit whitebox."""
+
+ @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int):
+ """If nic does not have addr_assign_type, it is not "stolen".
+
+ SmartOS containers do not provide the addr_assign_type in /sys.
+
+ $ ( cd /sys/class/net/eth0/ && grep -r . *)
+ address:90:b8:d0:20:e1:b0
+ addr_len:6
+ flags:0x1043
+ ifindex:2
+ mtu:1500
+ tx_queue_len:1
+ type:1
+ """
+ self.assertTrue(interface_has_own_mac("eth0"))
+
+ @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int):
+ with self.assertRaises(ValueError):
+ interface_has_own_mac("eth0", True)
+
+ @mock.patch('cloudinit.net.read_sys_net_int')
+ def test_expected_values(self, m_read_sys_net_int):
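+ # Kernel addr_assign_type values: 0 (permanent), 1 (random) and
+ # 3 (set by software) all mean the nic owns its mac; 2 means the
+ # address was stolen from another device.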
+ msg = "address_assign_type=%d said to not have own mac"
+ for address_assign_type in (0, 1, 3):
+ m_read_sys_net_int.return_value = address_assign_type
+ self.assertTrue(
+ interface_has_own_mac("eth0"), msg % address_assign_type)
+
+ m_read_sys_net_int.return_value = 2
+ self.assertFalse(interface_has_own_mac("eth0"))
+
+
class TestGetInterfacesByMac(CiTestCase):
_data = {'bonds': ['bond1'],
'bridges': ['bridge1'],
@@ -2601,11 +3260,15 @@ class TestGetInterfacesByMac(CiTestCase):
def _se_interface_has_own_mac(self, name):
return name in self.data['own_macs']
+ def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
+ ib_hwaddr = self.data.get('ib_hwaddr', {})
+ return ib_hwaddr.get(name, {}).get(ethernet_format)
+
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
self.data['devices'] = set(list(self.data['macs'].keys()))
mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'is_vlan')
+ 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr')
self.mocks = {}
for n in mocks:
m = mock.patch('cloudinit.net.' + n,
@@ -2679,6 +3342,20 @@ class TestGetInterfacesByMac(CiTestCase):
ret = net.get_interfaces_by_mac()
self.assertEqual('lo', ret[empty_mac])
+ def test_ib(self):
+ ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
+ ib_addr_eth_format = '00:11:22:33:44:56'
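+ # Both the 20-octet InfiniBand hwaddr and its 6-octet
+ # ethernet-style form should map back to ib0.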
+ self._mock_setup()
+ self.data['devices'] = ['enp0s1', 'ib0']
+ self.data['own_macs'].append('ib0')
+ self.data['macs']['ib0'] = ib_addr
+ self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format,
+ False: ib_addr}}
+ result = net.get_interfaces_by_mac()
+ expected = {'aa:aa:aa:aa:aa:01': 'enp0s1',
+ ib_addr_eth_format: 'ib0', ib_addr: 'ib0'}
+ self.assertEqual(expected, result)
+
class TestInterfacesSorting(CiTestCase):
@@ -2693,6 +3370,67 @@ class TestInterfacesSorting(CiTestCase):
['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
+class TestGetIBHwaddrsByInterface(CiTestCase):
+
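+ # get_ib_hwaddrs_by_interface should report only InfiniBand nics,
+ # keyed by interface name with the full 20-octet hwaddr as value.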
+ _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
+ _ib_addr_eth_format = '00:11:22:33:44:56'
+ _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
+ 'bridge1-nic', 'tun0', 'ib0'],
+ 'bonds': ['bond1'],
+ 'bridges': ['bridge1'],
+ 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'],
+ 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
+ 'enp0s2': 'aa:aa:aa:aa:aa:02',
+ 'bond1': 'aa:aa:aa:aa:aa:01',
+ 'bridge1': 'aa:aa:aa:aa:aa:03',
+ 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
+ 'tun0': None,
+ 'ib0': _ib_addr},
+ 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format,
+ False: _ib_addr}}}
+ data = {}
+
+ def _mock_setup(self):
+ self.data = copy.deepcopy(self._data)
+ mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
+ 'interface_has_own_mac', 'get_ib_interface_hwaddr')
+ self.mocks = {}
+ for n in mocks:
+ m = mock.patch('cloudinit.net.' + n,
+ side_effect=getattr(self, '_se_' + n))
+ self.addCleanup(m.stop)
+ self.mocks[n] = m.start()
+
+ def _se_get_devicelist(self):
+ return self.data['devices']
+
+ def _se_get_interface_mac(self, name):
+ return self.data['macs'][name]
+
+ def _se_is_bridge(self, name):
+ return name in self.data['bridges']
+
+ def _se_interface_has_own_mac(self, name):
+ return name in self.data['own_macs']
+
+ def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
+ ib_hwaddr = self.data.get('ib_hwaddr', {})
+ return ib_hwaddr.get(name, {}).get(ethernet_format)
+
+ def test_ethernet(self):
+ self._mock_setup()
+ self.data['devices'].remove('ib0')
+ result = net.get_ib_hwaddrs_by_interface()
+ expected = {}
+ self.assertEqual(expected, result)
+
+ def test_ib(self):
+ self._mock_setup()
+ result = net.get_ib_hwaddrs_by_interface()
+ expected = {'ib0': self._ib_addr}
+ self.assertEqual(expected, result)
+
+
def _gzip_data(data):
with io.BytesIO() as iobuf:
gzfp = gzip.GzipFile(mode="wb", fileobj=iobuf)
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
new file mode 100644
index 00000000..2e64c6c7
--- /dev/null
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -0,0 +1,134 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.reporting import events
+from cloudinit.reporting import handlers
+
+import json
+import os
+
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestKvpEncoding(CiTestCase):
+ def test_encode_decode(self):
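+ # KVP records are fixed-size: key and value are padded out to
+ # HV_KVP_RECORD_SIZE, so decoding an encoded record must recover
+ # the original pair exactly.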
+ kvp = {'key': 'key1', 'value': 'value1'}
+ kvp_reporting = handlers.HyperVKvpReportingHandler()
+ data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+ self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
+ decoded_kvp = kvp_reporting._decode_kvp_item(data)
+ self.assertEqual(kvp, decoded_kvp)
+
+
+class TestKvpReporter(CiTestCase):
+ def setUp(self):
+ super(TestKvpReporter, self).setUp()
+ self.tmp_file_path = self.tmp_path('kvp_pool_file')
+ util.ensure_file(self.tmp_file_path)
+
+ def test_event_type_can_be_filtered(self):
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path,
+ event_types=['foo', 'bar'])
+
+ reporter.publish_event(
+ events.ReportingEvent('foo', 'name', 'description'))
+ reporter.publish_event(
+ events.ReportingEvent('some_other', 'name', 'description3'))
+ reporter.q.join()
+
+ kvps = list(reporter._iterate_kvps(0))
+ self.assertEqual(1, len(kvps))
+
+ reporter.publish_event(
+ events.ReportingEvent('bar', 'name', 'description2'))
+ reporter.q.join()
+ kvps = list(reporter._iterate_kvps(0))
+ self.assertEqual(2, len(kvps))
+
+ self.assertIn('foo', kvps[0]['key'])
+ self.assertIn('bar', kvps[1]['key'])
+ self.assertNotIn('some_other', kvps[0]['key'])
+ self.assertNotIn('some_other', kvps[1]['key'])
+
+ def test_events_are_over_written(self):
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+
+ self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+ reporter.publish_event(
+ events.ReportingEvent('foo', 'name1', 'description'))
+ reporter.publish_event(
+ events.ReportingEvent('foo', 'name2', 'description'))
+ reporter.q.join()
+ self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+ reporter2 = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+ reporter2.incarnation_no = reporter.incarnation_no + 1
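+ # A higher incarnation number marks a new boot; events written by
+ # the previous incarnation are stale and may be overwritten.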
+ reporter2.publish_event(
+ events.ReportingEvent('foo', 'name3', 'description'))
+ reporter2.q.join()
+
+ self.assertEqual(2, len(list(reporter2._iterate_kvps(0))))
+
+ def test_events_with_higher_incarnation_not_over_written(self):
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+
+ self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
+
+ reporter.publish_event(
+ events.ReportingEvent('foo', 'name1', 'description'))
+ reporter.publish_event(
+ events.ReportingEvent('foo', 'name2', 'description'))
+ reporter.q.join()
+ self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
+
+ reporter3 = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+ reporter3.incarnation_no = reporter.incarnation_no - 1
+ reporter3.publish_event(
+ events.ReportingEvent('foo', 'name3', 'description'))
+ reporter3.q.join()
+ self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
+
+ def test_finish_event_result_is_logged(self):
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+ reporter.publish_event(
+ events.FinishReportingEvent('name2', 'description1',
+ result=events.status.FAIL))
+ reporter.q.join()
+ self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
+
+ def test_file_operation_issue(self):
+ os.remove(self.tmp_file_path)
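+ # The kvp pool file has been removed; publishing must not raise.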
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+ reporter.publish_event(
+ events.FinishReportingEvent('name2', 'description1',
+ result=events.status.FAIL))
+ reporter.q.join()
+
+ def test_event_very_long(self):
+ reporter = handlers.HyperVKvpReportingHandler(
+ kvp_file_path=self.tmp_file_path)
+ description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE
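+ # The description is twice HV_KVP_EXCHANGE_MAX_VALUE_SIZE, so with
+ # the JSON framing it has to be split across three KVP slices.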
+ long_event = events.FinishReportingEvent(
+ 'event_name',
+ description,
+ result=events.status.FAIL)
+ reporter.publish_event(long_event)
+ reporter.q.join()
+ kvps = list(reporter._iterate_kvps(0))
+ self.assertEqual(3, len(kvps))
+
+ # restore from the kvp to see the content are all there
+ full_description = ''
+ for i in range(len(kvps)):
+ msg_slice = json.loads(kvps[i]['value'])
+ self.assertEqual(msg_slice['msg_i'], i)
+ full_description += msg_slice['msg']
+ self.assertEqual(description, full_description)
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index 22718108..4cd27eed 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -8,10 +8,16 @@ import logging
from cloudinit.config import cc_rh_subscription
from cloudinit import util
-from cloudinit.tests.helpers import TestCase, mock
+from cloudinit.tests.helpers import CiTestCase, mock
+SUBMGR = cc_rh_subscription.SubscriptionManager
+SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli'
+
+
+@mock.patch(SUB_MAN_CLI)
+class GoodTests(CiTestCase):
+ with_logs = True
-class GoodTests(TestCase):
def setUp(self):
super(GoodTests, self).setUp()
self.name = "cc_rh_subscription"
@@ -19,7 +25,6 @@ class GoodTests(TestCase):
self.log = logging.getLogger("good_tests")
self.args = []
self.handle = cc_rh_subscription.handle
- self.SM = cc_rh_subscription.SubscriptionManager
self.config = {'rh_subscription':
{'username': 'scooby@do.com',
@@ -35,55 +40,47 @@ class GoodTests(TestCase):
'disable-repo': ['repo4', 'repo5']
}}
- def test_already_registered(self):
+ def test_already_registered(self, m_sman_cli):
'''
Emulates a system that is already registered. Ensure it gets
a non-ProcessExecution error from is_registered()
'''
- with mock.patch.object(cc_rh_subscription.SubscriptionManager,
- '_sub_man_cli') as mockobj:
- self.SM.log_success = mock.MagicMock()
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(mockobj.call_count, 1)
-
- def test_simple_registration(self):
+ self.handle(self.name, self.config, self.cloud_init,
+ self.log, self.args)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assertIn('System is already registered', self.logs.getvalue())
+
+ def test_simple_registration(self, m_sman_cli):
'''
Simple registration with username and password
'''
- self.SM.log_success = mock.MagicMock()
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError, (reg, 'bar')]
self.handle(self.name, self.config, self.cloud_init,
self.log, self.args)
- self.assertIn(mock.call(['identity']),
- self.SM._sub_man_cli.call_args_list)
+ self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
self.assertIn(mock.call(['register', '--username=scooby@do.com',
'--password=scooby-snacks'],
logstring_val=True),
- self.SM._sub_man_cli.call_args_list)
-
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+ m_sman_cli.call_args_list)
+ self.assertIn('rh_subscription plugin completed successfully',
+ self.logs.getvalue())
+ self.assertEqual(m_sman_cli.call_count, 2)
@mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
- @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_sub_man_cli")
- def test_update_repos_disable_with_none(self, m_sub_man_cli, m_get_repos):
+ def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
cfg = copy.deepcopy(self.config)
m_get_repos.return_value = ([], ['repo1'])
- m_sub_man_cli.return_value = (b'', b'')
cfg['rh_subscription'].update(
{'enable-repo': ['repo1'], 'disable-repo': None})
mysm = cc_rh_subscription.SubscriptionManager(cfg)
self.assertEqual(True, mysm.update_repos())
m_get_repos.assert_called_with()
- self.assertEqual(m_sub_man_cli.call_args_list,
+ self.assertEqual(m_sman_cli.call_args_list,
[mock.call(['repos', '--enable=repo1'])])
- def test_full_registration(self):
+ def test_full_registration(self, m_sman_cli):
'''
Registration with auto-attach, service-level, adding pools,
and enabling and disabling yum repos
@@ -93,26 +90,28 @@ class GoodTests(TestCase):
call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
'--enable=repo3'])
call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
- self.SM.log_success = mock.MagicMock()
reg = "The system has been registered with ID:" \
" 12345678-abde-abcde-1234-1234567890abc"
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (reg, 'bar'),
- ('Service level set to: self-support', ''),
- ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
- ('Repo ID: repo1\nRepo ID: repo5\n', ''),
- ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: '
- 'repo4', ''),
- ('', '')])
+ m_sman_cli.side_effect = [
+ util.ProcessExecutionError,
+ (reg, 'bar'),
+ ('Service level set to: self-support', ''),
+ ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
+ ('Repo ID: repo1\nRepo ID: repo5\n', ''),
+ ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''),
+ ('', '')]
self.handle(self.name, self.config_full, self.cloud_init,
self.log, self.args)
+ self.assertEqual(m_sman_cli.call_count, 9)
for call in call_lists:
- self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
- self.assertEqual(self.SM.log_success.call_count, 1)
- self.assertEqual(self.SM._sub_man_cli.call_count, 9)
+ self.assertIn(mock.call(call), m_sman_cli.call_args_list)
+ self.assertIn("rh_subscription plugin completed successfully",
+ self.logs.getvalue())
-class TestBadInput(TestCase):
+@mock.patch(SUB_MAN_CLI)
+class TestBadInput(CiTestCase):
+ with_logs = True
name = "cc_rh_subscription"
cloud_init = None
log = logging.getLogger("bad_tests")
@@ -155,81 +154,81 @@ class TestBadInput(TestCase):
super(TestBadInput, self).setUp()
self.handle = cc_rh_subscription.handle
- def test_no_password(self):
- '''
- Attempt to register without the password key/value
- '''
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ def assert_logged_warnings(self, warnings):
+ logs = self.logs.getvalue()
+ missing = [w for w in warnings if "WARNING: " + w not in logs]
+ self.assertEqual([], missing, "Missing expected warnings.")
+
+ def test_no_password(self, m_sman_cli):
+ '''Attempt to register without the password key/value.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_no_password, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 0)
+ self.assertEqual(m_sman_cli.call_count, 0)
- def test_no_org(self):
- '''
- Attempt to register without the org key/value
- '''
- self.input_is_missing_data(self.config_no_key)
-
- def test_service_level_without_auto(self):
- '''
- Attempt to register using service-level without the auto-attach key
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ def test_no_org(self, m_sman_cli):
+ '''Attempt to register without the org key/value.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError]
+ self.handle(self.name, self.config_no_key, self.cloud_init,
+ self.log, self.args)
+ m_sman_cli.assert_called_with(['identity'])
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'Unable to register system due to incomplete information.',
+ 'Use either activationkey and org *or* userid and password',
+ 'Registration failed or did not run completely',
+ 'rh_subscription plugin did not complete successfully'))
+
+ def test_service_level_without_auto(self, m_sman_cli):
+ '''Attempt to register using service-level without auto-attach key.'''
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_service, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
- self.assertEqual(self.SM.log_warn.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'The service-level key must be used in conjunction with ',
+ 'rh_subscription plugin did not complete successfully'))
- def test_pool_not_a_list(self):
+ def test_pool_not_a_list(self, m_sman_cli):
'''
Register with pools that are not in the format of a list
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badpool, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
- self.assertEqual(self.SM.log_warn.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings((
+ 'Pools must in the format of a list',
+ 'rh_subscription plugin did not complete successfully'))
- def test_repo_not_a_list(self):
+ def test_repo_not_a_list(self, m_sman_cli):
'''
Register with repos that are not in the format of a list
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badrepo, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 3)
- self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings((
+ 'Repo IDs must in the format of a list.',
+ 'Unable to add or remove repos',
+ 'rh_subscription plugin did not complete successfully'))
- def test_bad_key_value(self):
+ def test_bad_key_value(self, m_sman_cli):
'''
Attempt to register with a key that we don't know
'''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+ m_sman_cli.side_effect = [util.ProcessExecutionError,
+ (self.reg, 'bar')]
self.handle(self.name, self.config_badkey, self.cloud_init,
self.log, self.args)
- self.assertEqual(self.SM.log_warn.call_count, 2)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-
- def input_is_missing_data(self, config):
- '''
- Helper def for tests that having missing information
- '''
- self.SM.log_warn = mock.MagicMock()
- self.SM._sub_man_cli = mock.MagicMock(
- side_effect=[util.ProcessExecutionError])
- self.handle(self.name, config, self.cloud_init,
- self.log, self.args)
- self.SM._sub_man_cli.assert_called_with(['identity'])
- self.assertEqual(self.SM.log_warn.call_count, 4)
- self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings((
+ 'fookey is not a valid key for rh_subscription. Valid keys are:',
+ 'rh_subscription plugin did not complete successfully'))
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
index 5d3f1ca3..d1ac4942 100644
--- a/tests/unittests/test_runs/test_merge_run.py
+++ b/tests/unittests/test_runs/test_merge_run.py
@@ -25,7 +25,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
'cloud_init_modules': ['write-files'],
'system_info': {'paths': {'run_dir': new_root}}
}
- ud = self.readResource('user_data.1.txt')
+ ud = helpers.readResource('user_data.1.txt')
cloud_cfg = util.yaml_dumps(cfg)
util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
util.write_file(os.path.join(new_root, 'etc',
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index 762974e9..d67c422c 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
@@ -127,8 +128,9 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
"""run_section forced skipped modules by using unverified_modules."""
# re-write cloud.cfg with unverified_modules override
- self.cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
- cloud_cfg = util.yaml_dumps(self.cfg)
+ cfg = copy.deepcopy(self.cfg)
+ cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
+ cloud_cfg = util.yaml_dumps(cfg)
util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
util.write_file(os.path.join(self.new_root, 'etc',
'cloud', 'cloud.cfg'), cloud_cfg)
@@ -150,4 +152,30 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
"running unverified_modules: 'spacewalk'",
self.logs.getvalue())
+ def test_none_ds_run_with_no_config_modules(self):
+ """run_section will report no modules run when none are configured."""
+
+ # re-write cloud.cfg with cloud_init_modules emptied out
+ cfg = copy.deepcopy(self.cfg)
+ # Represent empty configuration in /etc/cloud/cloud.cfg
+ cfg['cloud_init_modules'] = None
+ cloud_cfg = util.yaml_dumps(cfg)
+ util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
+ util.write_file(os.path.join(self.new_root, 'etc',
+ 'cloud', 'cloud.cfg'), cloud_cfg)
+
+ initer = stages.Init()
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ initer.instancify()
+ initer.update()
+ initer.cloudify().run('consume_data', initer.consume_data,
+ args=[PER_INSTANCE], freq=PER_INSTANCE)
+
+ mods = stages.Modules(initer)
+ (which_ran, failures) = mods.run_section('cloud_init_modules')
+ self.assertEqual([], failures)
+ self.assertEqual([], which_ran)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 4c62c8be..73ae897f 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -4,6 +4,7 @@ from mock import patch
from cloudinit import ssh_util
from cloudinit.tests import helpers as test_helpers
+from cloudinit import util
VALID_CONTENT = {
@@ -56,7 +57,7 @@ TEST_OPTIONS = (
'user \"root\".\';echo;sleep 10"')
-class TestAuthKeyLineParser(test_helpers.TestCase):
+class TestAuthKeyLineParser(test_helpers.CiTestCase):
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
@@ -126,7 +127,7 @@ class TestAuthKeyLineParser(test_helpers.TestCase):
self.assertFalse(key.valid())
-class TestUpdateAuthorizedKeys(test_helpers.TestCase):
+class TestUpdateAuthorizedKeys(test_helpers.CiTestCase):
def test_new_keys_replace(self):
"""new entries with the same base64 should replace old."""
@@ -168,7 +169,7 @@ class TestUpdateAuthorizedKeys(test_helpers.TestCase):
self.assertEqual(expected, found)
-class TestParseSSHConfig(test_helpers.TestCase):
+class TestParseSSHConfig(test_helpers.CiTestCase):
def setUp(self):
self.load_file_patch = patch('cloudinit.ssh_util.util.load_file')
@@ -235,4 +236,94 @@ class TestParseSSHConfig(test_helpers.TestCase):
self.assertEqual('foo', ret[0].key)
self.assertEqual('bar', ret[0].value)
+
+class TestUpdateSshConfigLines(test_helpers.CiTestCase):
+ """Test the update_ssh_config_lines method."""
+ exlines = [
+ "#PasswordAuthentication yes",
+ "UsePAM yes",
+ "# Comment line",
+ "AcceptEnv LANG LC_*",
+ "X11Forwarding no",
+ ]
+ pwauth = "PasswordAuthentication"
+
+ def check_line(self, line, opt, val):
+ self.assertEqual(line.key, opt.lower())
+ self.assertEqual(line.value, val)
+ self.assertIn(opt, str(line))
+ self.assertIn(val, str(line))
+
+ def test_new_option_added(self):
+ """A single update of non-existing option."""
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'})
+ self.assertEqual(['MyKey'], result)
+ self.check_line(lines[-1], "MyKey", "MyVal")
+
+ def test_commented_out_not_updated_but_appended(self):
+ """Implementation does not un-comment and update lines."""
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, {self.pwauth: "no"})
+ self.assertEqual([self.pwauth], result)
+ self.check_line(lines[-1], self.pwauth, "no")
+
+ def test_single_option_updated(self):
+ """A single update should have change made and line updated."""
+ opt, val = ("UsePAM", "no")
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, {opt: val})
+ self.assertEqual([opt], result)
+ self.check_line(lines[1], opt, val)
+
+ def test_multiple_updates_with_add(self):
+ """Verify multiple updates some added some changed, some not."""
+ updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval",
+ "AcceptEnv": "LANG ADD LC_*"}
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, updates)
+ self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result))
+ self.check_line(lines[3], "AcceptEnv", updates["AcceptEnv"])
+
+ def test_return_empty_if_no_changes(self):
+ """If there are no changes, then return should be empty list."""
+ updates = {"UsePAM": "yes"}
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, updates)
+ self.assertEqual([], result)
+ self.assertEqual(self.exlines, [str(l) for l in lines])
+
+ def test_keycase_not_modified(self):
+ """Original case of key should not be changed on update.
+ This behavior is to keep original config as much intact as can be."""
+ updates = {"usepam": "no"}
+ lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
+ result = ssh_util.update_ssh_config_lines(lines, updates)
+ self.assertEqual(["usepam"], result)
+ self.assertEqual("UsePAM no", str(lines[1]))
+
+
+class TestUpdateSshConfig(test_helpers.CiTestCase):
+ cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""])
+
+ def test_modified(self):
+ mycfg = self.tmp_path("ssh_config_1")
+ util.write_file(mycfg, self.cfgdata)
+ ret = ssh_util.update_ssh_config({"MyKey": "NEW_VAL"}, mycfg)
+ self.assertTrue(ret)
+ found = util.load_file(mycfg)
+ self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found)
+ # assert there is a newline at end of file (LP: #1677205)
+ self.assertEqual('\n', found[-1])
+
+ def test_not_modified(self):
+ mycfg = self.tmp_path("ssh_config_2")
+ util.write_file(mycfg, self.cfgdata)
+ with patch("cloudinit.ssh_util.util.write_file") as m_write_file:
+ ret = ssh_util.update_ssh_config({"MyKey": "ORIG_VAL"}, mycfg)
+ self.assertFalse(ret)
+ self.assertEqual(self.cfgdata, util.load_file(mycfg))
+ m_write_file.assert_not_called()
+
+
# vi: ts=4 expandtab
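
The new TestUpdateSshConfigLines cases pin down three behaviors of ssh_util:
a commented-out option is appended rather than un-commented, an option that
already holds the requested value is left alone, and the original key case is
preserved. A minimal sketch of that contract, assuming cloudinit is
importable:

    from cloudinit import ssh_util

    lines = ssh_util.parse_ssh_config_lines([
        "#PasswordAuthentication yes",  # commented out: appended, not edited
        "UsePAM yes",
    ])
    changed = ssh_util.update_ssh_config_lines(
        lines, {"PasswordAuthentication": "no", "usepam": "yes"})
    print(changed)                        # ['PasswordAuthentication']
    print([str(line) for line in lines])  # 'UsePAM yes' kept with original case
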
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 53154d33..c36e6eb0 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -10,6 +10,7 @@ from cloudinit.tests import helpers as test_helpers
import textwrap
from cloudinit import templater
+from cloudinit.util import load_file, write_file
try:
import Cheetah
@@ -19,7 +20,20 @@ except ImportError:
HAS_CHEETAH = False
-class TestTemplates(test_helpers.TestCase):
+class TestTemplates(test_helpers.CiTestCase):
+
+ with_logs = True
+
+ jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n'
+ jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8')
+
+ @staticmethod
+ def add_header(renderer, data):
+ """Return text (py2 unicode/py3 str) with template header."""
+ if isinstance(data, bytes):
+ data = data.decode('utf-8')
+ return "## template: %s\n" % renderer + data
+
def test_render_basic(self):
in_data = textwrap.dedent("""
${b}
@@ -39,12 +53,12 @@ class TestTemplates(test_helpers.TestCase):
def test_detection(self):
blob = "## template:cheetah"
- (template_type, renderer, contents) = templater.detect_template(blob)
+ (template_type, _renderer, contents) = templater.detect_template(blob)
self.assertIn("cheetah", template_type)
self.assertEqual("", contents.strip())
blob = "blahblah $blah"
- (template_type, renderer, contents) = templater.detect_template(blob)
+ (template_type, _renderer, _contents) = templater.detect_template(blob)
self.assertIn("cheetah", template_type)
self.assertEqual(blob, contents)
@@ -106,4 +120,52 @@ $a,$b'''
'codename': codename})
self.assertEqual(ex_data, out_data)
+ def test_jinja_nonascii_render_to_string(self):
+ """Test jinja render_to_string with non-ascii content."""
+ self.assertEqual(
+ templater.render_string(
+ self.add_header("jinja", self.jinja_utf8), {"name": "bob"}),
+ self.jinja_utf8_rbob)
+
+ def test_jinja_nonascii_render_undefined_variables_to_default_py3(self):
+ """Test py3 jinja render_to_string with undefined variable default."""
+ self.assertEqual(
+ templater.render_string(
+ self.add_header("jinja", self.jinja_utf8), {}),
+ self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name'))
+
+ def test_jinja_nonascii_render_to_file(self):
+ """Test jinja render_to_file of a filename with non-ascii content."""
+ tmpl_fn = self.tmp_path("j-render-to-file.template")
+ out_fn = self.tmp_path("j-render-to-file.out")
+ write_file(filename=tmpl_fn, omode="wb",
+ content=self.add_header(
+ "jinja", self.jinja_utf8).encode('utf-8'))
+ templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"})
+ result = load_file(out_fn, decode=False).decode('utf-8')
+ self.assertEqual(result, self.jinja_utf8_rbob)
+
+ def test_jinja_nonascii_render_from_file(self):
+ """Test jinja render_from_file with non-ascii content."""
+ tmpl_fn = self.tmp_path("j-render-from-file.template")
+ write_file(tmpl_fn, omode="wb",
+ content=self.add_header(
+ "jinja", self.jinja_utf8).encode('utf-8'))
+ result = templater.render_from_file(tmpl_fn, {"name": "bob"})
+ self.assertEqual(result, self.jinja_utf8_rbob)
+
+ @test_helpers.skipIfJinja()
+ def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self):
+ """Test jinja render_from_file will fallback to basic renderer."""
+ tmpl_fn = self.tmp_path("j-render-from-file.template")
+ write_file(tmpl_fn, omode="wb",
+ content=self.add_header(
+ "jinja", self.jinja_utf8).encode('utf-8'))
+ result = templater.render_from_file(tmpl_fn, {"name": "bob"})
+ self.assertEqual(result, self.jinja_utf8.decode())
+ self.assertIn(
+ 'WARNING: Jinja not available as the selected renderer for desired'
+ ' template, reverting to the basic renderer.',
+ self.logs.getvalue())
+
# vi: ts=4 expandtab
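
The added jinja tests rely on the '## template: jinja' header to select the
renderer, and on undefined variables rendering as CI_MISSING_JINJA_VAR/<name>
instead of raising. A small sketch (assuming jinja2 is installed; the last
test above covers the fallback to the basic renderer when it is not):

    from cloudinit import templater

    content = "## template: jinja\nHello {{name}}\n"
    print(templater.render_string(content, {"name": "bob"}))  # Hello bob
    # An undefined variable degrades to a marker rather than failing the render.
    print(templater.render_string(content, {}))  # Hello CI_MISSING_JINJA_VAR/name
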
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 50101906..5a14479a 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -4,6 +4,7 @@ from __future__ import print_function
import logging
import os
+import re
import shutil
import stat
import tempfile
@@ -23,6 +24,7 @@ except ImportError:
BASH = util.which('bash')
+BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
class FakeSelinux(object):
@@ -265,26 +267,49 @@ class TestGetCmdline(helpers.TestCase):
self.assertEqual("abcd 123", ret)
-class TestLoadYaml(helpers.TestCase):
+class TestLoadYaml(helpers.CiTestCase):
mydefault = "7b03a8ebace993d806255121073fed52"
+ with_logs = True
def test_simple(self):
mydata = {'1': "one", '2': "two"}
self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata)
def test_nonallowed_returns_default(self):
+ '''Any unallowed type returns the default and logs the issue.'''
# for now, anything not in the allowed list just returns the default.
myyaml = yaml.dump({'1': "one"})
self.assertEqual(util.load_yaml(blob=myyaml,
default=self.mydefault,
allowed=(str,)),
self.mydefault)
-
- def test_bogus_returns_default(self):
+ regex = re.compile(
+ r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but'
+ r' got dict')
+ self.assertTrue(regex.search(self.logs.getvalue()),
+ msg='Missing expected yaml load error')
+
+ def test_bogus_scan_error_returns_default(self):
+ '''On Yaml scan error, load_yaml returns the default and logs the issue.'''
badyaml = "1\n 2:"
self.assertEqual(util.load_yaml(blob=badyaml,
default=self.mydefault),
self.mydefault)
+ self.assertIn(
+ 'Failed loading yaml blob. Invalid format at line 2 column 3:'
+ ' "mapping values are not allowed here',
+ self.logs.getvalue())
+
+ def test_bogus_parse_error_returns_default(self):
+ '''On Yaml parse error, load_yaml returns the default and logs the issue.'''
+ badyaml = "{}}"
+ self.assertEqual(util.load_yaml(blob=badyaml,
+ default=self.mydefault),
+ self.mydefault)
+ self.assertIn(
+ 'Failed loading yaml blob. Invalid format at line 1 column 3:'
+ " \"expected \'<document start>\', but found \'}\'",
+ self.logs.getvalue())
def test_unsafe_types(self):
# should not load complex types
@@ -325,7 +350,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
def test_precise_ext4_root(self):
- lines = self.readResource('mountinfo_precise_ext4.txt').splitlines()
+ lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines()
expected = ('/dev/mapper/vg0-root', 'ext4', '/')
self.assertEqual(expected, util.parse_mount_info('/', lines))
@@ -347,7 +372,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
def test_raring_btrfs_root(self):
- lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines()
+ lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines()
expected = ('/dev/vda1', 'btrfs', '/')
self.assertEqual(expected, util.parse_mount_info('/', lines))
@@ -373,7 +398,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- self.readResource('zpool_status_simple.txt'), ''
+ helpers.readResource('zpool_status_simple.txt'), ''
)
# save function return values and do asserts
ret = util.get_device_info_from_zpool('vmzroot')
@@ -406,7 +431,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- self.readResource('zpool_status_simple.txt'), 'error'
+ helpers.readResource('zpool_status_simple.txt'), 'error'
)
# save function return values and do asserts
ret = util.get_device_info_from_zpool('vmzroot')
@@ -414,7 +439,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
@mock.patch('cloudinit.util.subp')
def test_parse_mount_with_ext(self, mount_out):
- mount_out.return_value = (self.readResource('mount_parse_ext.txt'), '')
+ mount_out.return_value = (
+ helpers.readResource('mount_parse_ext.txt'), '')
# this one is valid and exists in mount_parse_ext.txt
ret = util.parse_mount('/var')
self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret)
@@ -430,7 +456,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
@mock.patch('cloudinit.util.subp')
def test_parse_mount_with_zfs(self, mount_out):
- mount_out.return_value = (self.readResource('mount_parse_zfs.txt'), '')
+ mount_out.return_value = (
+ helpers.readResource('mount_parse_zfs.txt'), '')
# this one is valid and exists in mount_parse_zfs.txt
ret = util.parse_mount('/var')
self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret)
@@ -442,6 +469,29 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertIsNone(ret)
+class TestIsX86(helpers.CiTestCase):
+
+ def test_is_x86_matches_x86_types(self):
+ """is_x86 returns True if CPU architecture matches."""
+ matched_arches = ['x86_64', 'i386', 'i586', 'i686']
+ for arch in matched_arches:
+ self.assertTrue(
+ util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
+
+ def test_is_x86_unmatched_types(self):
+ """is_x86 returns Fale on non-intel x86 architectures."""
+ unmatched_arches = ['ia64', '9000/800', 'arm64v71']
+ for arch in unmatched_arches:
+ self.assertFalse(
+ util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
+
+ @mock.patch('cloudinit.util.os.uname')
+ def test_is_x86_calls_uname_for_architecture(self, m_uname):
+ """is_x86 returns True if platform from uname matches."""
+ m_uname.return_value = [0, 1, 2, 3, 'x86_64']
+ self.assertTrue(util.is_x86())
+
+
class TestReadDMIData(helpers.FilesystemMockingTestCase):
def setUp(self):
@@ -693,6 +743,8 @@ class TestReadSeeded(helpers.TestCase):
class TestSubp(helpers.CiTestCase):
with_logs = True
+ allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND, sys.executable]
stdin2err = [BASH, '-c', 'cat >&2']
stdin2out = ['cat']
@@ -700,7 +752,6 @@ class TestSubp(helpers.CiTestCase):
utf8_valid = b'start \xc3\xa9 end'
utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
- bogus_command = 'this-is-not-expected-to-be-a-program-name'
def printf_cmd(self, *args):
# bash's printf supports \xaa. So does /usr/bin/printf
@@ -772,11 +823,11 @@ class TestSubp(helpers.CiTestCase):
def test_subp_reads_env(self):
with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
- out, err = util.subp(self.printenv + ['FOO'], capture=True)
+ out, _err = util.subp(self.printenv + ['FOO'], capture=True)
self.assertEqual('FOO=BAR', out.splitlines()[0])
def test_subp_env_and_update_env(self):
- out, err = util.subp(
+ out, _err = util.subp(
self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
env={'FOO': 'BAR'},
update_env={'HOME': '/myhome', 'K2': 'V2'})
@@ -786,7 +837,7 @@ class TestSubp(helpers.CiTestCase):
def test_subp_update_env(self):
extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
with mock.patch.dict("os.environ", values=extra):
- out, err = util.subp(
+ out, _err = util.subp(
self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
update_env={'HOME': '/myhome', 'K2': 'V2'})
@@ -799,9 +850,18 @@ class TestSubp(helpers.CiTestCase):
util.write_file(noshebang, 'true\n')
os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
- self.assertRaisesRegex(util.ProcessExecutionError,
- 'Missing #! in script\?',
- util.subp, (noshebang,))
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(util.ProcessExecutionError,
+ r'Missing #! in script\?',
+ util.subp, (noshebang,))
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b'hello world'
+ (out, err) = util.subp(self.stdin2err, capture=True,
+ combine_capture=True, decode=False, data=data)
+ self.assertEqual(b'', err)
+ self.assertEqual(data, out)
def test_returns_none_if_no_capture(self):
(out, err) = util.subp(self.stdin2out, data=b'', capture=False)
@@ -811,14 +871,14 @@ class TestSubp(helpers.CiTestCase):
def test_exception_has_out_err_are_bytes_if_decode_false(self):
"""Raised exc should have stderr, stdout as bytes if no decode."""
with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([self.bogus_command], decode=False)
+ util.subp([BOGUS_COMMAND], decode=False)
self.assertTrue(isinstance(cm.exception.stdout, bytes))
self.assertTrue(isinstance(cm.exception.stderr, bytes))
def test_exception_has_out_err_are_bytes_if_decode_true(self):
"""Raised exc should have stderr, stdout as string if no decode."""
with self.assertRaises(util.ProcessExecutionError) as cm:
- util.subp([self.bogus_command], decode=True)
+ util.subp([BOGUS_COMMAND], decode=True)
self.assertTrue(isinstance(cm.exception.stdout, six.string_types))
self.assertTrue(isinstance(cm.exception.stderr, six.string_types))
@@ -868,10 +928,10 @@ class TestSubp(helpers.CiTestCase):
logs.append(log)
with self.assertRaises(util.ProcessExecutionError):
- util.subp([self.bogus_command], status_cb=status_cb)
+ util.subp([BOGUS_COMMAND], status_cb=status_cb)
expected = [
- 'Begin run command: {cmd}\n'.format(cmd=self.bogus_command),
+ 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
'ERROR: End run command: invalid command provided\n']
self.assertEqual(expected, logs)
@@ -883,13 +943,13 @@ class TestSubp(helpers.CiTestCase):
logs.append(log)
with self.assertRaises(util.ProcessExecutionError):
- util.subp(['ls', '/I/dont/exist'], status_cb=status_cb)
- util.subp(['ls'], status_cb=status_cb)
+ util.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
+ util.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
expected = [
- 'Begin run command: ls /I/dont/exist\n',
+ 'Begin run command: %s -c exit 2\n' % BASH,
'ERROR: End run command: exit(2)\n',
- 'Begin run command: ls\n',
+ 'Begin run command: %s -c exit 0\n' % BASH,
'End run command: exit(0)\n']
self.assertEqual(expected, logs)
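
The combine_capture flag exercised by test_subp_combined_stderr_stdout above
folds stderr into the stdout stream, so the returned err is empty. A minimal
usage sketch:

    from cloudinit import util

    out, err = util.subp(['sh', '-c', 'echo oops 1>&2'],
                         capture=True, combine_capture=True)
    print(repr(out), repr(err))  # 'oops\n' ''
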
@@ -1055,4 +1115,60 @@ class TestLoadShellContent(helpers.TestCase):
''])))
+class TestGetProcEnv(helpers.TestCase):
+ """test get_proc_env."""
+ null = b'\x00'
+ simple1 = b'HOME=/'
+ simple2 = b'PATH=/bin:/sbin'
+ bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371
+ mixed = b'MIXED=' + b'ab\xccde'
+
+ def _val_decoded(self, blob, encoding='utf-8', errors='replace'):
+ # return the value portion of key=val decoded.
+ return blob.split(b'=', 1)[1].decode(encoding, errors)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_non_utf8_in_environment(self, m_load_file):
+ """env may have non utf-8 decodable content."""
+ content = self.null.join(
+ (self.bootflag, self.simple1, self.simple2, self.mixed))
+ m_load_file.return_value = content
+
+ self.assertEqual(
+ {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
+ 'HOME': '/', 'PATH': '/bin:/sbin',
+ 'MIXED': self._val_decoded(self.mixed)},
+ util.get_proc_env(1))
+ self.assertEqual(1, m_load_file.call_count)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_encoding_none_returns_bytes(self, m_load_file):
+ """encoding none returns bytes."""
+ lines = (self.bootflag, self.simple1, self.simple2, self.mixed)
+ content = self.null.join(lines)
+ m_load_file.return_value = content
+
+ self.assertEqual(
+ dict([t.split(b'=') for t in lines]),
+ util.get_proc_env(1, encoding=None))
+ self.assertEqual(1, m_load_file.call_count)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_all_utf8_encoded(self, m_load_file):
+ """common path where only utf-8 decodable content."""
+ content = self.null.join((self.simple1, self.simple2))
+ m_load_file.return_value = content
+ self.assertEqual(
+ {'HOME': '/', 'PATH': '/bin:/sbin'},
+ util.get_proc_env(1))
+ self.assertEqual(1, m_load_file.call_count)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_non_existing_file_returns_empty_dict(self, m_load_file):
+ """as implemented, a non-existing pid returns empty dict.
+ This is how it was originally implemented."""
+ m_load_file.side_effect = OSError("File does not exist.")
+ self.assertEqual({}, util.get_proc_env(1))
+ self.assertEqual(1, m_load_file.call_count)
+
# vi: ts=4 expandtab
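
TestGetProcEnv above documents the shape of /proc/<pid>/environ:
NUL-separated KEY=VALUE pairs with no guarantee of valid UTF-8, hence the
errors='replace' decode. An illustrative stand-alone parser (a hypothetical
helper, not the cloudinit API):

    def parse_environ(blob, encoding='utf-8', errors='replace'):
        env = {}
        for tok in blob.split(b'\x00'):
            if b'=' not in tok:
                continue
            key, _, val = tok.partition(b'=')
            env[key.decode(encoding, errors)] = val.decode(encoding, errors)
        return env

    print(parse_environ(b'HOME=/\x00BOOTABLE_FLAG=\x80'))
    # {'HOME': '/', 'BOOTABLE_FLAG': '\ufffd'}
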
diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py
deleted file mode 100644
index d012f69d..00000000
--- a/tests/unittests/test_version.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.tests.helpers import CiTestCase
-from cloudinit import version
-
-
-class TestExportsFeatures(CiTestCase):
- def test_has_network_config_v1(self):
- self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
-
- def test_has_network_config_v2(self):
- self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 036f6879..602dedb0 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -2,11 +2,15 @@
# Copyright (C) 2016 VMware INC.
#
# Author: Sankar Tanguturi <stanguturi@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
import logging
+import os
import sys
+import tempfile
+import textwrap
from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
from cloudinit.sources.DataSourceOVF import read_vmware_imc
@@ -343,4 +347,115 @@ class TestVmwareConfigFile(CiTestCase):
conf = Config(cf)
self.assertEqual("test-script", conf.custom_script_name)
+
+class TestVmwareNetConfig(CiTestCase):
+ """Test conversion of vmware config to cloud-init config."""
+
+ def _get_NicConfigurator(self, text):
+ fp = None
+ try:
+ with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
+ delete=False) as fp:
+ fp.write(text)
+ fp.close()
+ cfg = Config(ConfigFile(fp.name))
+ return NicConfigurator(cfg.nics, use_system_devices=False)
+ finally:
+ if fp:
+ os.unlink(fp.name)
+
+ def test_non_primary_nic_without_gateway(self):
+ """A non primary nic set is not required to have a gateway."""
+ config = textwrap.dedent("""\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ """)
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [{'type': 'physical', 'name': 'NIC1',
+ 'mac_address': '00:50:56:a6:8c:08',
+ 'subnets': [
+ {'control': 'auto', 'type': 'static',
+ 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
+ nc.generate())
+
+ def test_non_primary_nic_with_gateway(self):
+ """A non primary nic set can have a gateway."""
+ config = textwrap.dedent("""\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ GATEWAY = 10.20.87.253
+ """)
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [{'type': 'physical', 'name': 'NIC1',
+ 'mac_address': '00:50:56:a6:8c:08',
+ 'subnets': [
+ {'control': 'auto', 'type': 'static',
+ 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]},
+ {'type': 'route', 'destination': '10.20.84.0/22',
+ 'gateway': '10.20.87.253', 'metric': 10000}],
+ nc.generate())
+
+ def test_a_primary_nic_with_gateway(self):
+ """A primary nic set can have a gateway."""
+ config = textwrap.dedent("""\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ PRIMARY = true
+ GATEWAY = 10.20.87.253
+ """)
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [{'type': 'physical', 'name': 'NIC1',
+ 'mac_address': '00:50:56:a6:8c:08',
+ 'subnets': [
+ {'control': 'auto', 'type': 'static',
+ 'address': '10.20.87.154', 'netmask': '255.255.252.0',
+ 'gateway': '10.20.87.253'}]}],
+ nc.generate())
+
+
# vi: ts=4 expandtab
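
The new TestVmwareNetConfig cases drive the whole path from a VMware
customization file to cloud-init network config. A condensed sketch of that
flow, assuming the imc helpers live at cloudinit.sources.helpers.vmware.imc
as in the cloud-init tree:

    import tempfile
    import textwrap

    from cloudinit.sources.helpers.vmware.imc.config import Config
    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
    from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator

    CONF = textwrap.dedent("""\
        [NETWORK]
        NETWORKING = yes
        BOOTPROTO = dhcp
        HOSTNAME = myhost1
        DOMAINNAME = eng.vmware.com

        [NIC-CONFIG]
        NICS = NIC1

        [NIC1]
        MACADDR = 00:50:56:a6:8c:08
        ONBOOT = yes
        BOOTPROTO = static
        IPADDR = 10.20.87.154
        NETMASK = 255.255.252.0
        """)

    with tempfile.NamedTemporaryFile(mode="w", suffix=".cfg") as fp:
        fp.write(CONF)
        fp.flush()
        nics = Config(ConfigFile(fp.name)).nics
        # yields a 'physical' entry for NIC1 with one static subnet
        print(NicConfigurator(nics, use_system_devices=False).generate())
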
diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh
index 4978d87e..9ee44bd2 100644
--- a/tools/Z99-cloud-locale-test.sh
+++ b/tools/Z99-cloud-locale-test.sh
@@ -11,8 +11,11 @@
# of how to fix them.
locale_warn() {
- local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
- local w1 w2 w3 w4 remain
+ command -v local >/dev/null && local _local="local" ||
+ typeset _local="typeset"
+
+ $_local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv=""
+ $_local w1 w2 w3 w4 remain
# if shell is zsh, act like sh only for this function (-L).
# The behavior change will not permanently affect the user's shell.
@@ -53,8 +56,8 @@ locale_warn() {
printf " This can affect your user experience significantly, including the\n"
printf " ability to manage packages. You may install the locales by running:\n\n"
- local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
- local pkgs=""
+ $_local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED"
+ $_local local pkgs=""
if [ -e "$sfile" ]; then
for bad in ${bad_lcs}; do
grep -q -i "${bad}" "$sfile" &&
@@ -67,7 +70,7 @@ locale_warn() {
fi
to_gen=${to_gen# }
- local pkgs=""
+ $_local pkgs=""
for bad in ${to_gen}; do
pkgs="${pkgs} language-pack-${bad%%_*}"
done
diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh
index 1d413374..cb8b4638 100644
--- a/tools/Z99-cloudinit-warnings.sh
+++ b/tools/Z99-cloudinit-warnings.sh
@@ -4,9 +4,11 @@
# Purpose: show user warnings on login.
cloud_init_warnings() {
- local warning="" idir="/var/lib/cloud/instance" n=0
- local warndir="$idir/warnings"
- local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
+ command -v local >/dev/null && local _local="local" ||
+ typeset _local="typeset"
+ $_local warning="" idir="/var/lib/cloud/instance" n=0
+ $_local warndir="$idir/warnings"
+ $_local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip"
[ -d "$warndir" ] || return 0
[ ! -f "$ufile" ] || return 0
[ ! -f "$sfile" ] || return 0
diff --git a/tools/ds-identify b/tools/ds-identify
index 9a2db5c4..5afe5aa1 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1,16 +1,25 @@
#!/bin/sh
+# shellcheck disable=2015,2039,2162,2166
#
# ds-identify is configured via /etc/cloud/ds-identify.cfg
-# or on the kernel command line. It takes primarily 2 inputs:
+# or on the kernel command line. It takes the following inputs:
+#
# datasource: can specify the datasource that should be used.
-# kernel command line option: ci.datasource=<dsname>
+# kernel command line option: ci.datasource=<dsname> or ci.ds=<dsname>
+# example line in /etc/cloud/ds-identify.cfg:
+# datasource: Ec2
#
# policy: a string that indicates how ds-identify should operate.
-# kernel command line option: ci.di.policy=<policy>
+#
# The format is:
# <mode>,found=value,maybe=value,notfound=value
# default setting is:
-# search,found=all,maybe=all,notfound=disable
+# search,found=all,maybe=all,notfound=disabled
+#
+# kernel command line option: ci.di.policy=<policy>
+# example line in /etc/cloud/ds-identify.cfg:
+# policy: search,found=all,maybe=none,notfound=disabled
+#
#
# Mode:
# disabled: disable cloud-init
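
For readers skimming the header, a hypothetical Python rendering of the
policy string's shape (ds-identify's real parser is the shell parse_policy
function further down in this file):

    def parse_policy(policy):
        parsed = {'mode': 'search', 'found': 'all',
                  'maybe': 'all', 'notfound': 'disabled'}  # documented default
        for tok in policy.split(','):
            key, sep, val = tok.partition('=')
            if sep and key in ('found', 'maybe', 'notfound'):
                parsed[key] = val
            elif tok in ('disabled', 'enabled', 'search', 'report'):
                parsed['mode'] = tok
        return parsed

    print(parse_policy("search,found=all,maybe=none,notfound=disabled"))
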
@@ -115,7 +124,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -186,6 +195,16 @@ block_dev_with_label() {
return 0
}
+ensure_sane_path() {
+ local t
+ for t in /sbin /usr/sbin /bin /usr/bin; do
+ case ":$PATH:" in
+ *:$t:*|*:$t/:*) continue;;
+ esac
+ PATH="${PATH:+${PATH}:}$t"
+ done
+}
+
read_fs_info() {
cached "${DI_BLKID_OUTPUT}" && return 0
# do not rely on links in /dev/disk which might not be present yet.
@@ -210,7 +229,9 @@ read_fs_info() {
# 'set --' will collapse multiple consecutive entries in IFS for
# whitespace characters (\n, tab, " ") so we cannot rely on getting
# empty lines in "$@" below.
- IFS="$CR"; set -- $out; IFS="$oifs"
+
+ # shellcheck disable=2086
+ { IFS="$CR"; set -- $out; IFS="$oifs"; }
for line in "$@"; do
case "${line}" in
@@ -258,7 +279,7 @@ read_virt() {
is_container() {
case "${DI_VIRT}" in
- lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
+ container-other|lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
*) return 1;;
esac
}
@@ -310,6 +331,7 @@ read_dmi_product_serial() {
DI_DMI_PRODUCT_SERIAL="$_RET"
}
+# shellcheck disable=2034
read_uname_info() {
# run uname, and parse output.
# uname is tricky to parse as it outputs always in a given order
@@ -329,6 +351,7 @@ read_uname_info() {
return $ret
}
fi
+ # shellcheck disable=2086
set -- $out
DI_UNAME_KERNEL_NAME="$1"
DI_UNAME_NODENAME="$2"
@@ -356,7 +379,8 @@ parse_yaml_array() {
# the fix was to quote the open bracket (val=${val#"["}) (LP: #1689648)
val=${val#"["}
val=${val%"]"}
- IFS=","; set -- $val; IFS="$oifs"
+ # shellcheck disable=2086
+ { IFS=","; set -- $val; IFS="$oifs"; }
for tok in "$@"; do
trim "$tok"
unquote "$_RET"
@@ -392,7 +416,7 @@ read_datasource_list() {
fi
if [ -z "$dslist" ]; then
dslist=${DI_DSLIST_DEFAULT}
- debug 1 "no datasource_list found, using default:" $dslist
+ debug 1 "no datasource_list found, using default: $dslist"
fi
DI_DSLIST=$dslist
return 0
@@ -403,7 +427,8 @@ read_pid1_product_name() {
cached "${DI_PID_1_PRODUCT_NAME}" && return
[ -r "${PATH_PROC_1_ENVIRON}" ] || return
out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}")
- IFS="$CR"; set -- $out; IFS="$oifs"
+ # shellcheck disable=2086
+ { IFS="$CR"; set -- $out; IFS="$oifs"; }
for tok in "$@"; do
key=${tok%%=*}
[ "$key" != "$tok" ] || continue
@@ -470,6 +495,7 @@ nocase_equal() {
[ "$1" = "$2" ] && return 0
local delim="-delim-"
+ # shellcheck disable=2018,2019
out=$(echo "$1${delim}$2" | tr A-Z a-z)
[ "${out#*${delim}}" = "${out%${delim}*}" ]
}
@@ -546,11 +572,13 @@ check_config() {
else
files="$*"
fi
- set +f; set -- $files; set -f;
+ # shellcheck disable=2086
+ { set +f; set -- $files; set -f; }
if [ "$1" = "$files" -a ! -f "$1" ]; then
return 1
fi
local fname="" line="" ret="" found=0 found_fn=""
+ # shellcheck disable=2094
for fname in "$@"; do
[ -f "$fname" ] || continue
while read line; do
@@ -600,7 +628,6 @@ dscheck_NoCloud() {
*\ ds=nocloud*) return ${DS_FOUND};;
esac
- is_ibm_cloud && return ${DS_NOT_FOUND}
for d in nocloud nocloud-net; do
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
@@ -611,11 +638,12 @@ dscheck_NoCloud() {
return ${DS_NOT_FOUND}
}
+is_ds_enabled() {
+ local name="$1" pad=" ${DI_DSLIST} "
+ [ "${pad#* $name }" != "${pad}" ]
+}
+
check_configdrive_v2() {
- is_ibm_cloud && return ${DS_NOT_FOUND}
- if has_fs_with_label CONFIG-2 config-2; then
- return ${DS_FOUND}
- fi
# look in /config-drive <vlc>/seed/config_drive for a directory
# openstack/YYYY-MM-DD format with a file meta_data.json
local d=""
@@ -630,6 +658,15 @@ check_configdrive_v2() {
debug 1 "config drive seeded directory had only 'latest'"
return ${DS_FOUND}
fi
+
+ local ibm_enabled=false
+ is_ds_enabled "IBMCloud" && ibm_enabled=true
+ debug 1 "is_ds_enabled(IBMCloud) = $ibm_enabled."
+ [ "$ibm_enabled" = "true" ] && is_ibm_cloud && return ${DS_NOT_FOUND}
+
+ if has_fs_with_label CONFIG-2 config-2; then
+ return ${DS_FOUND}
+ fi
return ${DS_NOT_FOUND}
}
@@ -786,7 +823,7 @@ ec2_read_strict_setting() {
# 3. look for the key 'strict_id' (datasource/Ec2/strict_id)
# only in cloud.cfg or cloud.cfg.d/EC2.cfg (case insensitive)
local cfg="${PATH_ETC_CI_CFG}" cfg_d="${PATH_ETC_CI_CFG_D}"
- if check_config strict_id $cfg "$cfg_d/*[Ee][Cc]2*.cfg"; then
+ if check_config strict_id "$cfg" "$cfg_d/*[Ee][Cc]2*.cfg"; then
debug 2 "${_RET_fname} set strict_id to $_RET"
return 0
fi
@@ -971,12 +1008,14 @@ dscheck_SmartOS() {
# joyent cloud has two virt types: kvm and container
# on kvm, product name on joyent public cloud shows 'SmartDC HVM'
# on the container platform, uname's version has: BrandZ virtual linux
+ # for container, we also verify that the socketfile exists to protect
+ # against embedded containers (lxd running on brandz)
local smartdc_kver="BrandZ virtual linux"
+ local metadata_sockfile="${PATH_ROOT}/native/.zonecontrol/metadata.sock"
dmi_product_name_matches "SmartDC*" && return $DS_FOUND
- if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] &&
- [ "${DI_VIRT}" = "container-other" ]; then
- return ${DS_FOUND}
- fi
+ [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] &&
+ [ -e "${metadata_sockfile}" ] &&
+ return ${DS_FOUND}
return ${DS_NOT_FOUND}
}
@@ -993,7 +1032,7 @@ dscheck_Scaleway() {
*\ scaleway\ *) return ${DS_FOUND};;
esac
- if [ -f ${PATH_ROOT}/var/run/scaleway ]; then
+ if [ -f "${PATH_ROOT}/var/run/scaleway" ]; then
return ${DS_FOUND}
fi
@@ -1005,8 +1044,32 @@ dscheck_Hetzner() {
return ${DS_NOT_FOUND}
}
+dscheck_Oracle() {
+ local asset_tag="OracleCloud.com"
+ dmi_chassis_asset_tag_matches "${asset_tag}" && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
is_ibm_provisioning() {
- [ -f "${PATH_ROOT}/root/provisioningConfiguration.cfg" ]
+ local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg"
+ local logf="${PATH_ROOT}/root/swinstall.log"
+ local is_prov=false msg="config '$pcfg' did not exist."
+ if [ -f "$pcfg" ]; then
+ msg="config '$pcfg' exists."
+ is_prov=true
+ if [ -f "$logf" ]; then
+ if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then
+ msg="$msg log '$logf' from current boot."
+ else
+ is_prov=false
+ msg="$msg log '$logf' from previous boot."
+ fi
+ else
+ msg="$msg log '$logf' did not exist."
+ fi
+ fi
+ debug 2 "ibm_provisioning=$is_prov: $msg"
+ [ "$is_prov" = "true" ]
}
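
A hypothetical Python rendering of the freshness check above: provisioning
mode is only claimed when the install log was written during the current
boot, i.e. it is newer than PID 1's environ file (paths simplified, ignoring
PATH_ROOT):

    import os

    def is_ibm_provisioning(root=''):
        pcfg = os.path.join(root, 'root/provisioningConfiguration.cfg')
        logf = os.path.join(root, 'root/swinstall.log')
        if not os.path.exists(pcfg):
            return False
        if not os.path.exists(logf):
            return True
        return os.path.getmtime(logf) > os.path.getmtime('/proc/1/environ')
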
is_ibm_cloud() {
@@ -1130,6 +1193,7 @@ found() {
}
trim() {
+ # shellcheck disable=2048,2086
set -- $*
_RET="$*"
}
@@ -1150,7 +1214,7 @@ _read_config() {
# if no parameters are set, modifies _rc scoped environment vars.
# if keyname is provided, then returns found value of that key.
local keyname="${1:-_unset}"
- local line="" hash="#" ckey="" key="" val=""
+ local line="" hash="#" key="" val=""
while read line; do
line=${line%%${hash}*}
key="${line%%:*}"
@@ -1228,7 +1292,8 @@ parse_policy() {
local mode="" report="" found="" maybe="" notfound=""
local oifs="$IFS" tok="" val=""
- IFS=","; set -- $policy; IFS="$oifs"
+ # shellcheck disable=2086
+ { IFS=","; set -- $policy; IFS="$oifs"; }
for tok in "$@"; do
val=${tok#*=}
case "$tok" in
@@ -1295,15 +1360,15 @@ manual_clean_and_existing() {
}
read_uptime() {
- local up idle
+ local up _
_RET="${UNAVAILABLE}"
- [ -f "$PATH_PROC_UPTIME" ] &&
- read up idle < "$PATH_PROC_UPTIME" && _RET="$up"
+ [ -f "$PATH_PROC_UPTIME" ] && read up _ < "$PATH_PROC_UPTIME" &&
+ _RET="$up"
return
}
_main() {
- local dscheck="" ret_dis=1 ret_en=0
+ local dscheck_fn="" ret_dis=1 ret_en=0
read_uptime
debug 1 "[up ${_RET}s]" "ds-identify $*"
@@ -1338,8 +1403,9 @@ _main() {
return
fi
- # if there is only a single entry in $DI_DSLIST
+ # shellcheck disable=2086
set -- $DI_DSLIST
+ # if there is only a single entry in $DI_DSLIST
if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then
debug 1 "single entry in datasource_list ($DI_DSLIST) use that."
found "$@"
@@ -1372,6 +1438,7 @@ _main() {
done
debug 2 "found=${found# } maybe=${maybe# }"
+ # shellcheck disable=2086
set -- $found
if [ $# -ne 0 ]; then
if [ $# -eq 1 ]; then
@@ -1387,6 +1454,7 @@ _main() {
return
fi
+ # shellcheck disable=2086
set -- $maybe
if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then
debug 1 "$# datasources returned maybe: $*"
@@ -1415,18 +1483,19 @@ _main() {
*) error "Unexpected result";;
esac
debug 1 "$msg"
- return $ret
+ return "$ret"
}
main() {
local ret=""
+ ensure_sane_path
[ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI"
if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] &&
[ -f "$PATH_RUN_DI_RESULT" ]; then
if read ret < "$PATH_RUN_DI_RESULT"; then
if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then
debug 2 "used cached result $ret. pass --force to re-run."
- return $ret;
+ return "$ret";
fi
debug 1 "previous run returned unexpected '$ret'. Re-running."
else
@@ -1438,7 +1507,7 @@ main() {
echo "$ret" > "$PATH_RUN_DI_RESULT"
read_uptime
debug 1 "[up ${_RET}s]" "returning $ret"
- return $ret
+ return "$ret"
}
noop() {
diff --git a/tools/make-tarball b/tools/make-tarball
index 3197689f..8d540139 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -13,22 +13,28 @@ Usage: ${0##*/} [revision]
create a tarball of revision (default HEAD)
options:
- -o | --output FILE write to file
+ -h | --help print usage
+ -o | --output FILE write to file
+ --orig-tarball Write file cloud-init_<version>.orig.tar.gz
+ --long Use git describe --long for versioning
EOF
}
short_opts="ho:v"
-long_opts="help,output:,long,verbose"
+long_opts="help,output:,orig-tarball,long"
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; }
long_opt=""
+orig_opt=""
while [ $# -ne 0 ]; do
cur=$1; next=$2
case "$cur" in
+ -h|--help) Usage; exit 0;;
-o|--output) output=$next; shift;;
--long) long_opt="--long";;
+ --orig-tarball) orig_opt=".orig";;
--) shift; break;;
esac
shift;
@@ -39,7 +45,10 @@ version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
archive_base="cloud-init-$version"
if [ -z "$output" ]; then
- output="$archive_base.tar.gz"
+ if [ ! -z "$orig_opt" ]; then
+ archive_base="cloud-init_$version"
+ fi
+ output="$archive_base$orig_opt.tar.gz"
fi
# when building an archive from HEAD, ensure that there aren't any
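
The effect of --orig-tarball on naming, as an illustrative helper (not part
of the tool itself):

    def tarball_name(version, orig=False):
        base = ('cloud-init_' if orig else 'cloud-init-') + version
        return base + ('.orig.tar.gz' if orig else '.tar.gz')

    print(tarball_name('18.4'))             # cloud-init-18.4.tar.gz
    print(tarball_name('18.4', orig=True))  # cloud-init_18.4.orig.tar.gz
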
diff --git a/tools/net-convert.py b/tools/net-convert.py
deleted file mode 100755
index 68559cbf..00000000
--- a/tools/net-convert.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/python3
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import argparse
-import json
-import os
-import yaml
-
-from cloudinit.sources.helpers import openstack
-
-from cloudinit.net import eni
-from cloudinit.net import netplan
-from cloudinit.net import network_state
-from cloudinit.net import sysconfig
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--network-data", "-p", type=open,
- metavar="PATH", required=True)
- parser.add_argument("--kind", "-k",
- choices=['eni', 'network_data.json', 'yaml'],
- required=True)
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--output-kind", "-ok",
- choices=['eni', 'netplan', 'sysconfig'],
- required=True)
- args = parser.parse_args()
-
- if not os.path.isdir(args.directory):
- os.makedirs(args.directory)
-
- if args.mac:
- known_macs = {}
- for item in args.mac:
- iface_name, iface_mac = item.split(",", 1)
- known_macs[iface_mac] = iface_name
- else:
- known_macs = None
-
- net_data = args.network_data.read()
- if args.kind == "eni":
- pre_ns = eni.convert_eni_data(net_data)
- ns = network_state.parse_net_config_data(pre_ns)
- elif args.kind == "yaml":
- pre_ns = yaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
- print("Input YAML")
- print(yaml.dump(pre_ns, default_flow_style=False, indent=4))
- ns = network_state.parse_net_config_data(pre_ns)
- else:
- pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- ns = network_state.parse_net_config_data(pre_ns)
-
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- "input data")
-
- print("\nInternal State")
- print(yaml.dump(ns, default_flow_style=False, indent=4))
- if args.output_kind == "eni":
- r_cls = eni.Renderer
- elif args.output_kind == "netplan":
- r_cls = netplan.Renderer
- else:
- r_cls = sysconfig.Renderer
-
- r = r_cls()
- r.render_network_state(network_state=ns, target=args.directory)
-
-
-if __name__ == '__main__':
- main()
-
-# vi: ts=4 expandtab
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 421f470a..b4656e69 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -51,6 +51,10 @@ MAYBE_RELIABLE_YUM_INSTALL = [
""",
'reliable-yum-install']
+ZYPPER_INSTALL = [
+ 'zypper', '--non-interactive', '--gpg-auto-import-keys', 'install',
+ '--auto-agree-with-licenses']
+
DRY_DISTRO_INSTALL_PKG_CMD = {
'centos': ['yum', 'install', '--assumeyes'],
'redhat': ['yum', 'install', '--assumeyes'],
@@ -61,8 +65,8 @@ DISTRO_INSTALL_PKG_CMD = {
'redhat': MAYBE_RELIABLE_YUM_INSTALL,
'debian': ['apt', 'install', '-y'],
'ubuntu': ['apt', 'install', '-y'],
- 'opensuse': ['zypper', 'install'],
- 'suse': ['zypper', 'install']
+ 'opensuse': ZYPPER_INSTALL,
+ 'suse': ZYPPER_INSTALL,
}
diff --git a/tools/run-centos b/tools/run-centos
index cb241ee5..4506b20d 100755
--- a/tools/run-centos
+++ b/tools/run-centos
@@ -1,18 +1,17 @@
#!/bin/bash
# This file is part of cloud-init. See LICENSE file for license information.
-set -u
-
-VERBOSITY=0
-TEMP_D=""
-KEEP=false
-CONTAINER=""
-
-error() { echo "$@" 1>&2; }
-fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
-errorrc() { local r=$?; error "$@" "ret=$r"; return $r; }
+deprecated() {
+cat <<EOF
+ ================ DEPRECATED ================
+ | run-centos is deprecated. Please replace |
+ | your usage with tools/run-container . |
+ ================ DEPRECATED ================
+EOF
+}
Usage() {
+ deprecated
cat <<EOF
Usage: ${0##*/} [ options ] version
@@ -34,319 +33,40 @@ Usage: ${0##*/} [ options ] version
Example:
* ${0##*/} --rpm --srpm --unittest 6
EOF
+ deprecated
}
bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; }
-cleanup() {
- if [ -n "$CONTAINER" -a "$KEEP" = "false" ]; then
- delete_container "$CONTAINER"
- fi
- [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
-}
-
-debug() {
- local level=${1}; shift;
- [ "${level}" -gt "${VERBOSITY}" ] && return
- error "${@}"
-}
-
-
-inside_as() {
- # inside_as(container_name, user, cmd[, args])
- # executes cmd with args inside container as user in users home dir.
- local name="$1" user="$2"
- shift 2
- if [ "$user" = "root" ]; then
- inside "$name" "$@"
- return
- fi
- local stuffed="" b64=""
- stuffed=$(getopt --shell sh --options "" -- -- "$@")
- stuffed=${stuffed# -- }
- b64=$(printf "%s\n" "$stuffed" | base64 --wrap=0)
- inside "$name" su "$user" -c \
- 'cd; eval set -- "$(echo '$b64' | base64 --decode)" && exec "$@"'
-}
-
-inside_as_cd() {
- local name="$1" user="$2" dir="$3"
- shift 3
- inside_as "$name" "$user" sh -c 'cd "$0" && exec "$@"' "$dir" "$@"
-}
-
-inside() {
- local name="$1"
- shift
- lxc exec "$name" -- "$@"
-}
-
-inject_cloud_init(){
- # take current cloud-init git dir and put it inside $name at
- # ~$user/cloud-init.
- local name="$1" user="$2" dirty="$3"
- local changes="" top_d="" dname="cloud-init" pstat=""
- local gitdir="" commitish=""
- gitdir=$(git rev-parse --git-dir) || {
- errorrc "Failed to get git dir in $PWD";
- return
- }
- local t=${gitdir%/*}
- case "$t" in
- */worktrees)
- if [ -f "${t%worktrees}/config" ]; then
- gitdir="${t%worktrees}"
- fi
- esac
-
- # attempt to get branch name.
- commitish=$(git rev-parse --abbrev-ref HEAD) || {
- errorrc "Failed git rev-parse --abbrev-ref HEAD"
- return
- }
- if [ "$commitish" = "HEAD" ]; then
- # detached head
- commitish=$(git rev-parse HEAD) || {
- errorrc "failed git rev-parse HEAD"
- return
- }
- fi
-
- local local_changes=false
- if ! git diff --quiet "$commitish"; then
- # there are local changes not committed.
- local_changes=true
- if [ "$dirty" = "false" ]; then
- error "WARNING: You had uncommitted changes. Those changes will "
- error "be put into 'local-changes.diff' inside the container. "
- error "To test these changes you must pass --dirty."
- fi
- fi
-
- debug 1 "collecting ${gitdir} ($dname) into user $user in $name."
- tar -C "${gitdir}" -cpf - . |
- inside_as "$name" "$user" sh -ec '
- dname=$1
- commitish=$2
- rm -Rf "$dname"
- mkdir -p $dname/.git
- cd $dname/.git
- tar -xpf -
- cd ..
- git config core.bare false
- out=$(git checkout $commitish 2>&1) ||
- { echo "failed git checkout $commitish: $out" 1>&2; exit 1; }
- out=$(git checkout . 2>&1) ||
- { echo "failed git checkout .: $out" 1>&2; exit 1; }
- ' extract "$dname" "$commitish"
- [ "${PIPESTATUS[*]}" = "0 0" ] || {
- error "Failed to push tarball of '$gitdir' into $name" \
- " for user $user (dname=$dname)"
- return 1
- }
- echo "local_changes=$local_changes dirty=$dirty"
- if [ "$local_changes" = "true" ]; then
- git diff "$commitish" |
- inside_as "$name" "$user" sh -exc '
- cd "$1"
- if [ "$2" = "true" ]; then
- git apply
- else
- cat > local-changes.diff
- fi
- ' insert_changes "$dname" "$dirty"
- [ "${PIPESTATUS[*]}" = "0 0" ] || {
- error "Failed to apply local changes."
- return 1
- }
- fi
-
- return 0
-}
-
-prep() {
- # we need some very basic things not present in the container.
- # - git
- # - tar (CentOS 6 lxc container does not have it)
- # - python-argparse (or python3)
- local needed="" pair="" pkg="" cmd="" needed=""
- for pair in tar:tar git:git; do
- pkg=${pair#*:}
- cmd=${pair%%:*}
- command -v $cmd >/dev/null 2>&1 || needed="${needed} $pkg"
- done
- if ! command -v python3; then
- python -c "import argparse" >/dev/null 2>&1 ||
- needed="${needed} python-argparse"
- fi
- needed=${needed# }
- if [ -z "$needed" ]; then
- error "No prep packages needed"
- return 0
+main() {
+ if [ "$1" = "-h" -o "$1" == "--help" ]; then
+ Usage 1>&2;
+ exit 0;
fi
- error "Installing prep packages: ${needed}"
- set -- $needed
- local n max r
- n=0; max=10;
- bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1"
- while n=$(($n+1)); do
- error ":: running $bcmd $* [$n/$max]"
- $bcmd "$@"
- r=$?
- [ $r -eq 0 ] && break
- [ $n -ge $max ] && { error "gave up on $bcmd"; exit $r; }
- nap=$(($n*5))
- error ":: failed [$r] ($n/$max). sleeping $nap."
- sleep $nap
- done
- error ":: running yum install --cacheonly --assumeyes $*"
- yum install --cacheonly --assumeyes "$@"
-}
-
-start_container() {
- local src="$1" name="$2"
- debug 1 "starting container $name from '$src'"
- lxc launch "$src" "$name" || {
- errorrc "Failed to start container '$name' from '$src'";
+ local pt="" mydir=$(dirname "$0")
+ local run_container="$mydir/run-container"
+ if [ ! -x "$run_container" ]; then
+ bad_Usage "Could not find run-container."
return
- }
- CONTAINER=$name
-
- local out="" ret=""
- debug 1 "waiting for networking"
- out=$(inside "$name" sh -c '
- i=0
- while [ $i -lt 60 ]; do
- getent hosts mirrorlist.centos.org && exit 0
- sleep 2
- done' 2>&1)
- ret=$?
- if [ $ret -ne 0 ]; then
- error "Waiting for network in container '$name' failed. [$ret]"
- error "$out"
- return $ret
- fi
-
- if [ ! -z "${http_proxy-}" ]; then
- debug 1 "configuring proxy ${http_proxy}"
- inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
- inside "$name" sed -i s/enabled=1/enabled=0/ /etc/yum/pluginconf.d/fastestmirror.conf
fi
-}
-
-delete_container() {
- debug 1 "removing container $1 [--keep to keep]"
- lxc delete --force "$1"
-}
-
-main() {
- local short_opts="ahkrsuv"
- local long_opts="artifact,dirty,help,keep,rpm,srpm,unittest,verbose"
- local getopt_out=""
- getopt_out=$(getopt --name "${0##*/}" \
- --options "${short_opts}" --long "${long_opts}" -- "$@") &&
- eval set -- "${getopt_out}" ||
- { bad_Usage; return; }
-
- local cur="" next=""
- local artifact="" keep="" rpm="" srpm="" unittest="" version=""
- local dirty=false
-
+
+ pt=( "$run_container" )
while [ $# -ne 0 ]; do
cur="${1:-}"; next="${2:-}";
case "$cur" in
- -a|--artifact) artifact=1;;
- --dirty) dirty=true;;
- -h|--help) Usage ; exit 0;;
- -k|--keep) KEEP=true;;
- -r|--rpm) rpm=1;;
- -s|--srpm) srpm=1;;
- -u|--unittest) unittest=1;;
- -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
- --) shift; break;;
+ -r|--rpm) cur="--package";;
+ -s|--srpm) cur="--source-package";;
+ -a|--artifact) cur="--artifacts=.";;
+ 6|7) cur="centos/$cur";;
esac
+ pt[${#pt[@]}]="$cur"
shift;
done
-
- [ $# -eq 1 ] || { bad_Usage "ERROR: Must provide version!"; return; }
- version="$1"
- case "$version" in
- 6|7) :;;
- *) error "Expected version of 6 or 7, not '$version'"; return;;
- esac
-
- TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
- fail "failed to make tempdir"
- trap cleanup EXIT
-
- # program starts here
- local uuid="" name="" user="ci-test" cdir=""
- cdir="/home/$user/cloud-init"
- uuid=$(uuidgen -t) || { error "no uuidgen"; return 1; }
- name="cloud-init-centos-${uuid%%-*}"
-
- start_container "images:centos/$version" "$name"
-
- # prep the container (install very basic dependencies)
- inside "$name" bash -s prep <"$0" ||
- { errorrc "Failed to prep container $name"; return; }
-
- # add the user
- inside "$name" useradd "$user"
-
- debug 1 "inserting cloud-init"
- inject_cloud_init "$name" "$user" "$dirty" || {
- errorrc "FAIL: injecting cloud-init into $name failed."
- return
- }
-
- inside_as_cd "$name" root "$cdir" \
- ./tools/read-dependencies --distro=centos --test-distro || {
- errorrc "FAIL: failed to install dependencies with read-dependencies"
- return
- }
-
- local errors=0
- inside_as_cd "$name" "$user" "$cdir" \
- sh -ec "git status" ||
- { errorrc "git checkout failed."; errors=$(($errors+1)); }
-
- if [ -n "$unittest" ]; then
- debug 1 "running unit tests."
- inside_as_cd "$name" "$user" "$cdir" \
- nosetests tests/unittests cloudinit ||
- { errorrc "nosetests failed."; errors=$(($errors+1)); }
- fi
-
- if [ -n "$srpm" ]; then
- debug 1 "building srpm."
- inside_as_cd "$name" "$user" "$cdir" ./packages/brpm --srpm ||
- { errorrc "brpm --srpm."; errors=$(($errors+1)); }
- fi
-
- if [ -n "$rpm" ]; then
- debug 1 "building rpm."
- inside_as_cd "$name" "$user" "$cdir" ./packages/brpm ||
- { errorrc "brpm failed."; errors=$(($errors+1)); }
- fi
-
- if [ -n "$artifact" ]; then
- for built_rpm in $(inside "$name" sh -c "echo $cdir/*.rpm"); do
- lxc file pull "$name/$built_rpm" .
- done
- fi
-
- if [ "$errors" != "0" ]; then
- error "there were $errors errors."
- return 1
- fi
- return 0
+ deprecated
+ exec "${pt[@]}"
}
-if [ "${1:-}" = "prep" ]; then
- shift
- prep "$@"
-else
- main "$@"
-fi
+main "$@"
+
# vi: ts=4 expandtab
diff --git a/tools/run-container b/tools/run-container
new file mode 100755
index 00000000..6dedb757
--- /dev/null
+++ b/tools/run-container
@@ -0,0 +1,592 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+#
+# shellcheck disable=2015,2016,2039,2162,2166
+
+set -u
+
+VERBOSITY=0
+KEEP=false
+CONTAINER=""
+DEFAULT_WAIT_MAX=30
+
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+errorrc() { local r=$?; error "$@" "ret=$r"; return $r; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] [images:]image-ref
+
+ This utility makes it easier to run tests and build binary and source
+ packages inside a LXC container of the specified image.
+
+ To see images available, run 'lxc image list images:'
+ Example input:
+ centos/7
+ opensuse/42.3
+ debian/10
+
+ options:
+ -a | --artifacts DIR copy build artifacts out to DIR.
+ by default artifacts are not copied out.
+ --dirty apply local changes before running tests.
+ If not provided, a clean checkout of branch is
+ tested. Inside container, changes are in
+ local-changes.diff.
+ -k | --keep keep container after tests
+ --pyexe V python version to use. Default=auto.
+ Should be name of an executable.
+ ('python2' or 'python3')
+ -p | --package build a binary package (.deb or .rpm)
+ -s | --source-package build source package (debuild -S or srpm)
+ -u | --unittest run unit tests
+
+ Example:
+ * ${0##*/} --package --source-package --unittest centos/6
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; }
+cleanup() {
+ if [ -n "$CONTAINER" ]; then
+ if [ "$KEEP" = "true" ]; then
+ error "not deleting container '$CONTAINER' due to --keep"
+ else
+ delete_container "$CONTAINER"
+ fi
+ fi
+}
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+
+
+inside_as() {
+ # inside_as(container_name, user, cmd[, args])
+ # executes cmd with args inside container as user in users home dir.
+ local name="$1" user="$2"
+ shift 2
+ if [ "$user" = "root" ]; then
+ inside "$name" "$@"
+ return
+ fi
+ local stuffed="" b64=""
+ stuffed=$(getopt --shell sh --options "" -- -- "$@")
+ stuffed=${stuffed# -- }
+ b64=$(printf "%s\n" "$stuffed" | base64 --wrap=0)
+ inside "$name" su "$user" -c \
+ 'cd; eval set -- "$(echo '"$b64"' | base64 --decode)" && exec "$@"';
+}
+
+inside_as_cd() {
+ local name="$1" user="$2" dir="$3"
+ shift 3
+ inside_as "$name" "$user" sh -c 'cd "$0" && exec "$@"' "$dir" "$@"
+}
+
+inside() {
+ local name="$1"
+ shift
+ lxc exec "$name" -- "$@"
+}
+
+inject_cloud_init(){
+ # take current cloud-init git dir and put it inside $name at
+ # ~$user/cloud-init.
+ local name="$1" user="$2" dirty="$3"
+ local dname="cloud-init" gitdir="" commitish=""
+ gitdir=$(git rev-parse --git-dir) || {
+ errorrc "Failed to get git dir in $PWD";
+ return
+ }
+ local t=${gitdir%/*}
+ case "$t" in
+ */worktrees)
+ if [ -f "${t%worktrees}/config" ]; then
+ gitdir="${t%worktrees}"
+ fi
+ esac
+
+ # attempt to get branch name.
+ commitish=$(git rev-parse --abbrev-ref HEAD) || {
+ errorrc "Failed git rev-parse --abbrev-ref HEAD"
+ return
+ }
+ if [ "$commitish" = "HEAD" ]; then
+ # detached head
+ commitish=$(git rev-parse HEAD) || {
+ errorrc "failed git rev-parse HEAD"
+ return
+ }
+ fi
+
+ local local_changes=false
+ if ! git diff --quiet "$commitish"; then
+ # there are local changes not committed.
+ local_changes=true
+ if [ "$dirty" = "false" ]; then
+ error "WARNING: You had uncommitted changes. Those changes will "
+ error "be put into 'local-changes.diff' inside the container. "
+ error "To test these changes you must pass --dirty."
+ fi
+ fi
+
+ debug 1 "collecting ${gitdir} ($dname) into user $user in $name."
+ tar -C "${gitdir}" -cpf - . |
+ inside_as "$name" "$user" sh -ec '
+ dname=$1
+ commitish=$2
+ rm -Rf "$dname"
+ mkdir -p $dname/.git
+ cd $dname/.git
+ tar -xpf -
+ cd ..
+ git config core.bare false
+ out=$(git checkout $commitish 2>&1) ||
+ { echo "failed git checkout $commitish: $out" 1>&2; exit 1; }
+ out=$(git checkout . 2>&1) ||
+ { echo "failed git checkout .: $out" 1>&2; exit 1; }
+ ' extract "$dname" "$commitish"
+ [ "${PIPESTATUS[*]}" = "0 0" ] || {
+ error "Failed to push tarball of '$gitdir' into $name" \
+ " for user $user (dname=$dname)"
+ return 1
+ }
+
+ echo "local_changes=$local_changes dirty=$dirty"
+ if [ "$local_changes" = "true" ]; then
+ git diff "$commitish" |
+ inside_as "$name" "$user" sh -exc '
+ cd "$1"
+ if [ "$2" = "true" ]; then
+ git apply
+ else
+ cat > local-changes.diff
+ fi
+ ' insert_changes "$dname" "$dirty"
+ [ "${PIPESTATUS[*]}" = "0 0" ] || {
+ error "Failed to apply local changes."
+ return 1
+ }
+ fi
+
+ return 0
+}
+
+get_os_info_in() {
+ # determine and cache OS_NAME/OS_VERSION of the named container.
+ [ -n "${OS_VERSION:-}" -a -n "${OS_NAME:-}" ] && return 0
+ data=$(run_self_inside "$name" os_info) ||
+ { errorrc "Failed to get os-info in container $name"; return; }
+ eval "$data" && [ -n "${OS_VERSION:-}" -a -n "${OS_NAME:-}" ] || return
+ debug 1 "determined $name is $OS_NAME/$OS_VERSION"
+}
+
+os_info() {
+ get_os_info || return
+ echo "OS_NAME=$OS_NAME"
+ echo "OS_VERSION=$OS_VERSION"
+}
+
+get_os_info() {
+ # run inside container, set OS_NAME, OS_VERSION
+ # example OS_NAME are centos, debian, opensuse
+ [ -n "${OS_NAME:-}" -a -n "${OS_VERSION:-}" ] && return 0
+ if [ -f /etc/os-release ]; then
+ OS_NAME=$(sh -c '. /etc/os-release; echo $ID')
+ OS_VERSION=$(sh -c '. /etc/os-release; echo $VERSION_ID')
+ if [ -z "$OS_VERSION" ]; then
+ local pname=""
+ pname=$(sh -c '. /etc/os-release; echo $PRETTY_NAME')
+ case "$pname" in
+ *buster*) OS_VERSION=10;;
+ *sid*) OS_VERSION="sid";;
+ esac
+ fi
+ elif [ -f /etc/centos-release ]; then
+ local line=""
+ read line < /etc/centos-release
+ case "$line" in
+ CentOS\ *\ 6.*) OS_VERSION="6"; OS_NAME="centos";;
+ esac
+ fi
+ [ -n "${OS_NAME:-}" -a -n "${OS_VERSION:-}" ] ||
+ { error "Unable to determine OS_NAME/OS_VERSION"; return 1; }
+}
+
+yum_install() {
+ local n=0 max=10 ret
+ bcmd="yum install --downloadonly --assumeyes --setopt=keepcache=1"
+ while n=$((n+1)); do
+ error ":: running $bcmd $* [$n/$max]"
+ $bcmd "$@"
+ ret=$?
+ [ $ret -eq 0 ] && break
+ [ $n -ge $max ] && { error "gave up on $bcmd"; exit $ret; }
+ nap=$((n*5))
+ error ":: failed [$ret] ($n/$max). sleeping $nap."
+ sleep $nap
+ done
+ error ":: running yum install --cacheonly --assumeyes $*"
+ yum install --cacheonly --assumeyes "$@"
+}
+
+zypper_install() {
+ local pkgs="$*"
+ set -- zypper --non-interactive --gpg-auto-import-keys install \
+ --auto-agree-with-licenses "$@"
+ debug 1 ":: installing $pkgs with zypper: $*"
+ "$@"
+}
+
+apt_install() {
+ apt-get update -q && apt-get install --no-install-recommends "$@"
+}
+
+install_packages() {
+ get_os_info || return
+ case "$OS_NAME" in
+ centos) yum_install "$@";;
+ opensuse) zypper_install "$@";;
+ debian|ubuntu) apt_install "$@";;
+ *) error "Do not know how to install packages on ${OS_NAME}";
+ return 1;;
+ esac
+}
+
+prep() {
+ # we need some very basic things not present in the container.
+ # - git
+ # - tar (CentOS 6 lxc container does not have it)
+ # - python-argparse (or python3)
+ local needed="" pair="" pkg="" cmd=""
+ local pairs="tar:tar git:git"
+ local pyexe="$1"
+ get_os_info
+ local py2pkg="python2" py3pkg="python3"
+ case "$OS_NAME" in
+ opensuse)
+ py2pkg="python-base"
+ py3pkg="python3-base";;
+ esac
+
+ case "$pyexe" in
+ python2) pairs="$pairs python2:$py2pkg";;
+ python3) pairs="$pairs python3:$py3pkg";;
+ esac
+
+ for pair in $pairs; do
+ pkg=${pair#*:}
+ cmd=${pair%%:*}
+ command -v "$cmd" >/dev/null 2>&1 || needed="${needed} $pkg"
+ done
+ if [ "$OS_NAME" = "centos" -a "$pyexe" = "python2" ]; then
+ python -c "import argparse" >/dev/null 2>&1 ||
+ needed="${needed} python-argparse"
+ fi
+ needed=${needed# }
+ if [ -z "$needed" ]; then
+ error "No prep packages needed"
+ return 0
+ fi
+ error "Installing prep packages: ${needed}"
+ # shellcheck disable=SC2086
+ set -- $needed
+ install_packages "$@"
+}
+
+nose() {
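+ # run the test suite; centos 6's python cannot run nose via '-m',
+ # so use the nosetests script there.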
+ local pyexe="$1" cmd=""
+ shift
+ get_os_info
+ if [ "$OS_NAME/$OS_VERSION" = "centos/6" ]; then
+ cmd="nosetests"
+ else
+ cmd="$pyexe -m nose"
+ fi
+ ${cmd} "$@"
+}
+
+is_done_cloudinit() {
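+ # boot is done once cloud-init has written its result file.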
+ _RET=""
+ [ -e "/run/cloud-init/result.json" ]
+}
+
+is_done_systemd() {
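+ # boot is done once systemd has moved past initializing/starting;
+ # bus-connection failures are retried (warning after a few tries).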
+ local s="" num="$1"
+ s=$(systemctl is-system-running 2>&1);
+ _RET="$? $s"
+ case "$s" in
+ initializing|starting) return 1;;
+ *[Ff]ailed*connect*bus*)
+ # only warn after several failed attempts.
+ [ "$num" -lt 5 ] ||
+ error "Failed to connect to systemd bus [${_RET%% *}]";
+ return 1;;
+ esac
+ return 0
+}
+
+is_done_other() {
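+ # fallback check: consider boot done once name resolution (getent) works.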
+ local out=""
+ out=$(getent hosts ubuntu.com 2>&1)
+ return
+}
+
+wait_inside() {
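+ # poll the best available boot-completion check once per second,
+ # giving up after $max tries.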
+ local name="$1" max="${2:-${DEFAULT_WAIT_MAX}}" debug=${3:-0}
+ local i=0 check="is_done_other";
+ if [ -e /run/systemd ]; then
+ check=is_done_systemd
+ elif [ -x /usr/bin/cloud-init ]; then
+ check=is_done_cloudinit
+ fi
+ [ "$debug" != "0" ] && debug 1 "check=$check"
+ while ! $check $i && i=$((i+1)); do
+ [ "$i" -ge "$max" ] && { error "gave up waiting after $i tries"; exit 1; }
+ [ "$debug" = "0" ] || echo -n .
+ sleep 1
+ done
+ if [ "$debug" != "0" ]; then
+ read up _ </proc/uptime
+ debug 1 "[$name ${i:+done after $i }up=$up${_RET:+ ${_RET}}]"
+ fi
+}
+
+wait_for_boot() {
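+ # wait for the container to finish booting; debian gets a longer
+ # timeout, and centos gets yum proxy configuration if http_proxy is set.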
+ local name="$1"
+ local out="" ret="" wtime=$DEFAULT_WAIT_MAX
+ get_os_info_in "$name"
+ [ "$OS_NAME" = "debian" ] && wtime=300 &&
+ debug 1 "on debian we wait for ${wtime}s"
+ debug 1 "waiting for boot of $name"
+ run_self_inside "$name" wait_inside "$name" "$wtime" "$VERBOSITY" ||
+ { errorrc "wait inside $name failed."; return; }
+
+ if [ -n "${http_proxy-}" ]; then
+ if [ "$OS_NAME" = "centos" ]; then
+ debug 1 "configuring proxy ${http_proxy}"
+ inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
+ inside "$name" sed -i s/enabled=1/enabled=0/ \
+ /etc/yum/pluginconf.d/fastestmirror.conf
+ else
+ debug 1 "do not know how to configure proxy on $OS_NAME"
+ fi
+ fi
+}
+
+start_container() {
+ local src="$1" name="$2"
+ debug 1 "starting container $name from '$src'"
+ lxc launch "$src" "$name" || {
+ errorrc "Failed to start container '$name' from '$src'";
+ return
+ }
+ CONTAINER=$name
+ wait_for_boot "$name"
+}
+
+delete_container() {
+ debug 1 "removing container $1 [--keep to keep]"
+ lxc delete --force "$1"
+}
+
+run_self_inside() {
+ # run_self_inside(container, args)
+ local name="$1"
+ shift
+ inside "$name" bash -s "$@" <"$0"
+}
+
+run_self_inside_as_cd() {
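+ # like run_self_inside, but run as user $2 with working directory $3.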
+ local name="$1" user="$2" dir="$3"
+ shift 3
+ inside_as_cd "$name" "$user" "$dir" bash -s "$@" <"$0"
+}
+
+main() {
+ local short_opts="a:hkn:psuv"
+ local long_opts="artifacts:,dirty,help,keep,name:,pyexe:,package,source-package,unittest,verbose"
+ local getopt_out=""
+ getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ { bad_Usage; return; }
+
+ local cur="" next=""
+ local package=false srcpackage=false unittest="" name=""
+ local dirty=false pyexe="auto" artifact_d="."
+
+ while [ $# -ne 0 ]; do
+ cur="${1:-}"; next="${2:-}";
+ case "$cur" in
+ -a|--artifacts) artifact_d="$next"; shift;;
+ --dirty) dirty=true;;
+ -h|--help) Usage ; exit 0;;
+ -k|--keep) KEEP=true;;
+ -n|--name) name="$next"; shift;;
+ --pyexe) pyexe=$next; shift;;
+ -p|--package) package=true;;
+ -s|--source-package) srcpackage=true;;
+ -u|--unittest) unittest=1;;
+ -v|--verbose) VERBOSITY=$((VERBOSITY+1));;
+ --) shift; break;;
+ esac
+ shift;
+ done
+
+ [ $# -eq 1 ] || { bad_Usage "Expected 1 arg, got $# ($*)"; return; }
+ local img_ref_in="$1"
+ case "${img_ref_in}" in
+ *:*) img_ref="${img_ref_in}";;
+ *) img_ref="images:${img_ref_in}";;
+ esac
+
+ # program starts here
+ local out="" user="ci-test" cdir="" home=""
+ home="/home/$user"
+ cdir="$home/cloud-init"
+ if [ -z "$name" ]; then
+ if out=$(petname 2>&1); then
+ name="ci-${out}"
+ elif out=$(uuidgen -t 2>&1); then
+ name="ci-${out%%-*}"
+ else
+ error "Must provide --name, or have 'petname' or 'uuidgen' available"
+ return 1
+ fi
+ fi
+
+ trap cleanup EXIT
+
+ start_container "$img_ref" "$name" ||
+ { errorrc "Failed to start container for $img_ref"; return; }
+
+ get_os_info_in "$name" ||
+ { errorrc "failed to get os_info in $name"; return; }
+
+ if [ "$pyexe" = "auto" ]; then
+ case "$OS_NAME/$OS_VERSION" in
+ centos/*|opensuse/*) pyexe=python2;;
+ *) pyexe=python3;;
+ esac
+ debug 1 "set pyexe=$pyexe for $OS_NAME/$OS_VERSION"
+ fi
+
+ # prep the container (install very basic dependencies)
+ run_self_inside "$name" prep "$pyexe" ||
+ { errorrc "Failed to prep container $name"; return; }
+
+ # add the user
+ inside "$name" useradd "$user" --create-home "--home-dir=$home" ||
+ { errorrc "Failed to add user '$user' in '$name'"; return 1; }
+
+ debug 1 "inserting cloud-init"
+ inject_cloud_init "$name" "$user" "$dirty" || {
+ errorrc "FAIL: injecting cloud-init into $name failed."
+ return
+ }
+
+ inside_as_cd "$name" root "$cdir" \
+ $pyexe ./tools/read-dependencies "--distro=${OS_NAME}" \
+ --test-distro || {
+ errorrc "FAIL: failed to install dependencies with read-dependencies"
+ return
+ }
+
+ local errors=( )
+ inside_as_cd "$name" "$user" "$cdir" git status || {
+ errorrc "git status failed."
+ errors[${#errors[@]}]="git status";
+ }
+
+ if [ -n "$unittest" ]; then
+ debug 1 "running unit tests."
+ run_self_inside_as_cd "$name" "$user" "$cdir" nose "$pyexe" \
+ tests/unittests cloudinit/ || {
+ errorrc "nosetests failed.";
+ errors[${#errors[@]}]="nosetests"
+ }
+ fi
+
+ local build_pkg="" build_srcpkg="" pkg_ext="" distflag=""
+ case "$OS_NAME" in
+ centos) distflag="--distro=redhat";;
+ opensuse) distflag="--distro=suse";;
+ esac
+
+ case "$OS_NAME" in
+ debian|ubuntu)
+ build_pkg="./packages/bddeb -d"
+ build_srcpkg="./packages/bddeb -S -d"
+ pkg_ext=".deb";;
+ centos|opensuse)
+ build_pkg="./packages/brpm $distflag"
+ build_srcpkg="./packages/brpm $distflag --srpm"
+ pkg_ext=".rpm";;
+ esac
+ if [ "$srcpackage" = "true" ]; then
+ [ -n "$build_srcpkg" ] || {
+ error "Unknown source-package command for $OS_NAME"
+ return 1
+ }
+ debug 1 "building source package with $build_srcpkg."
+ # shellcheck disable=SC2086
+ inside_as_cd "$name" "$user" "$cdir" $pyexe $build_srcpkg || {
+ errorrc "failed: $build_srcpkg";
+ errors[${#errors[@]}]="source package"
+ }
+ fi
+
+ if [ "$package" = "true" ]; then
+ [ -n "$build_pkg" ] || {
+ error "Unknown package build command for $OS_NAME"
+ return 1
+ }
+ debug 1 "building binary package with $build_pkg."
+ # shellcheck disable=SC2086
+ inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
+ errorrc "failed: $build_pkg";
+ errors[${#errors[@]}]="binary package"
+ }
+ fi
+
+ if [ -n "$artifact_d" ] &&
+ [ "$package" = "true" -o "$srcpackage" = "true" ]; then
+ local art=""
+ artifact_d="${artifact_d%/}/"
+ [ -d "${artifact_d}" ] || mkdir -p "$artifact_d" || {
+ errorrc "failed to create artifact dir '$artifact_d'"
+ return
+ }
+
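+ # pull any built packages out of the container into the artifact dir.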
+ for art in $(inside "$name" sh -c "echo $cdir/*${pkg_ext}"); do
+ if lxc file pull "$name/$art" "$artifact_d"; then
+ debug 1 "wrote ${artifact_d}${art##*/}"
+ else
+ errorrc "Failed to pull '$name/$art' to ${artifact_d}"
+ errors[${#errors[@]}]="artifact copy: $art"
+ fi
+ done
+ fi
+
+ if [ "${#errors[@]}" != "0" ]; then
+ local e=""
+ error "there were ${#errors[@]} errors."
+ for e in "${errors[@]}"; do
+ error " $e"
+ done
+ return 1
+ fi
+ return 0
+}
+
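+# when re-invoked inside a container (see run_self_inside), the first
+# argument names the helper function to run directly.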
+case "${1:-}" in
+ prep|os_info|wait_inside|nose) _n=$1; shift; "$_n" "$@";;
+ *) main "$@";;
+esac
+
+# vi: ts=4 expandtab
diff --git a/tools/tox-venv b/tools/tox-venv
index 76ed5076..a5d21625 100755
--- a/tools/tox-venv
+++ b/tools/tox-venv
@@ -1,42 +1,185 @@
#!/bin/sh
+# https://gist.github.com/smoser/2d4100a6a5d230ca937f
+CR='
+'
error() { echo "$@" 1>&2; }
fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+get_env_dirs() {
+ # read 'tox --showconfig'. return list of
+ # envname:dir
+ local key="" equal="" val="" curenv="" out=""
+ while read key equal val; do
+ case "$key" in
+ "[testenv:"*)
+ curenv=${key#*:};
+ curenv=${curenv%%"]"*};
+ continue;;
+ esac
+ if [ "${key#*=}" != "$key" ]; then
+ # older tox shows 'key=value' or 'key= value';
+ # newer tox shows 'key = value'.
+ val=${key#*=}
+ [ -n "$val" ] || val=${equal}
+ key=${key%%=*}
+ fi
+ [ "$key" = "envdir" ] || continue
+ out="${out:+${out}${CR}}${curenv}:$val"
+ done
+ echo "$out"
+}
+
+load_config() {
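+ # run 'tox --showconfig' for $tox_ini once and cache the env:dir list.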
+ local tox_ini="$1" out="" envs=""
+ if [ "$tox_ini" = "${CACHED_ENVS_INI}" ]; then
+ _RET="$CACHED_ENVS"
+ return
+ fi
+ out=$(tox -c "$tox_ini" --showconfig) || return 1
+ envs=$(echo "$out" | get_env_dirs) || return 1
+ CACHED_ENVS="$envs"
+ CACHED_ENVS_INI="$tox_ini"
+ _RET="$envs"
+}
+
+list_environments() {
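+ # print one line per environment; '*' marks envs that already exist.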
+ local tox_ini="$1" prefix=" " envs="" d="" env="" dir="" s="" oifs="$IFS"
+ load_config "$tox_ini" || return 1
+ envs="${_RET}"
+ IFS="$CR"
+ for d in ${envs}; do
+ env=${d%%:*}
+ dir=${d#*:}
+ [ -f "$dir/bin/activate" ] && s="*" || s=""
+ echo "${prefix}$env$s";
+ done
+ IFS="$oifs"
+}
+
+get_command() {
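+ # recover the 'commands' entry for env $2 from tox.ini: join backslash
+ # continuations, then substitute {envpython}, {toxinidir} and {posargs}.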
+ local tox_ini="$1" env="$2" out=""
+ shift 2
+ out=$(
+ sed -e ':x; /\\$/ { N; s/\\\n[ ]*//; tx };' "${tox_ini}" |
+ gawk '
+ $1 ~ /^\[testenv.*\]/ {
+ name=$1;
+ sub("\\[", "", name); sub(".*:", "", name);
+ sub("].*", "", name);
+ };
+ $1 == "basepython" && (name == "testenv" || name == n) { python=$3 }
+ $1 == "commands" && (name == "testenv" || name == n) {
+ sub("commands = ", ""); cmd = $0; };
+ END {
+ sub("{envpython}", python, cmd);
+ sub("{toxinidir}", toxinidir, cmd);
+ if (inargs == "") replacement = "\\1"
+ else replacement = inargs
+ cmd = gensub(/{posargs:?([^}]*)}/, replacement, "global", cmd)
+ print(cmd);
+ }' n="$env" toxinidir="$(dirname "$tox_ini")" inargs="$*")
+ if [ -z "$out" ]; then
+ error "Failed to find command for $env in $tox_ini"
+ return 1
+ fi
+ echo "$out"
+}
+
+get_env_dir() {
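+ # map an env name to its envdir; use the .tox/$env shortcut when
+ # present (set TOX_VENV_SHORTCUT=0 to disable).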
+ local tox_ini="$1" env="$2" oifs="$IFS" t="" d="" envs=""
+ if [ "${TOX_VENV_SHORTCUT:-1}" != "0" ]; then
+ local stox_d="${tox_ini%/*}/.tox/${env}"
+ if [ -e "${stox_d}/bin/activate" ]; then
+ _RET="${stox_d}"
+ return
+ fi
+ fi
+ load_config "$tox_ini" && envs="$_RET" || return 1
+ IFS="$CR"
+ for t in $envs; do
+ [ "$env" = "${t%%:*}" ] && d="${t#*:}" && break
+ done
+ IFS=${oifs}
+ [ -n "$d" ] || return 1
+ _RET="$d"
+}
+
Usage() {
- cat <<EOF
-Usage: ${0##*/} tox-environment [command [args]]
+ local tox_ini="$1"
+ cat <<EOF
+Usage: ${0##*/} [--no-create] tox-environment [command [args]]
run command with provided arguments in the provided tox environment
- command defaults to \${SHELL:-/bin/sh}.
+ command defaults to 'cmd' (see below).
+
+ run with '--list' to show available environments
- invoke with '--list' to show available environments
+ if 'command' is the literal 'cmd' or '-', then the command to run
+ is read from tox.ini. For example:
+ tox-venv py27 - tests/some/sub/dir
+ reads the py27 command from tox.ini and runs:
+ python -m nose tests/some/sub/dir
EOF
-}
-list_toxes() {
- local td="$1" pre="$2" d=""
- ( cd "$tox_d" &&
- for d in *; do [ -f "$d/bin/activate" ] && echo "${pre}$d"; done)
+
+ if [ -f "$tox_ini" ]; then
+ local oini=${tox_ini}
+ [ "${tox_ini}" -ef "$PWD/tox.ini" ] && oini="./tox.ini"
+ echo
+ echo "environments in $oini"
+ list_environments "$tox_ini"
+ fi
}
-[ $# -eq 0 ] && { Usage 1>&2; exit 1; }
-[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; }
+if [ -f tox.ini ]; then
+ tox_ini="$PWD/tox.ini"
+else
+ tox_ini="${0%/*}/../tox.ini"
+fi
-env="$1"
-shift
-tox_d="${0%/*}/../.tox"
-activate="$tox_d/$env/bin/activate"
+[ $# -eq 0 ] && { Usage "$tox_ini" 1>&2; exit 1; }
+[ "$1" = "-h" -o "$1" = "--help" ] && { Usage "$tox_ini"; exit 0; }
+[ -f "$tox_ini" ] || fail "$tox_ini: did not find tox.ini"
-[ -d "$tox_d" ] || fail "$tox_d: not a dir. maybe run 'tox'?"
+if [ "$1" = "-l" -o "$1" = "--list" ]; then
+ list_environments "$tox_ini"
+ exit
+fi
-[ "$env" = "-l" -o "$env" = "--list" ] && { list_toxes ; exit ; }
+nocreate="false"
+if [ "$1" = "--no-create" ]; then
+ nocreate="true"
+ shift
+fi
-if [ ! -f "$activate" ]; then
- error "$env: not a valid tox environment?"
- error "try one of:"
- list_toxes "$tox_d" " "
- fail
+env="$1"
+shift
+[ "$1" = "--" ] && shift
+get_env_dir "$tox_ini" "$env" && activate="$_RET/bin/activate" || activate=""
+
+if [ -z "$activate" -o ! -f "$activate" ]; then
+ if $nocreate; then
+ fail "tox env '$env' did not exist, and no-create specified"
+ elif [ -n "$activate" ]; then
+ error "attempting to create $env:"
+ error " tox -c $tox_ini --recreate --notest -e $env"
+ tox -c "$tox_ini" --recreate --notest -e "$env" ||
+ fail "failed creation of env $env"
+ else
+ error "$env: not a valid tox environment?"
+ error "found tox_ini=$tox_ini"
+ error "try one of:"
+ list_environments "$tox_ini" 1>&2
+ fail
+ fi
fi
. "$activate"
-[ "$#" -gt 0 ] || set -- ${SHELL:-/bin/bash}
+[ $# -eq 0 ] && set -- cmd
+if [ "$1" = "cmd" -o "$1" = "-" ]; then
+ shift
+ out=$(get_command "$tox_ini" "$env" "$@") || exit
+ eval set -- "$out"
+fi
+echo "inside tox:$env running: $*" 1>&2
debian_chroot="tox:$env" exec "$@"
+
+# vi: ts=4 expandtab
diff --git a/tox.ini b/tox.ini
index 818ade3d..2fb3209d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py27, py3, flake8, xenial, pylint
+envlist = py27, py3, xenial, pycodestyle, pyflakes, pylint
recreate = True
[testenv]
@@ -7,14 +7,11 @@ commands = python -m nose {posargs:tests/unittests cloudinit}
setenv =
LC_ALL = en_US.utf-8
-[testenv:flake8]
+[testenv:pycodestyle]
basepython = python3
deps =
- pycodestyle==2.3.1
- pyflakes==1.5.0
- flake8==3.3.0
- hacking==0.13.0
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+ pycodestyle==2.4.0
+commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
# https://github.com/gabrielfalcao/HTTPretty/issues/223
setenv =
@@ -118,6 +115,11 @@ deps =
commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
deps = pycodestyle
+[testenv:pyflakes]
+commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
+deps =
+ pyflakes==1.6.0
+
[testenv:tip-pyflakes]
commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
deps = pyflakes